Compare commits


No commits in common. "master" and "develop" have entirely different histories.

149 changed files with 10835 additions and 17399 deletions

.devenv.hosting.yaml (new file, +84)

@@ -0,0 +1,84 @@
hosts:
- address: localhost
plugin_name: docker
services:
- name: s01
attributes:
container_name: s01
config_path: ../neofs-dev-env/services/storage/.storage.env
wallet_path: ../neofs-dev-env/services/storage/wallet01.json
wallet_password: ""
volume_name: storage_storage_s01
rpc_endpoint: s01.neofs.devenv:8080
control_endpoint: s01.neofs.devenv:8081
un_locode: "RU MOW"
- name: s02
attributes:
container_name: s02
config_path: ../neofs-dev-env/services/storage/.storage.env
wallet_path: ../neofs-dev-env/services/storage/wallet02.json
wallet_password: ""
volume_name: storage_storage_s02
rpc_endpoint: s02.neofs.devenv:8080
control_endpoint: s02.neofs.devenv:8081
un_locode: "RU LED"
- name: s03
attributes:
container_name: s03
config_path: ../neofs-dev-env/services/storage/.storage.env
wallet_path: ../neofs-dev-env/services/storage/wallet03.json
wallet_password: ""
volume_name: storage_storage_s03
rpc_endpoint: s03.neofs.devenv:8080
control_endpoint: s03.neofs.devenv:8081
un_locode: "SE STO"
- name: s04
attributes:
container_name: s04
config_path: ../neofs-dev-env/services/storage/.storage.env
wallet_path: ../neofs-dev-env/services/storage/wallet04.json
wallet_password: ""
volume_name: storage_storage_s04
rpc_endpoint: s04.neofs.devenv:8080
control_endpoint: s04.neofs.devenv:8081
un_locode: "FI HEL"
- name: s3-gate01
attributes:
container_name: s3_gate
config_path: ../neofs-dev-env/services/s3_gate/.s3.env
wallet_path: ../neofs-dev-env/services/s3_gate/wallet.json
wallet_password: "s3"
endpoint: https://s3.neofs.devenv:8080
- name: http-gate01
attributes:
container_name: http_gate
config_path: ../neofs-dev-env/services/http_gate/.http.env
wallet_path: ../neofs-dev-env/services/http_gate/wallet.json
wallet_password: "one"
endpoint: http://http.neofs.devenv
- name: ir01
attributes:
container_name: ir01
config_path: ../neofs-dev-env/services/ir/.ir.env
wallet_path: ../neofs-dev-env/services/ir/az.json
wallet_password: "one"
- name: morph-chain01
attributes:
container_name: morph_chain
config_path: ../neofs-dev-env/services/morph_chain/protocol.privnet.yml
wallet_path: ../neofs-dev-env/services/morph_chain/node-wallet.json
wallet_password: "one"
endpoint: http://morph-chain.neofs.devenv:30333
- name: main-chain01
attributes:
container_name: main_chain
config_path: ../neofs-dev-env/services/chain/protocol.privnet.yml
wallet_path: ../neofs-dev-env/services/chain/node-wallet.json
wallet_password: "one"
endpoint: http://main-chain.neofs.devenv:30333
- name: coredns01
attributes:
container_name: coredns
clis:
- name: neofs-cli
exec_path: neofs-cli
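This hosting config is consumed by the testlib's `Hosting` layer. Below is a minimal sketch of loading it, assuming `Hosting.configure()` accepts the parsed YAML as a dict; the `hosting.hosts` / `host.config.services` traversal mirrors the version-check helper at the end of this diff.

```python
# Sketch only, not the repo's actual bootstrap code: assumes
# Hosting.configure() takes the parsed YAML dict. The traversal below
# mirrors get_remote_binaries_versions() shown later in this diff.
import yaml

from neofs_testlib.hosting import Hosting

with open(".devenv.hosting.yaml") as f:
    config = yaml.safe_load(f)

hosting = Hosting()
hosting.configure(config)

for host in hosting.hosts:
    for service_config in host.config.services:
        print(service_config.name, service_config.attributes.get("rpc_endpoint"))
```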


@@ -1,26 +0,0 @@
# yamllint disable rule:truthy
name: DCO check
on:
pull_request:
jobs:
dco:
name: DCO
runs-on: docker
container:
image: node:22-bookworm
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '1.22'
- name: Run commit format checker
uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
with:
from: 'origin/${{ github.event.pull_request.base.ref }}'

.github/CODEOWNERS (new file, +1)

@@ -0,0 +1 @@
* @aprasolova @vdomnich-yadro @dansingjulia @yadro-vavdeev

.github/workflows/dco.yml (new file, +22)

@@ -0,0 +1,22 @@
name: DCO check
on:
pull_request:
branches:
- master
- develop
jobs:
commits_check_job:
runs-on: ubuntu-latest
name: Commits Check
steps:
- name: Get PR Commits
id: 'get-pr-commits'
uses: tim-actions/get-pr-commits@master
with:
token: ${{ secrets.GITHUB_TOKEN }}
- name: DCO Check
uses: tim-actions/dco@master
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}

.gitignore (9 changes)

@@ -1,11 +1,5 @@
# ignore IDE files
.vscode
.idea
.DS_Store
venv.*
venv_macos
# ignore test results
**/log.html
@@ -18,7 +12,6 @@ xunit_results.xml
# ignore caches under any path
**/__pycache__
**/.pytest_cache
*.egg-info
# ignore work directories and setup files
.setup
@@ -27,4 +20,4 @@ TemporaryDir/*
artifacts/*
docs/*
venv.*/*
wallet_config.yml
/*wallet_config.yml

.gitmodules (new file, +4)

@@ -0,0 +1,4 @@
[submodule "neofs-keywords"]
path = neofs-keywords
url = ssh://git@github.com/nspcc-dev/neofs-keywords.git
ignore = all
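Note that cloning the repository does not fetch the submodule; `ignore = all` only hides its changes from `git status`. The standard way to pull it in:

```shell
$ git submodule update --init --recursive
```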


@@ -3,23 +3,9 @@ repos:
rev: 22.8.0
hooks:
- id: black
language_version: python3.10
language_version: python3.9
- repo: https://github.com/pycqa/isort
rev: 5.12.0
rev: 5.10.1
hooks:
- id: isort
name: isort (python)
- repo: https://git.frostfs.info/TrueCloudLab/allure-validator
rev: 1.1.1
hooks:
- id: allure-validator
args: [
"pytest_tests/",
"--plugins",
"frostfs[-_]testlib*",
]
pass_filenames: false
ci:
autofix_prs: false
autoupdate_schedule: quarterly
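With this config in place, the hooks can be installed and run across the whole tree on demand (standard pre-commit CLI usage):

```shell
$ pre-commit install
$ pre-commit run --all-files
```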


@@ -1,3 +0,0 @@
.* @TrueCloudLab/qa-committers
.forgejo/.* @potyarkin
Makefile @potyarkin


@@ -1,177 +0,0 @@
# Contribution guide
First, thank you for contributing! We love and encourage pull requests from
everyone. Please follow the guidelines:
- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-testcases/issues) and
[pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-testcases/pulls) for existing
discussions.
- Open an issue first, to discuss a new feature or enhancement.
- Write tests, and make sure the test suite passes locally.
- Open a pull request, and reference the relevant issue(s).
- Make sure your commits are logically separated and have good comments
explaining the details of your change.
- After receiving feedback, amend your commits or add new ones as appropriate.
- **Have fun!**
## Development Workflow
Start by forking the `frostfs-testcases` repository, make changes in a branch and then
send a pull request. We encourage pull requests to discuss code changes. Here
are the steps in detail:
### Set up your Git Repository
Fork the [FrostFS testcases upstream](https://git.frostfs.info/TrueCloudLab/frostfs-testcases/forks) source
repository to your own personal repository. Copy the URL of your fork and clone it:
```shell
$ git clone <url of your fork>
```
### Set up git remote as ``upstream``
```sh
$ cd frostfs-testcases
$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-testcases
$ git fetch upstream
```
### Set up development environment
To set up a development environment for `frostfs-testcases`, take the following steps:
1. Prepare virtualenv
```shell
$ make venv
$ source frostfs-testcases-3.10/bin/activate
```
Optionally, you may want to integrate the code formatters with your code editor so that they are applied to code files as you go:
* isort is supported by [PyCharm](https://plugins.jetbrains.com/plugin/15434-isortconnect) and [VS Code](https://cereblanco.medium.com/setup-black-and-isort-in-vscode-514804590bf9). Plugins exist for other IDEs/editors as well.
* black can be integrated with multiple editors; instructions are available [here](https://black.readthedocs.io/en/stable/integrations/editors.html).
### Create your feature branch
Before making code changes, make sure you create a separate branch for them.
You may find it convenient to name the branch in
`<type>/<issue>-<changes_topic>` format.
```shell
$ git checkout -b feature/123-something_awesome
```
### Commit changes
After verification, commit your changes. There is a [great
post](https://chris.beams.io/posts/git-commit/) on how to write useful commit
messages. Try following this template:
```
[#Issue] Summary
Description
<Macros>
<Sign-Off>
```
```shell
$ git commit -am '[#123] Add some feature'
```
### Push to the branch
Push your locally committed changes to the remote origin (your fork):
```shell
$ git push origin feature/123-something_awesome
```
### Create a Pull Request
Pull requests can be created via Git. Refer to [this
document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for
detailed steps on how to create a pull request. After a Pull Request gets peer
reviewed and approved, it will be merged.
## Code Style
The names of Python variables, functions and classes must comply with [PEP8](https://peps.python.org/pep-0008) rules, in particular:
* Name of a variable/function must be in snake_case (lowercase, with words separated by underscores as necessary to improve readability).
* Name of a global variable must be in UPPER_SNAKE_CASE, the underscore (`_`) symbol must be used as a separator between words.
* Name of a class must be in PascalCase (the first letter of each compound word in a variable name is capitalized).
* Names of other variables should not be ended with the underscore symbol.
The line length limit is set to 100 characters.
Imports should be ordered in accordance with [isort default rules](https://pycqa.github.io/isort/).
We use `black` and `isort` for code formatting. Please, refer to [Black code style](https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html) for details.
Type hints are mandatory for the library's code:
- class attributes;
- function and method parameters;
- function and method return types.
The only exception is the return type of test functions or methods: there is not much use in specifying `None` as the return type for each test function.
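A short hypothetical illustration of these conventions (the class and function names are invented for the example):

```python
class StorageNode:
    # class attributes carry type hints
    rpc_endpoint: str

    def get_object(self, cid: str, oid: str) -> bytes: ...


def test_get_object(storage_node: StorageNode):
    # test functions may omit the `-> None` return annotation
    ...
```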
Do not use relative imports. Even if the module is in the same package, use the full package name.
To format docstrings, use [Google Style Docstrings](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html). Type annotations should be specified in the code, not in docstrings (refer to [this sample](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/index.html#type-annotations)).
## DCO Sign off
All authors of the project retain copyright to their work. However, to ensure
that they only submit work they have the rights to, we require everyone to
acknowledge this by signing their work.
Any copyright notices in this repository should specify the authors as "the
contributors".
To sign your work, just add a line like this at the end of your commit message:
```
Signed-off-by: Samii Sakisaka <samii@nspcc.ru>
```
This can easily be done with the `--signoff` option to `git commit`.
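For example, combined with the commit template above (the issue number is hypothetical):

```shell
$ git commit --signoff -m '[#123] Add some feature'
```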
By doing this you state that you can certify the following (from [The Developer
Certificate of Origin](https://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
1 Letterman Drive
Suite D4700
San Francisco, CA, 94129
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```

LICENSE (deleted, -674)

@@ -1,674 +0,0 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.


@@ -1,42 +1,38 @@
SHELL := /bin/bash
PYTHON_VERSION := 3.10
VENV_NAME = frostfs-testcases-${PYTHON_VERSION}
VENV_DIR := venv.${VENV_NAME}
#!/usr/bin/make -f
current_dir := $(shell pwd)
FROM_VENV := . ${VENV_DIR}/bin/activate &&
.DEFAULT_GOAL := help
venv: create requirements paths precommit
@echo Ready
SHELL = bash
precommit:
@echo Installing pre-commit hooks
${FROM_VENV} pre-commit install
OUTPUT_DIR = artifacts/
KEYWORDS_REPO = git@github.com:nspcc-dev/neofs-keywords.git
VENVS = $(shell ls -1d venv/*/ | sort -u | xargs basename -a)
paths:
@echo Append paths for project
@echo Virtual environment: ${VENV_DIR}
@rm -rf ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
@touch ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
@echo ${current_dir} | tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
.PHONY: all
all: venvs
create: ${VENV_DIR}
include venv_template.mk
${VENV_DIR}:
@echo Create virtual environment ${VENV_DIR}
virtualenv --python=python${PYTHON_VERSION} --prompt=${VENV_NAME} ${VENV_DIR}
run: venvs
@echo "⇒ Test Run"
@robot --timestampoutputs --outputdir $(OUTPUT_DIR) robot/testsuites/integration/
requirements:
@echo Installing pip requirements
${FROM_VENV} pip install -e ../frostfs-testlib
${FROM_VENV} pip install -Ur requirements.txt
${FROM_VENV} pip install -Ur requirements_dev.txt
.PHONY: venvs
venvs:
$(foreach venv,$(VENVS),venv.$(venv))
$(foreach venv,$(VENVS),$(eval $(call VENV_template,$(venv))))
#### VALIDATION SECTION ####
lint: create requirements
${FROM_VENV} pip install -e ../frostfs-testlib;
${FROM_VENV} pylint --disable R,C,W pytest_tests
submodules:
@git submodule init
@git submodule update --recursive --remote
validation: lint
${FROM_VENV} pytest --collect-only
clean:
rm -rf venv.*
pytest-local:
@echo "⇒ Run Pytest"
python -m pytest pytest_tests/testsuites/
help:
@echo "⇒ run Run testcases ${R}"


@@ -2,58 +2,70 @@
Tests written with PyTest Framework are located under `pytest_tests/testsuites` directory.
These tests rely on resources and utility modules that have been originally developed for Pytest Framework.
These tests rely on resources and utility modules that have been originally developed for Robot Framework:
`robot/resources/files` - static files that are used in tests' commands.
`robot/resources/lib/` - common Python libraries that provide utility functions used as building blocks in tests.
`robot/variables/` - constants and configuration variables for tests.
## Testcases execution
### Initial preparation
1. Install frostfs-cli
- `git clone git@git.frostfs.info:TrueCloudLab/frostfs-node.git`
- `cd frostfs-node`
1. Install neofs-cli
- `git clone git@github.com:nspcc-dev/neofs-node.git`
- `cd neofs-node`
- `make`
- `sudo cp bin/frostfs-cli /usr/local/bin/frostfs-cli`
- `sudo cp bin/neofs-cli /usr/local/bin/neofs-cli`
2. Install frostfs-s3-authmate
- `git clone git@git.frostfs.info:TrueCloudLab/frostfs-s3-gw.git`
- `cd frostfs-s3-gw`
2. Install neofs-authmate
- `git clone git@github.com:nspcc-dev/neofs-s3-gw.git`
- `cd neofs-s3-gw`
- `make`
- `sudo cp bin/frostfs-s3-authmate /usr/local/bin/frostfs-s3-authmate`
- `sudo cp bin/neofs-authmate /usr/local/bin/neofs-authmate`
3. Install neo-go
- `git clone git@git.frostfs.info:TrueCloudLab/neo-go.git`
- `git clone git@github.com:nspcc-dev/neo-go.git`
- `cd neo-go`
- `git checkout v0.101.0` (or the current version in the frostfs-dev-env)
- `git checkout v0.92.0` (or the current version in the neofs-dev-env)
- `make`
- `sudo cp bin/neo-go /usr/local/bin/neo-go`
or download binary from releases: https://git.frostfs.info/TrueCloudLab/neo-go/releases
or download binary from releases: https://github.com/nspcc-dev/neo-go/releases
4. Clone frostfs-dev-env
`git clone git@git.frostfs.info:TrueCloudLab/frostfs-dev-env.git`
4. Clone neofs-dev-env
`git clone git@github.com:nspcc-dev/neofs-dev-env.git`
Note that we expect frostfs-dev-env to be located under
the `<testcases_root_dir>/../frostfs-dev-env` directory. If you put this repo in any other place,
manually set the full path to frostfs-dev-env in the environment variable `DEVENV_PATH` at this step.
Note that we expect neofs-dev-env to be located under
the `<testcases_root_dir>/../neofs-dev-env` directory. If you put this repo in any other place,
manually set the full path to neofs-dev-env in the environment variable `DEVENV_PATH` at this step.
5. Make sure you have installed all the following prerequisites on your machine
5. Make sure you have installed all of the following prerequisites on your machine
```
make
python3.10
python3.10-dev
python3.9
python3.9-dev
libssl-dev
```
As we use frostfs-dev-env, you'll also need to install
[prerequisites](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env#prerequisites) of this repository.
As we use neofs-dev-env, you'll also need to install
[prerequisites](https://github.com/nspcc-dev/neofs-dev-env#prerequisites) of this repository.
6. Prepare virtualenv
```shell
$ make venv
$ source venv.frostfs-testcases-3.10/bin/activate
$ make venv.local-pytest
$ . venv.local-pytest/bin/activate
```
7. Optionally you might want to integrate code formatters with your code editor to apply formatters to code files as you go:
7. Set up pre-commit hooks to run code formatters on staged files before you run a `git commit` command:
```shell
$ pre-commit install
```
Optionally, you may want to integrate the code formatters with your code editor so that they are applied to code files as you go:
* isort is supported by [PyCharm](https://plugins.jetbrains.com/plugin/15434-isortconnect) and [VS Code](https://cereblanco.medium.com/setup-black-and-isort-in-vscode-514804590bf9). Plugins exist for other IDEs/editors as well.
* black can be integrated with multiple editors; instructions are available [here](https://black.readthedocs.io/en/stable/integrations/editors.html).
@@ -110,23 +122,16 @@ $ docker run -p 5050:5050 -e CHECK_RESULTS_EVERY_SECONDS=30 -e KEEP_HISTORY=1 \
Then, you can check the allure report in your browser [by this link](http://localhost:5050/allure-docker-service/projects/default/reports/latest/index.html?redirect=false)
NOTE: feel free to select a different location for `allure-reports` directory, there is no requirement to have it inside `frostfs-testcases`. For example, you can place it under `/tmp` path.
NOTE: feel free to select a different location for `allure-reports` directory, there is no requirement to have it inside `neofs-testcases`. For example, you can place it under `/tmp` path.
# Contributing
## Code style
Feel free to contribute to this project after reading the [contributing
guidelines](CONTRIBUTING.md).
The names of Python variables, functions and classes must comply with [PEP8](https://peps.python.org/pep-0008) rules, in particular:
* Name of a variable/function must be in snake_case (lowercase, with words separated by underscores as necessary to improve readability).
* Name of a global variable must be in UPPER_SNAKE_CASE, the underscore (`_`) symbol must be used as a separator between words.
* Name of a class must be in PascalCase (the first letter of each compound word in a variable name is capitalized).
* Names of other variables should not be ended with the underscore symbol.
Before starting to work on a certain topic, create a new issue first, describing
the feature/topic you are going to implement.
The line length limit is set to 100 characters.
# License
- [GNU General Public License v3.0](LICENSE)
## Pytest marks
Custom pytest marks used in tests:
* `sanity` - tests that must run in sanity test runs.
* `smoke` - tests that must run in smoke test runs.
Imports should be ordered in accordance with [isort default rules](https://pycqa.github.io/isort/).


@@ -0,0 +1,31 @@
diff -urN bin.orig/activate bin/activate
--- bin.orig/activate 2018-12-27 14:55:13.916461020 +0900
+++ bin/activate 2018-12-27 20:38:35.223248728 +0900
@@ -30,6 +30,15 @@
unset _OLD_VIRTUAL_PS1
fi
+ # Unset exported dev-env variables
+ pushd ${DEVENV_PATH} > /dev/null
+ unset `make env | awk -F= '{print $1}'`
+ popd > /dev/null
+
+ # Unset external env variables
+ declare -f env_deactivate > /dev/null && env_deactivate
+ declare -f venv_deactivate > /dev/null && venv_deactivate
+
unset VIRTUAL_ENV
if [ ! "${1-}" = "nondestructive" ] ; then
# Self destruct!
@@ -47,6 +56,11 @@
PATH="$VIRTUAL_ENV/bin:$PATH"
export PATH
+# Set external variables
+if [ -f ${VIRTUAL_ENV}/bin/environment.sh ] ; then
+ . ${VIRTUAL_ENV}/bin/environment.sh
+fi
+
# unset PYTHONHOME if set
if ! [ -z "${PYTHONHOME+_}" ] ; then
_OLD_VIRTUAL_PYTHONHOME="$PYTHONHOME"

neofs-keywords (new submodule, +1)

@@ -0,0 +1 @@
Subproject commit 58650099362f05b51b97f04e2244a4ba20f88351


@@ -1,8 +1,8 @@
[tool.isort]
profile = "black"
src_paths = ["pytest_tests"]
line_length = 140
src_paths = ["neofs-keywords", "pytest_tests", "robot"]
line_length = 100
[tool.black]
line-length = 140
target-version = ["py310"]
line-length = 100
target-version = ["py39"]


@@ -1,74 +0,0 @@
[pytest]
log_cli = 1
log_cli_level = DEBUG
log_cli_format = %(asctime)s [%(levelname)4s] %(message)s
log_format = %(asctime)s [%(levelname)4s] %(message)s
log_cli_date_format = %Y-%m-%d %H:%M:%S
log_date_format = %H:%M:%S
markers =
# special markers
staging: tests excluded from verifier/pr-validation/sanity jobs and run only in the staging job
sanity: test runs in sanity testrun
smoke: test runs in smoke testrun
exclude_sanity: tests which should not be in sanity scope
# controlling markers
order: manual control of test order
logs_after_session: Make the last test in session
# parametrizing markers
container: specify container details for container creation
# functional markers
maintenance: tests for changing node mode
container: tests for container creation
grpc_api: standard gRPC API tests
grpc_control: tests related to using frostfs-cli control commands
grpc_object_lock: gRPC lock tests
grpc_without_user: gRPC without user tests
http_gate: HTTP gate contract
http_put: HTTP gate test cases with PUT call
s3_gate: All S3 gate tests
s3_gate_base: Base S3 gate tests
s3_gate_bucket: Bucket S3 gate tests
s3_gate_locking: Locking S3 gate tests
s3_gate_multipart: S3 gate tests with multipart object
s3_gate_object: Object S3 gate tests
s3_gate_tagging: Tagging S3 gate tests
s3_gate_versioning: Versioning S3 gate tests
long: long tests (with long execution time)
node_mgmt: frostfs control commands
session_token: tests for operations with session token
static_session: tests for operations with static session token
bearer: tests for bearer tokens
ape: tests for APE
ape_allow: tests for APE allow rules
ape_deny: tests for APE deny rules
ape_container: tests for APE on container operations
ape_object: tests for APE on object operations
ape_namespace: tests for APE on namespace scope
storage_group: tests for storage groups
failover: tests for system recovery after a failure
failover_panic: tests for system recovery after panic reboot of a node
failover_network: tests for network failure
failover_reboot: tests for system recovery after reboot of a node
interfaces: tests that bring network interfaces down
check_binaries: check frostfs installed binaries versions
payments: tests for payment associated operations
load: performance tests
simple: tests with simple characteristics
complex: tests with complex characteristics
aws: AWS related tests
boto3: tests using the boto3
policy: policy tests
failover_baremetal: failover tests on hardware (baremetal)
failover_server: server failover tests
failover_storage: storage failover tests
failover_empty_map: failover tests for an empty map
failover_empty_map_offlne: offline failover tests for an empty map
failover_empty_map_stop_service: failover tests for stopped empty map service
failover_data_loss: failover tests in case of data loss
metabase_loss: tests for metadata loss
write_cache_loss: tests for write cache loss
replication: replication tests
ec_replication: replication EC
static_session_container: tests for a static session in a container
shard: shard management tests
session_logs: check logs messages

View file

@ -1,3 +0,0 @@
import os
TESTS_BASE_PATH = os.path.dirname(os.path.relpath(__file__))

View file

@ -1,17 +0,0 @@
import os
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.storage.dataclasses import ape
from frostfs_testlib.utils import string_utils
def create_bearer_token(frostfs_cli: FrostfsCli, directory: str, cid: str, rule: ape.Rule, endpoint: str) -> str:
chain_file = os.path.join(directory, string_utils.unique_name("chain-", ".json"))
bearer_token_file = os.path.join(directory, string_utils.unique_name("bt-", ".json"))
signed_bearer_token_file = os.path.join(directory, string_utils.unique_name("bt-sign-", ".json"))
frostfs_cli.bearer.generate_ape_override(rule.chain_id, rule=rule.as_string(), cid=cid, output=chain_file)
frostfs_cli.bearer.create(endpoint, bearer_token_file, issued_at=1, expire_at=9999, ape=chain_file)
frostfs_cli.util.sign_bearer_token(bearer_token_file, signed_bearer_token_file)
return signed_bearer_token_file
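A hedged usage sketch for `create_bearer_token`; the `frostfs_cli`, `cid`, and `cluster` objects and the temporary directory below are assumed to come from test fixtures:

```python
# Sketch only: fixture names are assumptions; the call matches the signature above.
rule = ape.Rule(ape.Verb.ALLOW, ape.ObjectOperations.GET)
token_path = create_bearer_token(
    frostfs_cli,
    directory="/tmp/tokens",  # assumed writable temp dir
    cid=cid,
    rule=rule,
    endpoint=cluster.default_rpc_endpoint,
)
# token_path points to the signed bearer token file, ready to pass to
# object operations that accept a bearer token.
```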

View file

@ -0,0 +1,73 @@
import logging
import re
from common import NEOFS_ADM_EXEC, NEOFS_CLI_EXEC, WALLET_CONFIG
from neofs_testlib.cli import NeofsAdm, NeofsCli
from neofs_testlib.hosting import Hosting
from neofs_testlib.shell import Shell
logger = logging.getLogger("NeoLogger")
def get_local_binaries_versions(shell: Shell) -> dict[str, str]:
versions = {}
for binary in ["neo-go", "neofs-authmate"]:
out = shell.exec(f"{binary} --version").stdout
versions[binary] = _parse_version(out)
neofs_cli = NeofsCli(shell, NEOFS_CLI_EXEC, WALLET_CONFIG)
versions["neofs-cli"] = _parse_version(neofs_cli.version.get().stdout)
try:
neofs_adm = NeofsAdm(shell, NEOFS_ADM_EXEC)
versions["neofs-adm"] = _parse_version(neofs_adm.version.get().stdout)
except RuntimeError:
logger.info(f"neofs-adm not installed")
out = shell.exec("aws --version").stdout
out_lines = out.split("\n")
versions["AWS"] = out_lines[0] if out_lines else "Unknown"
return versions
def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]:
versions_by_host = {}
for host in hosting.hosts:
binary_path_by_name = {} # Maps binary name to executable path
for service_config in host.config.services:
exec_path = service_config.attributes.get("exec_path")
if exec_path:
binary_path_by_name[service_config.name] = exec_path
for cli_config in host.config.clis:
binary_path_by_name[cli_config.name] = cli_config.exec_path
shell = host.get_shell()
versions_at_host = {}
for binary_name, binary_path in binary_path_by_name.items():
try:
result = shell.exec(f"{binary_path} --version")
versions_at_host[binary_name] = _parse_version(result.stdout)
except Exception as exc:
logger.error(f"Cannot get version for {binary_path} because of\n{exc}")
versions_at_host[binary_name] = "Unknown"
versions_by_host[host.config.address] = versions_at_host
# Consolidate versions across all hosts
versions = {}
for host, binary_versions in versions_by_host.items():
for name, version in binary_versions.items():
captured_version = versions.get(name)
if captured_version:
assert (
captured_version == version
), f"Binary {name} has inconsistent version on host {host}"
else:
versions[name] = version
return versions
def _parse_version(version_output: str) -> str:
version = re.search(r"version[:\s]*v?(.+)", version_output, re.IGNORECASE)
return version.group(1).strip() if version else "Unknown"
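As a quick illustration of `_parse_version` (the version strings below are made up):

```python
assert _parse_version("neofs-cli version: v0.33.0") == "0.33.0"
assert _parse_version("unexpected output") == "Unknown"
```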

View file

@ -1,60 +0,0 @@
import functools
from typing import Optional
from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses import ape
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from ..helpers.object_access import (
can_delete_object,
can_get_head_object,
can_get_object,
can_get_range_hash_of_object,
can_get_range_of_object,
can_put_object,
can_search_object,
)
ALL_OBJECT_OPERATIONS = ape.ObjectOperations.get_all()
FULL_ACCESS = {op: True for op in ALL_OBJECT_OPERATIONS}
NO_ACCESS = {op: False for op in ALL_OBJECT_OPERATIONS}
RO_ACCESS = {
op: True if op not in [ape.ObjectOperations.PUT, ape.ObjectOperations.DELETE, ape.ObjectOperations.PATCH] else False
for op in ALL_OBJECT_OPERATIONS
}
def assert_access_to_container(
access_matrix: dict[ape.ObjectOperations, bool],
wallet: WalletInfo,
cid: str,
oid: str,
file_name: str,
shell: Shell,
cluster: Cluster,
bearer: Optional[str] = None,
xhdr: Optional[dict] = None,
):
endpoint = cluster.default_rpc_endpoint
results: dict = {}
results[ape.ObjectOperations.PUT] = can_put_object(wallet, cid, file_name, shell, cluster, bearer, xhdr)
results[ape.ObjectOperations.HEAD] = can_get_head_object(wallet, cid, oid, shell, endpoint, bearer, xhdr)
results[ape.ObjectOperations.GET_RANGE] = can_get_range_of_object(wallet, cid, oid, shell, endpoint, bearer, xhdr)
results[ape.ObjectOperations.GET_RANGE_HASH] = can_get_range_hash_of_object(wallet, cid, oid, shell, endpoint, bearer, xhdr)
results[ape.ObjectOperations.SEARCH] = can_search_object(wallet, cid, shell, endpoint, oid, bearer, xhdr)
results[ape.ObjectOperations.GET] = can_get_object(wallet, cid, oid, file_name, shell, cluster, bearer, xhdr)
results[ape.ObjectOperations.DELETE] = can_delete_object(wallet, cid, oid, shell, endpoint, bearer, xhdr)
    failed_checks = [
        f"allowed {action} failed" for action, success in results.items() if not success and access_matrix[action]
    ] + [f"denied {action} succeeded" for action, success in results.items() if success and not access_matrix[action]]
assert not failed_checks, ", ".join(failed_checks)
assert_full_access_to_container = functools.partial(assert_access_to_container, FULL_ACCESS)
assert_no_access_to_container = functools.partial(assert_access_to_container, NO_ACCESS)
assert_read_only_container = functools.partial(assert_access_to_container, RO_ACCESS)
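A usage sketch for the partials above; the wallets, object identifiers, and `shell`/`cluster` objects are assumed fixtures:

```python
# The owner is expected to have full access, a foreign wallet none.
assert_full_access_to_container(wallet, cid, oid, file_name, shell, cluster)
assert_no_access_to_container(other_wallet, cid, oid, file_name, shell, cluster)
```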

View file

@ -1,87 +0,0 @@
import json
import time
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.shell.interfaces import Shell
from frostfs_testlib.steps.cli.container import create_container, search_nodes_with_container
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses import ape
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.parallel import parallel
from frostfs_testlib.utils import datetime_utils
from .container_request import ContainerRequest, MultipleContainersRequest
def create_container_with_ape(
container_request: ContainerRequest,
frostfs_cli: FrostfsCli,
wallet: WalletInfo,
shell: Shell,
cluster: Cluster,
endpoint: str,
) -> str:
with reporter.step("Create container"):
cid = _create_container_by_spec(container_request, wallet, shell, cluster, endpoint)
if container_request.ape_rules:
with reporter.step("Apply APE rules for container"):
_apply_ape_rules(cid, frostfs_cli, endpoint, container_request.ape_rules)
with reporter.step("Wait for one block"):
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
with reporter.step("Search nodes holding the container"):
container_holder_nodes = search_nodes_with_container(wallet, cid, shell, cluster.default_rpc_endpoint, cluster)
report_data = {node.id: node.host_ip for node in container_holder_nodes}
reporter.attach(json.dumps(report_data, indent=2), "container_nodes.json")
return cid
@reporter.step("Create multiple containers with APE")
def create_containers_with_ape(
frostfs_cli: FrostfsCli,
wallet: WalletInfo,
shell: Shell,
cluster: Cluster,
endpoint: str,
multiple_containers_request: MultipleContainersRequest,
) -> list[str]:
cids_futures = parallel(create_container_with_ape, multiple_containers_request, frostfs_cli, wallet, shell, cluster, endpoint)
return [future.result() for future in cids_futures]
@reporter.step("Create container by spec {container_request}")
def _create_container_by_spec(
container_request: ContainerRequest,
wallet: WalletInfo,
shell: Shell,
cluster: Cluster,
endpoint: str,
) -> str:
return create_container(
wallet,
shell,
endpoint,
container_request.parsed_rule(cluster),
wait_for_creation=False,
nns_zone=container_request.ns_zone,
nns_name=container_request.ns_name,
)
def _apply_ape_rules(cid: str, frostfs_cli: FrostfsCli, endpoint: str, ape_rules: list[ape.Rule]):
for ape_rule in ape_rules:
rule_str = ape_rule.as_string()
with reporter.step(f"Apply APE rule '{rule_str}' for container {cid}"):
frostfs_cli.ape_manager.add(
endpoint,
ape_rule.chain_id,
target_name=cid,
target_type="container",
rule=rule_str,
)

View file

@ -1,107 +0,0 @@
from dataclasses import dataclass
from functools import partial
import pytest
from frostfs_testlib.steps.cli.container import DEFAULT_PLACEMENT_RULE
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses import ape
APE_EVERYONE_ALLOW_ALL = [ape.Rule(ape.Verb.ALLOW, ape.ObjectOperations.WILDCARD_ALL)]
# In case we need container operations:
# ape.Rule(ape.Verb.ALLOW, ape.ContainerOperations.WILDCARD_ALL)]
APE_OWNER_ALLOW_ALL = [ape.Rule(ape.Verb.ALLOW, ape.ObjectOperations.WILDCARD_ALL, ape.Condition.by_role(ape.Role.OWNER))]
# In case we need container operations:
# ape.Rule(ape.Verb.ALLOW, ape.ContainerOperations.WILDCARD_ALL, ape.Condition.by_role(ape.Role.OWNER))]
@dataclass
class ContainerRequest:
    policy: str | None = None
    ape_rules: list[ape.Rule] | None = None
short_name: str | None = None
ns_name: str | None = None
ns_zone: str | None = None
def __post_init__(self):
if self.ape_rules is None:
self.ape_rules = []
        # Let pytest use this as the test ID instead of passing ids=[...] everywhere
self.__name__ = self.short_name
def parsed_rule(self, cluster: Cluster):
if self.policy is None:
return None
substitutions = {"%NODE_COUNT%": str(len(cluster.cluster_nodes))}
parsed_rule = self.policy
for sub, replacement in substitutions.items():
parsed_rule = parsed_rule.replace(sub, replacement)
return parsed_rule
def __repr__(self):
if self.short_name:
return self.short_name
spec_info: list[str] = []
if self.policy:
spec_info.append(f"policy='{self.policy}'")
if self.ape_rules:
ape_rules_list = ", ".join([f"'{rule.as_string()}'" for rule in self.ape_rules])
spec_info.append(f"ape_rules=[{ape_rules_list}]")
return f"({', '.join(spec_info)})"
class MultipleContainersRequest(list[ContainerRequest]):
def __init__(self, iterable=None):
"""Override initializer which can accept iterable"""
super(MultipleContainersRequest, self).__init__()
if iterable:
self.extend(iterable)
self.__set_name()
def __set_name(self):
self.__name__ = ", ".join([s.__name__ for s in self])
PUBLIC_WITH_POLICY = partial(ContainerRequest, ape_rules=APE_EVERYONE_ALLOW_ALL, short_name="Custom_policy_with_allow_all_ape_rule")
# REPS
REP_1_1_1 = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
REP_2_1_2 = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
REP_2_1_4 = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
REP_2_2_2 = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
REP_2_2_4 = "REP 2 IN X CBF 2 SELECT 4 FROM * AS X"
#
# Public means it has APE rule which allows everything for everyone
REP_1_1_1_PUBLIC = PUBLIC_WITH_POLICY(REP_1_1_1, short_name="REP 1 CBF 1 SELECT 1 (public)")
REP_2_1_2_PUBLIC = PUBLIC_WITH_POLICY(REP_2_1_2, short_name="REP 2 CBF 1 SELECT 2 (public)")
REP_2_1_4_PUBLIC = PUBLIC_WITH_POLICY(REP_2_1_4, short_name="REP 2 CBF 1 SELECT 4 (public)")
REP_2_2_2_PUBLIC = PUBLIC_WITH_POLICY(REP_2_2_2, short_name="REP 2 CBF 2 SELECT 2 (public)")
REP_2_2_4_PUBLIC = PUBLIC_WITH_POLICY(REP_2_2_4, short_name="REP 2 CBF 2 SELECT 4 (public)")
#
EVERYONE_ALLOW_ALL = ContainerRequest(policy=DEFAULT_PLACEMENT_RULE, ape_rules=APE_EVERYONE_ALLOW_ALL, short_name="Everyone_Allow_All")
OWNER_ALLOW_ALL = ContainerRequest(policy=DEFAULT_PLACEMENT_RULE, ape_rules=APE_OWNER_ALLOW_ALL, short_name="Owner_Allow_All")
PRIVATE = ContainerRequest(policy=DEFAULT_PLACEMENT_RULE, ape_rules=[], short_name="Private_No_APE")
def requires_container(container_request: None | ContainerRequest | list[ContainerRequest] = None) -> pytest.MarkDecorator:
if container_request is None:
container_request = EVERYONE_ALLOW_ALL
if not isinstance(container_request, list):
container_request = [container_request]
return pytest.mark.parametrize("container_request", container_request, indirect=True)
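A sketch of how `requires_container` is meant to be used; it assumes an indirect `container_request` fixture exists in conftest:

```python
@requires_container(REP_2_2_2_PUBLIC)
def test_object_put(container_request: ContainerRequest):
    # The indirect "container_request" fixture (assumed) turns the spec
    # into an actual container before the test body runs.
    ...
```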

View file

@ -0,0 +1,40 @@
import logging
import re
from typing import Optional
import allure
from pytest import Config
logger = logging.getLogger("NeoLogger")
@allure.step("Read environment.properties")
def read_env_properties(config: Config) -> Optional[dict]:
environment_dir = config.getoption("--alluredir")
if not environment_dir:
return None
file_path = f"{environment_dir}/environment.properties"
with open(file_path, "r") as file:
raw_content = file.read()
env_properties = {}
for line in raw_content.split("\n"):
m = re.match("(.*?)=(.*)", line)
if not m:
logger.warning(f"Could not parse env property from {line}")
continue
key, value = m.group(1), m.group(2)
env_properties[key] = value
return env_properties
@allure.step("Update data in environment.properties")
def save_env_properties(config: Config, env_data: dict) -> None:
environment_dir = config.getoption("--alluredir")
if not environment_dir:
return None
file_path = f"{environment_dir}/environment.properties"
with open(file_path, "a+") as env_file:
for env, env_value in env_data.items():
env_file.write(f"{env}={env_value}\n")

View file

@ -0,0 +1,150 @@
import hashlib
import logging
import os
import uuid
from typing import Any, Optional
import allure
from common import ASSETS_DIR, SIMPLE_OBJ_SIZE
logger = logging.getLogger("NeoLogger")
def generate_file(size: int = SIMPLE_OBJ_SIZE) -> str:
"""Generates a binary file with the specified size in bytes.
Args:
        size: Size in bytes; must be an int (e.g. int(6e6)).
Returns:
The path to the generated file.
"""
file_path = f"{os.getcwd()}/{ASSETS_DIR}/{str(uuid.uuid4())}"
with open(file_path, "wb") as file:
file.write(os.urandom(size))
logger.info(f"File with size {size} bytes has been generated: {file_path}")
return file_path
def generate_file_with_content(
file_path: Optional[str] = None,
content: Optional[str] = None,
) -> str:
"""Creates a new file with specified content.
Args:
file_path: Path to the file that should be created. If not specified, then random file
path will be generated.
content: Content that should be stored in the file. If not specified, then random binary
content will be generated.
Returns:
Path to the generated file.
"""
mode = "w+"
if content is None:
content = os.urandom(SIMPLE_OBJ_SIZE)
mode = "wb"
if not file_path:
file_path = f"{os.getcwd()}/{ASSETS_DIR}/{str(uuid.uuid4())}"
else:
if not os.path.exists(os.path.dirname(file_path)):
os.makedirs(os.path.dirname(file_path))
with open(file_path, mode) as file:
file.write(content)
return file_path
@allure.step("Get File Hash")
def get_file_hash(file_path: str, length: Optional[int] = None) -> str:
    """Generates hash for the specified file.
    Args:
        file_path: Path to the file to generate hash for.
        length: How many bytes to read; if None, the whole file is hashed.
    Returns:
        Hash of the file as hex-encoded string.
    """
    file_hash = hashlib.sha256()
    with open(file_path, "rb") as out:
        if length:
            file_hash.update(out.read(length))
        else:
            file_hash.update(out.read())
return file_hash.hexdigest()
@allure.step("Concatenation set of files to one file")
def concat_files(file_paths: list, resulting_file_path: Optional[str] = None) -> str:
"""Concatenates several files into a single file.
Args:
file_paths: Paths to the files to concatenate.
        resulting_file_path: Path to the file where concatenated content should be stored.
Returns:
Path to the resulting file.
"""
if not resulting_file_path:
resulting_file_path = f"{os.getcwd()}/{ASSETS_DIR}/{str(uuid.uuid4())}"
with open(resulting_file_path, "wb") as f:
for file in file_paths:
with open(file, "rb") as part_file:
f.write(part_file.read())
return resulting_file_path
def split_file(file_path: str, parts: int) -> list[str]:
"""Splits specified file into several specified number of parts.
Each part is saved under name `{original_file}_part_{i}`.
Args:
file_path: Path to the file that should be split.
parts: Number of parts the file should be split into.
Returns:
Paths to the part files.
"""
with open(file_path, "rb") as file:
content = file.read()
content_size = len(content)
    chunk_size = (content_size + parts - 1) // parts  # ceiling division, so exactly `parts` chunks are produced
    part_id = 1
    part_file_paths = []
    for content_offset in range(0, content_size, chunk_size):
part_file_name = f"{file_path}_part_{part_id}"
part_file_paths.append(part_file_name)
with open(part_file_name, "wb") as out_file:
out_file.write(content[content_offset : content_offset + chunk_size])
part_id += 1
return part_file_paths
def get_file_content(file_path: str, content_len: Optional[int] = None, mode: str = "r") -> Any:
"""Returns content of specified file.
Args:
file_path: Path to the file.
content_len: Limit of content length. If None, then entire file content is returned;
otherwise only the first content_len bytes of the content are returned.
mode: Mode of opening the file.
Returns:
Content of the specified file.
"""
with open(file_path, mode) as file:
if content_len:
content = file.read(content_len)
else:
content = file.read()
return content
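To illustrate how these file helpers compose, a small round-trip sketch (sizes are arbitrary):

```python
path = generate_file(1024)
original_hash = get_file_hash(path)

parts = split_file(path, parts=4)
restored = concat_files(parts)

# Splitting and re-concatenating must preserve the content byte-for-byte.
assert get_file_hash(restored) == original_hash
```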

View file

@ -0,0 +1,21 @@
import re
# Regex patterns of status codes of Container service (https://github.com/nspcc-dev/neofs-spec/blob/98b154848116223e486ce8b43eaa35fec08b4a99/20-api-v2/container.md)
CONTAINER_NOT_FOUND = "code = 3072.*message = container not found"
# Regex patterns of status codes of Object service (https://github.com/nspcc-dev/neofs-spec/blob/98b154848116223e486ce8b43eaa35fec08b4a99/20-api-v2/object.md)
OBJECT_ACCESS_DENIED = "code = 2048.*message = access to object operation denied"
OBJECT_NOT_FOUND = "code = 2049.*message = object not found"
OBJECT_ALREADY_REMOVED = "code = 2052.*message = object already removed"
SESSION_NOT_FOUND = "code = 4096.*message = session token not found"
def error_matches_status(error: Exception, status_pattern: str) -> bool:
"""
Determines whether exception matches specified status pattern.
We use re.search to be consistent with pytest.raises.
"""
match = re.search(status_pattern, str(error))
return match is not None
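Typical use of these patterns, either directly via `error_matches_status` or through `pytest.raises(..., match=...)`; the `get_object(...)` call below is a placeholder:

```python
try:
    get_object(...)  # placeholder for any object operation
except Exception as err:
    assert error_matches_status(err, OBJECT_NOT_FOUND)
```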

View file

@ -0,0 +1,13 @@
from neofs_testlib.shell import Shell
class IpTablesHelper:
@staticmethod
def drop_input_traffic_to_port(shell: Shell, ports: list[str]) -> None:
for port in ports:
shell.exec(f"sudo iptables -A INPUT -p tcp --dport {port} -j DROP")
@staticmethod
def restore_input_traffic_to_port(shell: Shell, ports: list[str]) -> None:
for port in ports:
shell.exec(f"sudo iptables -D INPUT -p tcp --dport {port} -j DROP")

216
pytest_tests/helpers/k6.py Normal file
View file

@ -0,0 +1,216 @@
import re
from contextlib import contextmanager
from dataclasses import dataclass
from time import sleep
import allure
from pytest_tests.helpers.remote_process import RemoteProcess
from pytest_tests.helpers.ssh_helper import HostClient
EXIT_RESULT_CODE = 0
@dataclass
class LoadParams:
obj_size: int
containers_count: int
out_file: str
obj_count: int
writers: int
readers: int
deleters: int
load_time: int
load_type: str
endpoint: str
@dataclass
class LoadResults:
latency_write_min: float = 0.0
latency_write_max: float = 0.0
latency_write_med: float = 0.0
latency_write_avg: float = 0.0
latency_read_min: float = 0.0
latency_read_max: float = 0.0
latency_read_med: float = 0.0
latency_read_avg: float = 0.0
read_ops: float = 0.0
write_ops: float = 0.0
class K6:
def __init__(self, load_params: LoadParams, host_client: HostClient):
self.load_params = load_params
self.host_client = host_client
self._k6_dir = None
self._k6_result = None
self._k6_process = None
self._k6_stop_attempts = 5
self._k6_stop_timeout = 15
@property
def process_dir(self) -> str:
return self._k6_process.process_dir
@property
def k6_dir(self) -> str:
if not self._k6_dir:
self._k6_dir = self.host_client.exec("locate -l 1 'k6'").stdout.strip("\n")
return self._k6_dir
@allure.step("Prepare containers and objects")
def prepare(self) -> str:
self._k6_dir = self.k6_dir
if self.load_params.load_type == "http" or self.load_params.load_type == "grpc":
command = (
f"{self.k6_dir}/scenarios/preset/preset_grpc.py "
f"--size {self.load_params.obj_size} "
f"--containers {self.load_params.containers_count} "
f"--out {self.k6_dir}/{self.load_params.load_type}_{self.load_params.out_file} "
f"--endpoint {self.load_params.endpoint.split(',')[0]} "
f"--preload_obj {self.load_params.obj_count} "
)
terminal = self.host_client.exec(command)
return terminal.stdout.strip("\n")
elif self.load_params.load_type == "s3":
command = (
f"{self.k6_dir}/scenarios/preset/preset_s3.py --size {self.load_params.obj_size} "
f"--buckets {self.load_params.containers_count} "
f"--out {self.k6_dir}/{self.load_params.load_type}_{self.load_params.out_file} "
f"--endpoint {self.load_params.endpoint.split(',')[0]} "
f"--preload_obj {self.load_params.obj_count} "
f"--location load-1-1"
)
terminal = self.host_client.exec(command)
return terminal.stdout.strip("\n")
else:
raise AssertionError("Wrong K6 load type")
@allure.step("Start K6 on initiator")
def start(self) -> None:
self._k6_dir = self.k6_dir
command = (
f"{self.k6_dir}/k6 run "
f"-e DURATION={self.load_params.load_time} "
f"-e WRITE_OBJ_SIZE={self.load_params.obj_size} "
f"-e WRITERS={self.load_params.writers} -e READERS={self.load_params.readers} "
f"-e DELETERS={self.load_params.deleters} "
f"-e {self.load_params.load_type.upper()}_ENDPOINTS={self.load_params.endpoint} "
f"-e PREGEN_JSON={self.k6_dir}/"
f"{self.load_params.load_type}_{self.load_params.out_file} "
f"{self.k6_dir}/scenarios/{self.load_params.load_type}.js"
)
self._k6_process = RemoteProcess.create(command, self.host_client)
@allure.step("Wait until K6 is finished")
def wait_until_finished(self, timeout: int = 0, k6_should_be_running: bool = False) -> None:
        if self._k6_process is None:
            raise AssertionError("No k6 instances were executed")
if k6_should_be_running:
assert self._k6_process.running(), "k6 should be running."
for __attempt in reversed(range(5)) if timeout else [0]:
if not self._k6_process.running():
return
if __attempt: # no sleep in last iteration
sleep(int(timeout / 5))
self._stop_k6()
        raise TimeoutError(f"Expected K6 to finish in {timeout} sec.")
@contextmanager
def start_context(
self, warm_up_time: int = 0, expected_finish: bool = False, expected_fail: bool = False
) -> None:
self.start()
sleep(warm_up_time)
try:
yield self
except Exception as err:
if self._k6_process.running():
self._kill_k6()
raise
if expected_fail:
self._kill_k6()
elif expected_finish:
if self._k6_process.running():
self._kill_k6()
raise AssertionError("K6 has not finished in expected time")
else:
self._k6_should_be_finished()
else:
self._stop_k6()
@allure.step("Get K6 results")
def get_k6_results(self) -> None:
self.__log_k6_output()
@allure.step("Assert K6 should be finished")
def _k6_should_be_finished(self) -> None:
k6_rc = self._k6_process.rc()
assert k6_rc == 0, f"K6 unexpectedly finished with RC {k6_rc}"
@allure.step("Terminate K6 on initiator")
def stop(self) -> None:
if not self._k6_process.running():
raise AssertionError("K6 unexpectedly finished")
self._stop_k6()
k6_rc = self._k6_process.rc()
        assert k6_rc == EXIT_RESULT_CODE, f"Return code of K6 job should be {EXIT_RESULT_CODE}, but was {k6_rc}"
def check_k6_is_running(self) -> bool:
if self._k6_process:
return self._k6_process.running()
return False
@property
def is_finished(self) -> bool:
return not self._k6_process.running()
def parsing_results(self) -> LoadResults:
output = self._k6_process.stdout(full=True).replace("\n", "")
metric_regex_map = {
"latency_write_min": r"neofs_obj_put_duration.*?min=(?P<latency_write_min>\d*\.\d*)",
"latency_write_max": r"neofs_obj_put_duration.*?max=(?P<latency_write_max>\d*\.\d*)",
"latency_write_med": r"neofs_obj_put_duration.*?med=(?P<latency_write_med>\d*\.\d*)",
"latency_write_avg": r"neofs_obj_put_duration.*?avg=(?P<latency_write_avg>\d*\.\d*)",
"write_ops": r"neofs_obj_put_total\W*\d*\W*(?P<write_ops>\d*\.\d*)",
"latency_read_min": r"neofs_obj_get_duration.*?min=(?P<latency_read_min>\d*\.\d*)",
"latency_read_max": r"neofs_obj_get_duration.*?max=(?P<latency_read_max>\d*\.\d*)",
"latency_read_med": r"neofs_obj_get_duration.*?med=(?P<latency_read_med>\d*\.\d*)",
"latency_read_avg": r"neofs_obj_get_duration.*?avg=(?P<latency_read_avg>\d*\.\d*)",
"read_ops": r"neofs_obj_get_total\W*\d*\W*(?P<read_ops>\d*\.\d*)",
}
metric_values = {}
for metric_name, metric_regex in metric_regex_map.items():
match = re.search(metric_regex, output)
if match:
metric_values[metric_name] = float(match.group(metric_name))
continue
metric_values[metric_name] = 0.0
load_result = LoadResults(**metric_values)
return load_result
@allure.step("Try to stop K6 with SIGTERM")
def _stop_k6(self) -> None:
for __attempt in range(self._k6_stop_attempts):
if not self._k6_process.running():
break
self._k6_process.stop()
sleep(self._k6_stop_timeout)
else:
raise AssertionError("Can not stop K6 process within timeout")
def _kill_k6(self) -> None:
self._k6_process.kill()
@allure.step("Log K6 output")
def __log_k6_output(self) -> None:
allure.attach(self._k6_process.stdout(full=True), "K6 output", allure.attachment_type.TEXT)
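A hedged end-to-end sketch of driving a load run with the class above; the endpoint value and the `host_client` fixture are assumptions:

```python
load_params = LoadParams(
    obj_size=1024,
    containers_count=1,
    out_file="load.json",
    obj_count=100,
    writers=4,
    readers=4,
    deleters=0,
    load_time=60,
    load_type="grpc",
    endpoint="s01.neofs.devenv:8080",  # assumed dev-env endpoint
)
k6 = K6(load_params, host_client)
k6.prepare()
with k6.start_context(warm_up_time=5):
    pass  # run assertions here while the load is in progress
results = k6.parsing_results()
```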

View file

@ -1,218 +0,0 @@
from typing import Optional
from frostfs_testlib import reporter
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
from frostfs_testlib.resources.error_patterns import OBJECT_ACCESS_DENIED
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.object import (
delete_object,
get_object_from_random_node,
get_range,
get_range_hash,
head_object,
put_object_to_random_node,
search_object,
)
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.utils import string_utils
from frostfs_testlib.utils.file_utils import get_file_hash
OPERATION_ERROR_TYPE = RuntimeError
def can_get_object(
wallet: WalletInfo,
cid: str,
oid: str,
file_name: str,
shell: Shell,
cluster: Cluster,
bearer: Optional[str] = None,
xhdr: Optional[dict] = None,
) -> bool:
with reporter.step("Try get object from container"):
try:
got_file_path = get_object_from_random_node(
wallet,
cid,
oid,
bearer=bearer,
xhdr=xhdr,
shell=shell,
cluster=cluster,
)
except OPERATION_ERROR_TYPE as err:
assert string_utils.is_str_match_pattern(err, OBJECT_ACCESS_DENIED), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
return False
assert get_file_hash(file_name) == get_file_hash(got_file_path)
return True
def can_put_object(
wallet: WalletInfo,
cid: str,
file_name: str,
shell: Shell,
cluster: Cluster,
bearer: Optional[str] = None,
xhdr: Optional[dict] = None,
attributes: Optional[dict] = None,
) -> bool:
with reporter.step("Try put object to container"):
try:
put_object_to_random_node(
wallet,
file_name,
cid,
bearer=bearer,
xhdr=xhdr,
attributes=attributes,
shell=shell,
cluster=cluster,
)
except OPERATION_ERROR_TYPE as err:
assert string_utils.is_str_match_pattern(err, OBJECT_ACCESS_DENIED), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
return False
return True
def can_delete_object(
wallet: WalletInfo,
cid: str,
oid: str,
shell: Shell,
endpoint: str,
bearer: Optional[str] = None,
xhdr: Optional[dict] = None,
) -> bool:
with reporter.step("Try delete object from container"):
try:
delete_object(
wallet,
cid,
oid,
bearer=bearer,
xhdr=xhdr,
shell=shell,
endpoint=endpoint,
)
except OPERATION_ERROR_TYPE as err:
assert string_utils.is_str_match_pattern(err, OBJECT_ACCESS_DENIED), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
return False
return True
def can_get_head_object(
wallet: WalletInfo,
cid: str,
oid: str,
shell: Shell,
endpoint: str,
bearer: Optional[str] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> bool:
with reporter.step("Try get head of object"):
try:
head_object(
wallet,
cid,
oid,
bearer=bearer,
xhdr=xhdr,
shell=shell,
endpoint=endpoint,
timeout=timeout,
)
except OPERATION_ERROR_TYPE as err:
assert string_utils.is_str_match_pattern(err, OBJECT_ACCESS_DENIED), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
return False
return True
def can_get_range_of_object(
wallet: WalletInfo,
cid: str,
oid: str,
shell: Shell,
endpoint: str,
bearer: Optional[str] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> bool:
with reporter.step("Try get range of object"):
try:
get_range(
wallet,
cid,
oid,
bearer=bearer,
range_cut="0:10",
xhdr=xhdr,
shell=shell,
endpoint=endpoint,
timeout=timeout,
)
except OPERATION_ERROR_TYPE as err:
assert string_utils.is_str_match_pattern(err, OBJECT_ACCESS_DENIED), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
return False
return True
def can_get_range_hash_of_object(
wallet: WalletInfo,
cid: str,
oid: str,
shell: Shell,
endpoint: str,
bearer: Optional[str] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> bool:
with reporter.step("Try get range hash of object"):
try:
get_range_hash(
wallet,
cid,
oid,
bearer=bearer,
range_cut="0:10",
xhdr=xhdr,
shell=shell,
endpoint=endpoint,
timeout=timeout,
)
except OPERATION_ERROR_TYPE as err:
assert string_utils.is_str_match_pattern(err, OBJECT_ACCESS_DENIED), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
return False
return True
def can_search_object(
wallet: WalletInfo,
cid: str,
shell: Shell,
endpoint: str,
oid: Optional[str] = None,
bearer: Optional[str] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> bool:
with reporter.step("Try search object in container"):
try:
oids = search_object(
wallet,
cid,
bearer=bearer,
xhdr=xhdr,
shell=shell,
endpoint=endpoint,
timeout=timeout,
)
except OPERATION_ERROR_TYPE as err:
assert string_utils.is_str_match_pattern(err, OBJECT_ACCESS_DENIED), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
return False
if oid:
return oid in oids
return True

View file

@ -1,26 +0,0 @@
from frostfs_testlib.shell.interfaces import Shell
from frostfs_testlib.steps.cli.container import get_container
from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetmapInfo
from ..helpers.utility import placement_policy_from_container
def validate_object_policy(wallet: str, shell: Shell, placement_rule: str, cid: str, endpoint: str):
got_policy = placement_policy_from_container(get_container(wallet, cid, shell, endpoint, False))
    assert got_policy.replace("'", "") == placement_rule.replace(
        "'", ""
    ), f"Expected policy\n{placement_rule}\nbut container has policy\n{got_policy}"
def get_netmap_param(netmap_info: list[NodeNetmapInfo]) -> dict:
dict_external = dict()
for node in netmap_info:
dict_external[node.node] = {
"country": node.country,
"country_code": node.country_code,
"Price": node.price,
"continent": node.continent,
"un_locode": node.un_locode,
"location": node.location,
}
return dict_external

View file

@ -0,0 +1,191 @@
from __future__ import annotations
import uuid
from typing import Optional
import allure
# This file is broken because tenacity is not listed in requirements.txt.
# It won't be fixed in the scope of this PR; alipay will fix it himself by
# switching the RemoteProcess and K6 classes from HostClient to the shell from hosting
from tenacity import retry, stop_after_attempt, wait_fixed
from pytest_tests.helpers.ssh_helper import HostClient
class RemoteProcess:
def __init__(self, cmd: str, process_dir: str, host_client: HostClient):
self.process_dir = process_dir
self.cmd = cmd
self.stdout_last_line_number = 0
self.stderr_last_line_number = 0
self.pid: Optional[str] = None
self.proc_rc: Optional[int] = None
self.saved_stdout: Optional[str] = None
self.saved_stderr: Optional[str] = None
self.host_client = host_client
@classmethod
@allure.step("Create remote process")
def create(cls, command: str, host_client: HostClient) -> RemoteProcess:
"""
Create a process on a remote host.
        Creates a directory for the process with the following files:
command.sh: script to execute
pid: contains process id
rc: contains script return code
stderr: contains script errors
stdout: contains script output
Args:
host_client: Host client instance
command: command to be run on a remote host
Returns:
RemoteProcess instance for further examination
"""
remote_process = cls(
cmd=command, process_dir=f"/tmp/proc_{uuid.uuid4()}", host_client=host_client
)
remote_process._create_process_dir()
remote_process._generate_command_script(command)
remote_process._start_process()
remote_process.pid = remote_process._get_pid()
return remote_process
@allure.step("Get process stdout")
def stdout(self, full: bool = False) -> str:
"""
Method to get process stdout, either fresh info or full.
Args:
            full: if True, return the full stdout collected so far
        Returns:
            Fresh stdout: using stdout_last_line_number, only new stdout lines are returned.
            If the process is finished (proc_rc is not None), the saved stdout is returned.
"""
if self.saved_stdout is not None:
cur_stdout = self.saved_stdout
else:
terminal = self.host_client.exec(f"cat {self.process_dir}/stdout")
if self.proc_rc is not None:
self.saved_stdout = terminal.stdout
cur_stdout = terminal.stdout
if full:
return cur_stdout
whole_stdout = cur_stdout.split("\n")
if len(whole_stdout) > self.stdout_last_line_number:
resulted_stdout = "\n".join(whole_stdout[self.stdout_last_line_number :])
self.stdout_last_line_number = len(whole_stdout)
return resulted_stdout
return ""
@allure.step("Get process stderr")
def stderr(self, full: bool = False) -> str:
"""
Method to get process stderr, either fresh info or full.
Args:
            full: if True, return the full stderr collected so far
        Returns:
            Fresh stderr: using stderr_last_line_number, only new stderr lines are returned.
            If the process is finished (proc_rc is not None), the saved stderr is returned.
"""
if self.saved_stderr is not None:
cur_stderr = self.saved_stderr
else:
terminal = self.host_client.exec(f"cat {self.process_dir}/stderr")
if self.proc_rc is not None:
self.saved_stderr = terminal.stdout
cur_stderr = terminal.stdout
if full:
return cur_stderr
whole_stderr = cur_stderr.split("\n")
if len(whole_stderr) > self.stderr_last_line_number:
resulted_stderr = "\n".join(whole_stderr[self.stderr_last_line_number :])
self.stderr_last_line_number = len(whole_stderr)
return resulted_stderr
return ""
@allure.step("Get process rc")
def rc(self) -> Optional[int]:
if self.proc_rc is not None:
return self.proc_rc
terminal = self.host_client.exec(f"cat {self.process_dir}/rc", verify=False)
if "No such file or directory" in terminal.stderr:
return None
        elif terminal.stderr or terminal.rc != 0:
            raise AssertionError(f"cat of process rc was not successful: {terminal.stderr}")
self.proc_rc = int(terminal.stdout)
return self.proc_rc
@allure.step("Check if process is running")
def running(self) -> bool:
return self.rc() is None
@allure.step("Send signal to process")
def send_signal(self, signal: int) -> None:
kill_res = self.host_client.exec(f"kill -{signal} {self.pid}", verify=False)
if "No such process" in kill_res.stderr:
return
if kill_res.rc:
raise AssertionError(f"Signal {signal} not sent. Return code of kill: {kill_res.rc}")
@allure.step("Stop process")
def stop(self) -> None:
self.send_signal(15)
@allure.step("Kill process")
def kill(self) -> None:
self.send_signal(9)
@allure.step("Clear process directory")
def clear(self) -> None:
if self.process_dir == "/":
raise AssertionError(f"Invalid path to delete: {self.process_dir}")
self.host_client.exec(f"rm -rf {self.process_dir}")
@allure.step("Start remote process")
def _start_process(self) -> None:
self.host_client.exec(
f"nohup {self.process_dir}/command.sh </dev/null "
f">{self.process_dir}/stdout "
f"2>{self.process_dir}/stderr &"
)
@allure.step("Create process directory")
def _create_process_dir(self) -> None:
self.host_client.exec(f"mkdir {self.process_dir}; chmod 777 {self.process_dir}")
terminal = self.host_client.exec(f"realpath {self.process_dir}")
self.process_dir = terminal.stdout.strip()
@allure.step("Get pid")
@retry(wait=wait_fixed(10), stop=stop_after_attempt(5), reraise=True)
def _get_pid(self) -> str:
terminal = self.host_client.exec(f"cat {self.process_dir}/pid")
assert terminal.stdout, f"invalid pid: {terminal.stdout}"
return terminal.stdout.strip()
@allure.step("Generate command script")
def _generate_command_script(self, command: str) -> None:
        command = command.replace("\\", "\\\\").replace('"', '\\"')  # escape backslashes first, then quotes
script = (
f"#!/bin/bash\n"
f"cd {self.process_dir}\n"
f"{command} &\n"
f"pid=\$!\n"
f"cd {self.process_dir}\n"
f"echo \$pid > {self.process_dir}/pid\n"
f"wait \$pid\n"
f"echo $? > {self.process_dir}/rc"
)
self.host_client.exec(f'echo "{script}" > {self.process_dir}/command.sh')
self.host_client.exec(f"cat {self.process_dir}/command.sh")
self.host_client.exec(f"chmod +x {self.process_dir}/command.sh")

View file

@ -0,0 +1,118 @@
import os
from typing import Optional
import allure
import s3_gate_bucket
import s3_gate_object
@allure.step("Expected all objects are presented in the bucket")
def check_objects_in_bucket(
s3_client, bucket, expected_objects: list, unexpected_objects: Optional[list] = None
) -> None:
unexpected_objects = unexpected_objects or []
bucket_objects = s3_gate_object.list_objects_s3(s3_client, bucket)
    assert len(bucket_objects) == len(
        expected_objects
    ), f"Expected {len(expected_objects)} objects in the bucket, got {len(bucket_objects)}"
for bucket_object in expected_objects:
assert (
bucket_object in bucket_objects
), f"Expected object {bucket_object} in objects list {bucket_objects}"
for bucket_object in unexpected_objects:
assert (
bucket_object not in bucket_objects
), f"Expected object {bucket_object} not in objects list {bucket_objects}"
@allure.step("Try to get object and got error")
def try_to_get_objects_and_expect_error(s3_client, bucket: str, object_keys: list) -> None:
for obj in object_keys:
try:
s3_gate_object.get_object_s3(s3_client, bucket, obj)
raise AssertionError(f"Object {obj} found in bucket {bucket}")
except Exception as err:
assert "The specified key does not exist" in str(
err
), f"Expected error in exception {err}"
@allure.step("Set versioning enable for bucket")
def set_bucket_versioning(s3_client, bucket: str, status: s3_gate_bucket.VersioningStatus):
s3_gate_bucket.get_bucket_versioning_status(s3_client, bucket)
s3_gate_bucket.set_bucket_versioning(s3_client, bucket, status=status)
bucket_status = s3_gate_bucket.get_bucket_versioning_status(s3_client, bucket)
assert bucket_status == status.value, f"Expected {bucket_status} status. Got {status.value}"
def object_key_from_file_path(full_path: str) -> str:
return os.path.basename(full_path)
def assert_tags(
actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None
) -> None:
expected_tags = (
[{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else []
)
unexpected_tags = (
[{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else []
)
if expected_tags == []:
        assert not actual_tags, f"Expected no tags, got {actual_tags}"
assert len(expected_tags) == len(actual_tags)
for tag in expected_tags:
assert tag in actual_tags, f"Tag {tag} must be in {actual_tags}"
for tag in unexpected_tags:
assert tag not in actual_tags, f"Tag {tag} should not be in {actual_tags}"
@allure.step("Expected all tags are presented in object")
def check_tags_by_object(
s3_client,
bucket: str,
key_name: str,
expected_tags: list,
unexpected_tags: Optional[list] = None,
) -> None:
actual_tags = s3_gate_object.get_object_tagging(s3_client, bucket, key_name)
assert_tags(
expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags
)
@allure.step("Expected all tags are presented in bucket")
def check_tags_by_bucket(
s3_client, bucket: str, expected_tags: list, unexpected_tags: Optional[list] = None
) -> None:
actual_tags = s3_gate_bucket.get_bucket_tagging(s3_client, bucket)
assert_tags(
expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags
)
def assert_object_lock_mode(
    s3_client,
    bucket: str,
    file_name: str,
    object_lock_mode: str,
    retain_until_date,
    legal_hold_status: str,
):
    object_dict = s3_gate_object.get_object_s3(s3_client, bucket, file_name, full_output=True)
    assert (
        object_dict.get("ObjectLockMode") == object_lock_mode
    ), f"Expected Object Lock Mode {object_lock_mode}, got {object_dict.get('ObjectLockMode')}"
    assert (
        object_dict.get("ObjectLockLegalHoldStatus") == legal_hold_status
    ), f"Expected Object Lock Legal Hold Status {legal_hold_status}, got {object_dict.get('ObjectLockLegalHoldStatus')}"
    object_retain_date = object_dict.get("ObjectLockRetainUntilDate")
    retain_date = (
        object_retain_date
        if isinstance(object_retain_date, str)
        else object_retain_date.strftime("%Y-%m-%dT%H:%M:%S")
    )
    expected_retain_date = retain_until_date.strftime("%Y-%m-%dT%H:%M:%S")
    assert expected_retain_date in str(
        retain_date
    ), f"Expected Object Lock Retain Until Date {expected_retain_date}, got {retain_date}"

View file

@ -1,8 +1,33 @@
import time
from frostfs_testlib import reporter
from frostfs_testlib.resources.common import STORAGE_GC_TIME
from frostfs_testlib.utils import datetime_utils
import allure
from common import STORAGE_GC_TIME
def parse_time(value: str) -> int:
"""Converts time interval in text form into time interval as number of seconds.
Args:
value: time interval as text.
Returns:
Number of seconds in the parsed time interval.
"""
value = value.lower()
for suffix in ["s", "sec"]:
if value.endswith(suffix):
return int(value[: -len(suffix)])
for suffix in ["m", "min"]:
if value.endswith(suffix):
return int(value[: -len(suffix)]) * 60
for suffix in ["h", "hr", "hour"]:
if value.endswith(suffix):
return int(value[: -len(suffix)]) * 60 * 60
raise ValueError(f"Unknown units in time value '{value}'")
def placement_policy_from_container(container_info: str) -> str:
@ -22,7 +47,7 @@ def placement_policy_from_container(container_info: str) -> str:
FILTER Country EQ Sweden AS LOC_SW
Args:
container_info: output from frostfs-cli container get command
container_info: output from neofs-cli container get command
Returns:
placement policy as a string
@ -32,19 +57,6 @@ def placement_policy_from_container(container_info: str) -> str:
def wait_for_gc_pass_on_storage_nodes() -> None:
wait_time = datetime_utils.parse_time(STORAGE_GC_TIME)
with reporter.step(f"Wait {wait_time}s until GC completes on storage nodes"):
wait_time = parse_time(STORAGE_GC_TIME)
with allure.step(f"Wait {wait_time}s until GC completes on storage nodes"):
time.sleep(wait_time)
def are_numbers_similar(num1, num2, tolerance_percentage: float = 1.0):
"""
if difference of numbers is less than permissible deviation than numbers are similar
"""
# Calculate the permissible deviation
average = (num1 + num2) / 2
tolerance = average * (tolerance_percentage / 100)
# Calculate the real difference
difference = abs(num1 - num2)
return difference <= tolerance
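A quick worked example of `are_numbers_similar`: for 100 and 101 the average is 100.5, so the default 1% tolerance is 1.005 and the difference of 1 falls within it:

```python
assert are_numbers_similar(100, 101) is True
assert are_numbers_similar(100, 103, tolerance_percentage=1.0) is False
```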

28
pytest_tests/pytest.ini Normal file
View file

@ -0,0 +1,28 @@
[pytest]
log_cli = 1
log_cli_level = DEBUG
log_cli_format = %(asctime)s [%(levelname)4s] %(message)s
log_format = %(asctime)s [%(levelname)4s] %(message)s
log_cli_date_format = %Y-%m-%d %H:%M:%S
log_date_format = %H:%M:%S
markers =
# special markers
sanity: small tests subset
staging: tests excluded from verifier/pr-validation/sanity jobs and run only in the staging job
# functional markers
container: tests for container creation
grpc_api: standard gRPC API tests
http_gate: HTTP gate contract
s3_gate: S3 gate tests
curl: tests for HTTP gate with curl utility
long: long tests (with long execution time)
node_mgmt: neofs control commands
session_token: tests for operations with session token
acl: tests for basic and extended ACL
storage_group: tests for storage groups
failover: tests for system recovery after a failure
failover_panic: tests for system recovery after panic reboot of a node
failover_net: tests for network failure
add_nodes: add nodes to cluster
check_binaries: check neofs installed binaries versions
payments: tests for payment associated operations

View file

@ -1 +1,68 @@
-r ../requirements.txt
aiodns==3.0.0
aiohttp==3.7.4.post0
aioresponses==0.7.2
allure-pytest==2.9.45
allure-python-commons==2.9.45
async-timeout==3.0.1
asynctest==0.13.0
attrs==21.4.0
base58==2.1.0
bitarray==2.3.4
black==22.8.0
boto3==1.16.33
botocore==1.19.33
certifi==2022.5.18
cffi==1.15.0
chardet==4.0.0
charset-normalizer==2.0.12
coverage==6.3.3
docker==4.4.0
docutils==0.17.1
Events==0.4
flake8==4.0.1
idna==3.3
iniconfig==1.1.1
isort==5.10.1
jmespath==0.10.0
jsonschema==4.5.1
lz4==3.1.3
mccabe==0.6.1
mmh3==3.0.0
multidict==6.0.2
mypy==0.950
mypy-extensions==0.4.3
neo-mamba==0.10.0
neo3crypto==0.2.1
neo3vm==0.9.0
neo3vm-stubs==0.9.0
neofs-testlib==0.2.1
netaddr==0.8.0
orjson==3.6.8
packaging==21.3
paramiko==2.10.3
pexpect==4.8.0
pluggy==1.0.0
pre-commit==2.20.0
ptyprocess==0.7.0
py==1.11.0
pybiginteger==1.2.6
pybiginteger-stubs==1.2.6
pycares==4.1.2
pycodestyle==2.8.0
pycparser==2.21
pycryptodome==3.11.0
pyflakes==2.4.0
pyparsing==3.0.9
pyrsistent==0.18.1
pytest==7.1.2
python-dateutil==2.8.2
requests==2.28.0
robotframework==4.1.2
s3transfer==0.3.7
six==1.16.0
tenacity==8.0.1
tomli==2.0.1
typing-extensions==4.2.0
urllib3==1.26.9
websocket-client==1.3.2
yarl==1.7.2

View file

@ -1,8 +0,0 @@
import os
from .. import TESTS_BASE_PATH
TEST_CYCLES_COUNT = int(os.getenv("TEST_CYCLES_COUNT", "1"))
DEVENV_PATH = os.getenv("DEVENV_PATH", os.path.join("..", "frostfs-dev-env"))
S3_POLICY_FILE_LOCATION = os.path.join(TESTS_BASE_PATH, "resources/files/policy.json")

View file

@ -1,6 +0,0 @@
{
"rep-3": "REP 3",
"rep-1": "REP 1",
"complex": "REP 1 IN X CBF 1 SELECT 1 FROM * AS X",
"ec3.1": "EC 3.1 CBF 1 SELECT 4 FROM *"
}

View file

@ -1,4 +0,0 @@
NOT_PARSE_POLICY = "can't parse placement policy"
NOT_ENOUGH_TO_SELECT = "selector is not enough"
NOT_FOUND_FILTER = "filter not found"
NOT_FOUND_SELECTOR = "selector not found"

View file

@ -0,0 +1,475 @@
import json
import logging
import os
from datetime import datetime
from typing import Optional
import allure
from cli_helpers import _cmd_run
from common import ASSETS_DIR, S3_GATE
logger = logging.getLogger("NeoLogger")
REGULAR_TIMEOUT = 90
LONG_TIMEOUT = 240
class AwsCliClient:
# Flags that we use for all S3 commands: disable SSL verification (as we use self-signed
# certificate in devenv) and disable automatic pagination in CLI output
common_flags = "--no-verify-ssl --no-paginate"
def create_bucket(
self,
Bucket: str,
ObjectLockEnabledForBucket: Optional[bool] = None,
ACL: Optional[str] = None,
GrantFullControl: Optional[str] = None,
GrantRead: Optional[str] = None,
GrantWrite: Optional[str] = None,
CreateBucketConfiguration: Optional[dict] = None,
):
if ObjectLockEnabledForBucket is None:
object_lock = ""
elif ObjectLockEnabledForBucket:
object_lock = " --object-lock-enabled-for-bucket"
else:
object_lock = " --no-object-lock-enabled-for-bucket"
cmd = (
f"aws {self.common_flags} s3api create-bucket --bucket {Bucket} "
f"{object_lock} --endpoint {S3_GATE}"
)
if ACL:
cmd += f" --acl {ACL}"
if GrantFullControl:
cmd += f" --grant-full-control {GrantFullControl}"
if GrantWrite:
cmd += f" --grant-write {GrantWrite}"
if GrantRead:
cmd += f" --grant-read {GrantRead}"
if CreateBucketConfiguration:
cmd += f" --create-bucket-configuration LocationConstraint={CreateBucketConfiguration['LocationConstraint']}"
_cmd_run(cmd, REGULAR_TIMEOUT)
def list_buckets(self) -> dict:
cmd = f"aws {self.common_flags} s3api list-buckets --endpoint {S3_GATE}"
output = _cmd_run(cmd)
return self._to_json(output)
def get_bucket_acl(self, Bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api get-bucket-acl --bucket {Bucket} "
f"--endpoint {S3_GATE}"
)
output = _cmd_run(cmd, REGULAR_TIMEOUT)
return self._to_json(output)
def get_bucket_versioning(self, Bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api get-bucket-versioning --bucket {Bucket} "
f"--endpoint {S3_GATE}"
)
output = _cmd_run(cmd, REGULAR_TIMEOUT)
return self._to_json(output)
def put_bucket_versioning(self, Bucket: str, VersioningConfiguration: dict) -> dict:
cmd = (
f"aws {self.common_flags} s3api put-bucket-versioning --bucket {Bucket} "
f'--versioning-configuration Status={VersioningConfiguration.get("Status")} '
f"--endpoint {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def list_objects(self, Bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api list-objects --bucket {Bucket} " f"--endpoint {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def list_objects_v2(self, Bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api list-objects-v2 --bucket {Bucket} "
f"--endpoint {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def list_object_versions(self, Bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api list-object-versions --bucket {Bucket} "
f"--endpoint {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def copy_object(
self,
Bucket: str,
CopySource: str,
Key: str,
ACL: Optional[str] = None,
MetadataDirective: Optional[str] = None,
Metadata: Optional[dict] = None,
TaggingDirective: Optional[str] = None,
Tagging: Optional[str] = None,
) -> dict:
cmd = (
f"aws {self.common_flags} s3api copy-object --copy-source {CopySource} "
f"--bucket {Bucket} --key {Key} --endpoint {S3_GATE}"
)
if ACL:
cmd += f" --acl {ACL}"
if MetadataDirective:
cmd += f" --metadata-directive {MetadataDirective}"
if Metadata:
cmd += " --metadata "
for key, value in Metadata.items():
cmd += f" {key}={value}"
if TaggingDirective:
cmd += f" --tagging-directive {TaggingDirective}"
if Tagging:
cmd += f" --tagging {Tagging}"
output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output)
def head_bucket(self, Bucket: str) -> dict:
cmd = f"aws {self.common_flags} s3api head-bucket --bucket {Bucket} --endpoint {S3_GATE}"
output = _cmd_run(cmd)
return self._to_json(output)
def put_object(
self,
Body: str,
Bucket: str,
Key: str,
Metadata: Optional[dict] = None,
Tagging: Optional[str] = None,
ACL: Optional[str] = None,
ObjectLockMode: Optional[str] = None,
ObjectLockRetainUntilDate: Optional[datetime] = None,
ObjectLockLegalHoldStatus: Optional[str] = None,
GrantFullControl: Optional[str] = None,
GrantRead: Optional[str] = None,
) -> dict:
cmd = (
f"aws {self.common_flags} s3api put-object --bucket {Bucket} --key {Key} "
f"--body {Body} --endpoint {S3_GATE}"
)
if Metadata:
cmd += f" --metadata"
for key, value in Metadata.items():
cmd += f" {key}={value}"
if Tagging:
cmd += f" --tagging '{Tagging}'"
if ACL:
cmd += f" --acl {ACL}"
if ObjectLockMode:
cmd += f" --object-lock-mode {ObjectLockMode}"
if ObjectLockRetainUntilDate:
cmd += f' --object-lock-retain-until-date "{ObjectLockRetainUntilDate}"'
if ObjectLockLegalHoldStatus:
cmd += f" --object-lock-legal-hold-status {ObjectLockLegalHoldStatus}"
if GrantFullControl:
cmd += f" --grant-full-control {GrantFullControl}"
if GrantRead:
cmd += f" --grant-read {GrantRead}"
output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output)
def head_object(self, Bucket: str, Key: str, VersionId: str = None) -> dict:
version = f" --version-id {VersionId}" if VersionId else ""
cmd = (
f"aws {self.common_flags} s3api head-object --bucket {Bucket} --key {Key} "
f"{version} --endpoint {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def get_object(
self,
Bucket: str,
Key: str,
file_path: str,
VersionId: Optional[str] = None,
Range: Optional[str] = None,
) -> dict:
version = f" --version-id {VersionId}" if VersionId else ""
cmd = (
f"aws {self.common_flags} s3api get-object --bucket {Bucket} --key {Key} "
f"{version} {file_path} --endpoint {S3_GATE}"
)
if Range:
cmd += f" --range {Range}"
output = _cmd_run(cmd, REGULAR_TIMEOUT)
return self._to_json(output)
def get_object_acl(self, Bucket: str, Key: str, VersionId: Optional[str] = None) -> dict:
version = f" --version-id {VersionId}" if VersionId else ""
cmd = (
f"aws {self.common_flags} s3api get-object-acl --bucket {Bucket} --key {Key} "
f"{version} --endpoint {S3_GATE}"
)
output = _cmd_run(cmd, REGULAR_TIMEOUT)
return self._to_json(output)
def put_object_acl(
self,
Bucket: str,
Key: str,
ACL: Optional[str] = None,
GrantWrite: Optional[str] = None,
GrantRead: Optional[str] = None,
) -> dict:
cmd = (
f"aws {self.common_flags} s3api put-object-acl --bucket {Bucket} --key {Key} "
f" --endpoint {S3_GATE}"
)
if ACL:
cmd += f" --acl {ACL}"
if GrantWrite:
cmd += f" --grant-write {GrantWrite}"
if GrantRead:
cmd += f" --grant-read {GrantRead}"
output = _cmd_run(cmd, REGULAR_TIMEOUT)
return self._to_json(output)
def put_bucket_acl(
self,
Bucket: str,
ACL: Optional[str] = None,
GrantWrite: Optional[str] = None,
GrantRead: Optional[str] = None,
) -> dict:
cmd = (
f"aws {self.common_flags} s3api put-bucket-acl --bucket {Bucket} "
f" --endpoint {S3_GATE}"
)
if ACL:
cmd += f" --acl {ACL}"
if GrantWrite:
cmd += f" --grant-write {GrantWrite}"
if GrantRead:
cmd += f" --grant-read {GrantRead}"
output = _cmd_run(cmd, REGULAR_TIMEOUT)
return self._to_json(output)
def delete_objects(self, Bucket: str, Delete: dict) -> dict:
file_path = f"{os.getcwd()}/{ASSETS_DIR}/delete.json"
with open(file_path, "w") as out_file:
out_file.write(json.dumps(Delete))
logger.info(f"Input file for delete-objects: {json.dumps(Delete)}")
cmd = (
f"aws {self.common_flags} s3api delete-objects --bucket {Bucket} "
f"--delete file://{file_path} --endpoint {S3_GATE}"
)
output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output)
def delete_object(self, Bucket: str, Key: str, VersionId: str = None) -> dict:
version = f" --version-id {VersionId}" if VersionId else ""
cmd = (
f"aws {self.common_flags} s3api delete-object --bucket {Bucket} "
f"--key {Key} {version} --endpoint {S3_GATE}"
)
output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output)
def get_object_attributes(
self,
bucket: str,
key: str,
*attributes: str,
version_id: str = None,
max_parts: int = None,
part_number: int = None,
) -> dict:
attrs = ",".join(attributes)
version = f" --version-id {version_id}" if version_id else ""
parts = f"--max-parts {max_parts}" if max_parts else ""
part_number = f"--part-number-marker {part_number}" if part_number else ""
cmd = (
f"aws {self.common_flags} s3api get-object-attributes --bucket {bucket} "
f"--key {key} {version} {parts} {part_number} --object-attributes {attrs} "
f"--endpoint {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def delete_bucket(self, Bucket: str) -> dict:
cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {Bucket} --endpoint {S3_GATE}"
output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output)
def get_bucket_tagging(self, Bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api get-bucket-tagging --bucket {Bucket} "
f"--endpoint {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def put_bucket_tagging(self, Bucket: str, Tagging: dict) -> dict:
cmd = (
f"aws {self.common_flags} s3api put-bucket-tagging --bucket {Bucket} "
f"--tagging '{json.dumps(Tagging)}' --endpoint {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def delete_bucket_tagging(self, Bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {Bucket} "
f"--endpoint {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def put_object_tagging(self, Bucket: str, Key: str, Tagging: dict) -> dict:
cmd = (
f"aws {self.common_flags} s3api put-object-tagging --bucket {Bucket} --key {Key} "
f"--tagging '{json.dumps(Tagging)}' --endpoint {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def get_object_tagging(self, Bucket: str, Key: str, VersionId: Optional[str] = None) -> dict:
version = f" --version-id {VersionId}" if VersionId else ""
cmd = (
f"aws {self.common_flags} s3api get-object-tagging --bucket {Bucket} --key {Key} "
f"{version} --endpoint {S3_GATE}"
)
output = _cmd_run(cmd, REGULAR_TIMEOUT)
return self._to_json(output)
def delete_object_tagging(self, Bucket: str, Key: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api delete-object-tagging --bucket {Bucket} "
f"--key {Key} --endpoint {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
@allure.step("Sync directory S3")
def sync(
self,
bucket_name: str,
dir_path: str,
ACL: Optional[str] = None,
Metadata: Optional[dict] = None,
) -> dict:
cmd = (
f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket_name} "
f"--endpoint-url {S3_GATE}"
)
if Metadata:
cmd += f" --metadata"
for key, value in Metadata.items():
cmd += f" {key}={value}"
if ACL:
cmd += f" --acl {ACL}"
output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output)
@allure.step("CP directory S3")
def cp(
self,
bucket_name: str,
dir_path: str,
ACL: Optional[str] = None,
Metadata: Optional[dict] = None,
) -> dict:
cmd = (
f"aws {self.common_flags} s3 cp {dir_path} s3://{bucket_name} "
f"--endpoint-url {S3_GATE} --recursive"
)
if Metadata:
cmd += f" --metadata"
for key, value in Metadata.items():
cmd += f" {key}={value}"
if ACL:
cmd += f" --acl {ACL}"
output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output)
def create_multipart_upload(self, Bucket: str, Key: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api create-multipart-upload --bucket {Bucket} "
f"--key {Key} --endpoint-url {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def list_multipart_uploads(self, Bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api list-multipart-uploads --bucket {Bucket} "
f"--endpoint-url {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def abort_multipart_upload(self, Bucket: str, Key: str, UploadId: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api abort-multipart-upload --bucket {Bucket} "
f"--key {Key} --upload-id {UploadId} --endpoint-url {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def upload_part(self, UploadId: str, Bucket: str, Key: str, PartNumber: int, Body: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api upload-part --bucket {Bucket} --key {Key} "
f"--upload-id {UploadId} --part-number {PartNumber} --body {Body} "
f"--endpoint-url {S3_GATE}"
)
output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output)
def upload_part_copy(
self, UploadId: str, Bucket: str, Key: str, PartNumber: int, CopySource: str
) -> dict:
cmd = (
f"aws {self.common_flags} s3api upload-part-copy --bucket {Bucket} --key {Key} "
f"--upload-id {UploadId} --part-number {PartNumber} --copy-source {CopySource} "
f"--endpoint-url {S3_GATE}"
)
output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output)
def list_parts(self, UploadId: str, Bucket: str, Key: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api list-parts --bucket {Bucket} --key {Key} "
f"--upload-id {UploadId} --endpoint-url {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def complete_multipart_upload(
self, Bucket: str, Key: str, UploadId: str, MultipartUpload: dict
) -> dict:
file_path = f"{os.getcwd()}/{ASSETS_DIR}/parts.json"
with open(file_path, "w") as out_file:
out_file.write(json.dumps(MultipartUpload))
logger.info(f"Input file for complete-multipart-upload: {json.dumps(MultipartUpload)}")
cmd = (
f"aws {self.common_flags} s3api complete-multipart-upload --bucket {Bucket} "
f"--key {Key} --upload-id {UploadId} --multipart-upload file://{file_path} "
f"--endpoint-url {S3_GATE}"
)
output = _cmd_run(cmd)
return self._to_json(output)
@staticmethod
def _to_json(output: str) -> dict:
json_output = {}
try:
json_output = json.loads(output)
except Exception:
if "{" not in output and "}" not in output:
logger.warning(f"Could not parse json from output {output}")
return json_output
json_output = json.loads(output[output.index("{") :])
return json_output
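Each wrapper above shells out to the aws CLI and feeds the raw stdout through _to_json, so tests can treat the CLI exactly like a boto3 client. A minimal, hypothetical sketch of the calling convention (bucket and object names are placeholders, not part of the suite):

# Hypothetical usage sketch for the wrapper above; assumes an already
# configured AwsCliClient (see configure_cli_client in the next file).
client = AwsCliClient()

# delete_objects takes the same Delete structure that boto3 uses; the wrapper
# serializes it to a temporary JSON file and passes it to the CLI via file://
delete_spec = {"Objects": [{"Key": "obj-1"}, {"Key": "obj-2"}], "Quiet": False}
result = client.delete_objects(Bucket="my-bucket", Delete=delete_spec)
assert not result.get("Errors"), f"Unexpected deletion errors: {result}"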

View file

@ -0,0 +1,147 @@
import json
import logging
import os
import re
import uuid
from typing import Optional
import allure
import boto3
import pytest
import urllib3
from botocore.config import Config
from botocore.exceptions import ClientError
from cli_helpers import _cmd_run, _configure_aws_cli, _run_with_passwd
from common import (
NEOFS_AUTHMATE_EXEC,
NEOFS_ENDPOINT,
S3_GATE,
S3_GATE_WALLET_PASS,
S3_GATE_WALLET_PATH,
)
from data_formatters import get_wallet_public_key
from neofs_testlib.shell import Shell
from python_keywords.container import list_containers
from steps.aws_cli_client import AwsCliClient
# Disable warnings on self-signed certificate which the
# boto library produces on requests to S3-gate in dev-env
urllib3.disable_warnings()
logger = logging.getLogger("NeoLogger")
CREDENTIALS_CREATE_TIMEOUT = "1m"
# Number of attempts that S3 clients will attempt per each request (1 means single attempt
# without any retries)
MAX_REQUEST_ATTEMPTS = 1
RETRY_MODE = "standard"
class TestS3GateBase:
s3_client = None
@pytest.fixture(scope="class", autouse=True)
@allure.title("[Class/Autouse]: Create S3 client")
def s3_client(self, prepare_wallet_and_deposit, client_shell: Shell, request):
wallet = prepare_wallet_and_deposit
s3_bearer_rules_file = f"{os.getcwd()}/robot/resources/files/s3_bearer_rules.json"
(
cid,
bucket,
access_key_id,
secret_access_key,
owner_private_key,
) = init_s3_credentials(wallet, s3_bearer_rules_file=s3_bearer_rules_file)
containers_list = list_containers(wallet, shell=client_shell)
assert cid in containers_list, f"Expected cid {cid} in {containers_list}"
if request.param == "aws cli":
client = configure_cli_client(access_key_id, secret_access_key)
else:
client = configure_boto3_client(access_key_id, secret_access_key)
TestS3GateBase.s3_client = client
TestS3GateBase.wallet = wallet
@allure.step("Init S3 Credentials")
def init_s3_credentials(wallet_path: str, s3_bearer_rules_file: Optional[str] = None):
bucket = str(uuid.uuid4())
s3_bearer_rules = s3_bearer_rules_file or "robot/resources/files/s3_bearer_rules.json"
gate_public_key = get_wallet_public_key(S3_GATE_WALLET_PATH, S3_GATE_WALLET_PASS)
cmd = (
f"{NEOFS_AUTHMATE_EXEC} --debug --with-log --timeout {CREDENTIALS_CREATE_TIMEOUT} "
f"issue-secret --wallet {wallet_path} --gate-public-key={gate_public_key} "
f"--peer {NEOFS_ENDPOINT} --container-friendly-name {bucket} "
f"--bearer-rules {s3_bearer_rules}"
)
logger.info(f"Executing command: {cmd}")
try:
output = _run_with_passwd(cmd)
logger.info(f"Command completed with output: {output}")
# output contains some debug info and then several JSON structures, so we find each
# JSON structure by curly brackets (naive approach, but works while JSON is not nested)
# and then we take JSON containing secret_access_key
json_blocks = re.findall(r"\{.*?\}", output, re.DOTALL)
for json_block in json_blocks:
try:
parsed_json_block = json.loads(json_block)
if "secret_access_key" in parsed_json_block:
return (
parsed_json_block["container_id"],
bucket,
parsed_json_block["access_key_id"],
parsed_json_block["secret_access_key"],
parsed_json_block["owner_private_key"],
)
except json.JSONDecodeError:
raise AssertionError(f"Could not parse info from output\n{output}")
raise AssertionError(f"Could not find AWS credentials in output:\n{output}")
except Exception as exc:
raise RuntimeError(f"Failed to init s3 credentials because of error\n{exc}") from exc
@allure.step("Configure S3 client (boto3)")
def configure_boto3_client(access_key_id: str, secret_access_key: str):
try:
session = boto3.session.Session()
config = Config(
retries={
"max_attempts": MAX_REQUEST_ATTEMPTS,
"mode": RETRY_MODE,
}
)
s3_client = session.client(
service_name="s3",
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key,
config=config,
endpoint_url=S3_GATE,
verify=False,
)
return s3_client
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Configure S3 client (aws cli)")
def configure_cli_client(access_key_id: str, secret_access_key: str):
try:
client = AwsCliClient()
_configure_aws_cli("aws configure", access_key_id, secret_access_key)
_cmd_run(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS}")
_cmd_run(f"aws configure set retry_mode {RETRY_MODE}")
return client
except Exception as err:
if "command was not found or was not executable" in str(err):
pytest.skip("AWS CLI was not found")
else:
raise RuntimeError("Error while configuring AwsCliClient") from err
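TestS3GateBase is what concrete test classes inherit; the request.param check in the fixture means the client implementation is chosen through indirect parametrization. A hypothetical sketch of how a suite would run every test against both backends (class and test names are illustrative only):

# Hypothetical illustration: run the same tests against both client backends.
# "aws cli" selects configure_cli_client, anything else falls back to boto3.
@pytest.mark.parametrize("s3_client", ["aws cli", "boto3"], indirect=True)
class TestS3GateExample(TestS3GateBase):
    def test_client_is_configured(self):
        # The class-scoped autouse fixture has already issued credentials
        # and stored the ready-to-use client on the class.
        assert self.s3_client is not None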

View file

@ -0,0 +1,208 @@
import logging
import uuid
from enum import Enum
from time import sleep
from typing import Optional
import allure
from botocore.exceptions import ClientError
from cli_helpers import log_command_execution
logger = logging.getLogger("NeoLogger")
# Artificial delay that we add after object deletion and container creation
# Delay is added because sometimes immediately after deletion object still appears
# to be existing (probably because tombstone object takes some time to replicate)
# TODO: remove after https://github.com/nspcc-dev/neofs-s3-gw/issues/610 is fixed
S3_SYNC_WAIT_TIME = 5
class VersioningStatus(Enum):
ENABLED = "Enabled"
SUSPENDED = "Suspended"
@allure.step("Create bucket S3")
def create_bucket_s3(
s3_client,
object_lock_enabled_for_bucket: Optional[bool] = None,
acl: Optional[str] = None,
grant_write: Optional[str] = None,
grant_read: Optional[str] = None,
grant_full_control: Optional[str] = None,
bucket_configuration: Optional[str] = None,
) -> str:
bucket_name = str(uuid.uuid4())
try:
params = {"Bucket": bucket_name}
if object_lock_enabled_for_bucket is not None:
params.update({"ObjectLockEnabledForBucket": object_lock_enabled_for_bucket})
if acl is not None:
params.update({"ACL": acl})
elif grant_write or grant_read or grant_full_control:
if grant_write:
params.update({"GrantWrite": grant_write})
elif grant_read:
params.update({"GrantRead": grant_read})
elif grant_full_control:
params.update({"GrantFullControl": grant_full_control})
if bucket_configuration:
params.update(
{"CreateBucketConfiguration": {"LocationConstraint": bucket_configuration}}
)
s3_bucket = s3_client.create_bucket(**params)
log_command_execution(f"Created S3 bucket {bucket_name}", s3_bucket)
sleep(S3_SYNC_WAIT_TIME)
return bucket_name
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("List buckets S3")
def list_buckets_s3(s3_client):
found_buckets = []
try:
response = s3_client.list_buckets()
log_command_execution("S3 List buckets result", response)
for bucket in response["Buckets"]:
found_buckets.append(bucket["Name"])
return found_buckets
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Delete bucket S3")
def delete_bucket_s3(s3_client, bucket: str):
try:
response = s3_client.delete_bucket(Bucket=bucket)
log_command_execution("S3 Delete bucket result", response)
sleep(S3_SYNC_WAIT_TIME)
return response
except ClientError as err:
log_command_execution("S3 Delete bucket error", str(err))
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Head bucket S3")
def head_bucket(s3_client, bucket: str):
try:
response = s3_client.head_bucket(Bucket=bucket)
log_command_execution("S3 Head bucket result", response)
return response
except ClientError as err:
log_command_execution("S3 Head bucket error", str(err))
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Set bucket versioning status")
def set_bucket_versioning(s3_client, bucket_name: str, status: VersioningStatus) -> None:
try:
response = s3_client.put_bucket_versioning(
Bucket=bucket_name, VersioningConfiguration={"Status": status.value}
)
log_command_execution("S3 Set bucket versioning to", response)
except ClientError as err:
raise Exception(f"Got error during set bucket versioning: {err}") from err
@allure.step("Get bucket versioning status")
def get_bucket_versioning_status(s3_client, bucket_name: str) -> str:
try:
response = s3_client.get_bucket_versioning(Bucket=bucket_name)
status = response.get("Status")
log_command_execution("S3 Got bucket versioning status", response)
return status
except ClientError as err:
raise Exception(f"Got error during get bucket versioning status: {err}") from err
@allure.step("Put bucket tagging")
def put_bucket_tagging(s3_client, bucket_name: str, tags: list):
try:
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
tagging = {"TagSet": tags}
response = s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging=tagging)
log_command_execution("S3 Put bucket tagging", response)
except ClientError as err:
raise Exception(f"Got error during put bucket tagging: {err}") from err
@allure.step("Get bucket acl")
def get_bucket_acl(s3_client, bucket_name: str) -> list:
try:
response = s3_client.get_bucket_acl(Bucket=bucket_name)
log_command_execution("S3 Get bucket acl", response)
return response.get("Grants")
except ClientError as err:
        raise Exception(f"Got error during get bucket acl: {err}") from err
@allure.step("Get bucket tagging")
def get_bucket_tagging(s3_client, bucket_name: str) -> list:
try:
response = s3_client.get_bucket_tagging(Bucket=bucket_name)
log_command_execution("S3 Get bucket tagging", response)
return response.get("TagSet")
except ClientError as err:
raise Exception(f"Got error during get bucket tagging: {err}") from err
@allure.step("Delete bucket tagging")
def delete_bucket_tagging(s3_client, bucket_name: str) -> None:
try:
response = s3_client.delete_bucket_tagging(Bucket=bucket_name)
log_command_execution("S3 Delete bucket tagging", response)
except ClientError as err:
raise Exception(f"Got error during delete bucket tagging: {err}") from err
@allure.step("Put bucket ACL")
def put_bucket_acl_s3(
s3_client,
bucket: str,
acl: Optional[str] = None,
grant_write: Optional[str] = None,
grant_read: Optional[str] = None,
) -> list:
params = {"Bucket": bucket}
if acl:
params.update({"ACL": acl})
elif grant_write or grant_read:
if grant_write:
params.update({"GrantWrite": grant_write})
elif grant_read:
params.update({"GrantRead": grant_read})
try:
response = s3_client.put_bucket_acl(**params)
log_command_execution("S3 ACL bucket result", response)
return response.get("Grants")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err

View file

@ -0,0 +1,525 @@
#!/usr/bin/python3.9
import logging
import os
import uuid
from enum import Enum
from time import sleep
from typing import Optional
import allure
import pytest
import urllib3
from botocore.exceptions import ClientError
from cli_helpers import log_command_execution
from steps.aws_cli_client import AwsCliClient
from steps.s3_gate_bucket import S3_SYNC_WAIT_TIME
##########################################################
# Disabling warnings on self-signed certificate which the
# boto library produces on requests to S3-gate in dev-env.
urllib3.disable_warnings()
##########################################################
logger = logging.getLogger("NeoLogger")
ACL_COPY = [
"private",
"public-read",
"public-read-write",
"authenticated-read",
"aws-exec-read",
"bucket-owner-read",
"bucket-owner-full-control",
]
ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/")
@allure.step("List objects S3 v2")
def list_objects_s3_v2(s3_client, bucket: str, full_output: bool = False) -> list:
try:
response = s3_client.list_objects_v2(Bucket=bucket)
content = response.get("Contents", [])
log_command_execution("S3 v2 List objects result", response)
obj_list = []
for obj in content:
obj_list.append(obj["Key"])
logger.info(f"Found s3 objects: {obj_list}")
return response if full_output else obj_list
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("List objects S3")
def list_objects_s3(s3_client, bucket: str, full_output: bool = False) -> list:
try:
response = s3_client.list_objects(Bucket=bucket)
content = response.get("Contents", [])
log_command_execution("S3 List objects result", response)
obj_list = []
for obj in content:
obj_list.append(obj["Key"])
logger.info(f"Found s3 objects: {obj_list}")
return response if full_output else obj_list
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("List objects versions S3")
def list_objects_versions_s3(s3_client, bucket: str, full_output: bool = False) -> list:
try:
response = s3_client.list_object_versions(Bucket=bucket)
versions = response.get("Versions", [])
log_command_execution("S3 List objects versions result", response)
return response if full_output else versions
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Put object S3")
def put_object_s3(s3_client, bucket: str, filepath: str, **kwargs):
filename = os.path.basename(filepath)
if isinstance(s3_client, AwsCliClient):
file_content = filepath
else:
with open(filepath, "rb") as put_file:
file_content = put_file.read()
try:
params = {"Body": file_content, "Bucket": bucket, "Key": filename}
if kwargs:
params = {**params, **kwargs}
response = s3_client.put_object(**params)
log_command_execution("S3 Put object result", response)
return response.get("VersionId")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Head object S3")
def head_object_s3(s3_client, bucket: str, object_key: str, version_id: Optional[str] = None):
try:
params = {"Bucket": bucket, "Key": object_key}
if version_id:
params["VersionId"] = version_id
response = s3_client.head_object(**params)
log_command_execution("S3 Head object result", response)
return response
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Delete object S3")
def delete_object_s3(
s3_client, bucket: str, object_key: str, version_id: Optional[str] = None
) -> dict:
try:
params = {"Bucket": bucket, "Key": object_key}
if version_id:
params["VersionId"] = version_id
response = s3_client.delete_object(**params)
log_command_execution("S3 Delete object result", response)
sleep(S3_SYNC_WAIT_TIME)
return response
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Delete objects S3")
def delete_objects_s3(s3_client, bucket: str, object_keys: list):
try:
response = s3_client.delete_objects(Bucket=bucket, Delete=_make_objs_dict(object_keys))
log_command_execution("S3 Delete objects result", response)
sleep(S3_SYNC_WAIT_TIME)
return response
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Delete object versions S3")
def delete_object_versions_s3(s3_client, bucket: str, object_versions: list):
try:
# Build deletion list in S3 format
delete_list = {
"Objects": [
{
"Key": object_version["Key"],
"VersionId": object_version["VersionId"],
}
for object_version in object_versions
]
}
response = s3_client.delete_objects(Bucket=bucket, Delete=delete_list)
log_command_execution("S3 Delete objects result", response)
return response
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Put object ACL")
def put_object_acl_s3(
s3_client,
bucket: str,
object_key: str,
acl: Optional[str] = None,
grant_write: Optional[str] = None,
grant_read: Optional[str] = None,
) -> list:
if not isinstance(s3_client, AwsCliClient):
pytest.skip("Method put_object_acl is not supported by boto3 client")
params = {"Bucket": bucket, "Key": object_key}
if acl:
params.update({"ACL": acl})
elif grant_write or grant_read:
if grant_write:
params.update({"GrantWrite": grant_write})
elif grant_read:
params.update({"GrantRead": grant_read})
try:
response = s3_client.put_object_acl(**params)
log_command_execution("S3 ACL objects result", response)
return response.get("Grants")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Get object ACL")
def get_object_acl_s3(
s3_client, bucket: str, object_key: str, version_id: Optional[str] = None
) -> list:
params = {"Bucket": bucket, "Key": object_key}
try:
if version_id:
params.update({"VersionId": version_id})
response = s3_client.get_object_acl(**params)
log_command_execution("S3 ACL objects result", response)
return response.get("Grants")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Copy object S3")
def copy_object_s3(
s3_client, bucket: str, object_key: str, bucket_dst: Optional[str] = None, **kwargs
) -> str:
filename = f"{os.getcwd()}/{uuid.uuid4()}"
try:
params = {
"Bucket": bucket_dst or bucket,
"CopySource": f"{bucket}/{object_key}",
"Key": filename,
}
if "ACL" in kwargs and kwargs["ACL"] in ACL_COPY:
params.update({"ACL": kwargs["ACL"]})
if "metadata_directive" in kwargs.keys():
params.update({"MetadataDirective": kwargs["metadata_directive"]})
if "metadata_directive" in kwargs.keys() and "metadata" in kwargs.keys():
params.update({"Metadata": kwargs["metadata"]})
if "tagging_directive" in kwargs.keys():
params.update({"TaggingDirective": kwargs["tagging_directive"]})
if "tagging_directive" in kwargs.keys() and "tagging" in kwargs.keys():
params.update({"Tagging": kwargs["tagging"]})
response = s3_client.copy_object(**params)
log_command_execution("S3 Copy objects result", response)
return filename
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Get object S3")
def get_object_s3(
s3_client,
bucket: str,
object_key: str,
version_id: Optional[str] = None,
range: Optional[list] = None,
full_output: bool = False,
):
filename = f"{ASSETS_DIR}/{uuid.uuid4()}"
try:
params = {"Bucket": bucket, "Key": object_key}
if version_id:
params["VersionId"] = version_id
if isinstance(s3_client, AwsCliClient):
params["file_path"] = filename
if range:
params["Range"] = f"bytes={range[0]}-{range[1]}"
response = s3_client.get_object(**params)
log_command_execution("S3 Get objects result", response)
if not isinstance(s3_client, AwsCliClient):
with open(f"{filename}", "wb") as get_file:
chunk = response["Body"].read(1024)
while chunk:
get_file.write(chunk)
chunk = response["Body"].read(1024)
return response if full_output else filename
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Create multipart upload S3")
def create_multipart_upload_s3(s3_client, bucket_name: str, object_key: str) -> str:
try:
response = s3_client.create_multipart_upload(Bucket=bucket_name, Key=object_key)
log_command_execution("S3 Created multipart upload", response)
assert response.get("UploadId"), f"Expected UploadId in response:\n{response}"
return response.get("UploadId")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("List multipart uploads S3")
def list_multipart_uploads_s3(s3_client, bucket_name: str) -> Optional[list[dict]]:
try:
response = s3_client.list_multipart_uploads(Bucket=bucket_name)
log_command_execution("S3 List multipart upload", response)
return response.get("Uploads")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Abort multipart upload S3")
def abort_multipart_uploads_s3(s3_client, bucket_name: str, object_key: str, upload_id: str):
try:
response = s3_client.abort_multipart_upload(
Bucket=bucket_name, Key=object_key, UploadId=upload_id
)
log_command_execution("S3 Abort multipart upload", response)
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Upload part S3")
def upload_part_s3(
s3_client, bucket_name: str, object_key: str, upload_id: str, part_num: int, filepath: str
) -> str:
if isinstance(s3_client, AwsCliClient):
file_content = filepath
else:
with open(filepath, "rb") as put_file:
file_content = put_file.read()
try:
response = s3_client.upload_part(
UploadId=upload_id,
Bucket=bucket_name,
Key=object_key,
PartNumber=part_num,
Body=file_content,
)
log_command_execution("S3 Upload part", response)
assert response.get("ETag"), f"Expected ETag in response:\n{response}"
return response.get("ETag")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Upload copy part S3")
def upload_part_copy_s3(
s3_client, bucket_name: str, object_key: str, upload_id: str, part_num: int, copy_source: str
) -> str:
try:
response = s3_client.upload_part_copy(
UploadId=upload_id,
Bucket=bucket_name,
Key=object_key,
PartNumber=part_num,
CopySource=copy_source,
)
log_command_execution("S3 Upload copy part", response)
assert response.get("CopyPartResult").get("ETag"), f"Expected ETag in response:\n{response}"
return response.get("CopyPartResult").get("ETag")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("List parts S3")
def list_parts_s3(s3_client, bucket_name: str, object_key: str, upload_id: str) -> list[dict]:
try:
response = s3_client.list_parts(UploadId=upload_id, Bucket=bucket_name, Key=object_key)
log_command_execution("S3 List part", response)
assert response.get("Parts"), f"Expected Parts in response:\n{response}"
return response.get("Parts")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Complete multipart upload S3")
def complete_multipart_upload_s3(
s3_client, bucket_name: str, object_key: str, upload_id: str, parts: list
):
try:
parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]
response = s3_client.complete_multipart_upload(
Bucket=bucket_name, Key=object_key, UploadId=upload_id, MultipartUpload={"Parts": parts}
)
log_command_execution("S3 Complete multipart upload", response)
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Put object tagging")
def put_object_tagging(s3_client, bucket_name: str, object_key: str, tags: list):
try:
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
tagging = {"TagSet": tags}
s3_client.put_object_tagging(Bucket=bucket_name, Key=object_key, Tagging=tagging)
log_command_execution("S3 Put object tagging", str(tags))
except ClientError as err:
raise Exception(f"Got error during put object tagging: {err}") from err
@allure.step("Get object tagging")
def get_object_tagging(
s3_client, bucket_name: str, object_key: str, version_id: Optional[str] = None
) -> list:
try:
params = {"Bucket": bucket_name, "Key": object_key}
if version_id:
params.update({"VersionId": version_id})
response = s3_client.get_object_tagging(**params)
log_command_execution("S3 Get object tagging", response)
return response.get("TagSet")
except ClientError as err:
raise Exception(f"Got error during get object tagging: {err}") from err
@allure.step("Delete object tagging")
def delete_object_tagging(s3_client, bucket_name: str, object_key: str):
try:
response = s3_client.delete_object_tagging(Bucket=bucket_name, Key=object_key)
log_command_execution("S3 Delete object tagging", response)
except ClientError as err:
raise Exception(f"Got error during delete object tagging: {err}") from err
@allure.step("Get object attributes")
def get_object_attributes(
s3_client,
bucket_name: str,
object_key: str,
*attributes: str,
version_id: Optional[str] = None,
max_parts: Optional[int] = None,
part_number: Optional[int] = None,
get_full_resp: bool = True,
) -> dict:
try:
if not isinstance(s3_client, AwsCliClient):
logger.warning("Method get_object_attributes is not supported by boto3 client")
return {}
response = s3_client.get_object_attributes(
bucket_name,
object_key,
*attributes,
version_id=version_id,
max_parts=max_parts,
part_number=part_number,
)
log_command_execution("S3 Get object attributes", response)
for attr in attributes:
assert attr in response, f"Expected attribute {attr} in {response}"
if get_full_resp:
return response
else:
return response.get(attributes[0])
except ClientError as err:
raise Exception(f"Got error during get object attributes: {err}") from err
def _make_objs_dict(key_names):
objs_list = []
for key in key_names:
obj_dict = {"Key": key}
objs_list.append(obj_dict)
objs_dict = {"Objects": objs_list}
return objs_dict
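The multipart helpers above mirror the standard S3 flow: create an upload, send parts, then complete with the collected ETags. Note that complete_multipart_upload_s3 expects (part_number, etag) tuples. A hypothetical sketch of the sequence (file paths are placeholders):

# Hypothetical usage sketch for the multipart helpers above.
upload_id = create_multipart_upload_s3(s3_client, bucket, "big-object")
parts = []
for part_num, part_file in enumerate(["part-1.bin", "part-2.bin"], start=1):
    etag = upload_part_s3(s3_client, bucket, "big-object", upload_id, part_num, part_file)
    parts.append((part_num, etag))
assert len(list_parts_s3(s3_client, bucket, "big-object", upload_id)) == len(parts)
complete_multipart_upload_s3(s3_client, bucket, "big-object", upload_id, parts)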

View file

@ -1,397 +0,0 @@
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.resources.error_patterns import OBJECT_ACCESS_DENIED
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.steps.node_management import drop_object
from frostfs_testlib.storage.dataclasses import ape
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils import wallet_utils
from frostfs_testlib.utils.failover_utils import wait_object_replication
from frostfs_testlib.utils.file_utils import TestFile
from ....helpers.container_access import (
ALL_OBJECT_OPERATIONS,
assert_access_to_container,
assert_full_access_to_container,
assert_no_access_to_container,
)
from ....helpers.container_request import APE_EVERYONE_ALLOW_ALL, OWNER_ALLOW_ALL, ContainerRequest
@pytest.fixture
def denied_wallet(default_wallet: WalletInfo, other_wallet: WalletInfo, role: ape.Role) -> WalletInfo:
return other_wallet if role == ape.Role.OTHERS else default_wallet
@pytest.fixture
def allowed_wallet(default_wallet: WalletInfo, other_wallet: WalletInfo, role: ape.Role) -> WalletInfo:
return default_wallet if role == ape.Role.OTHERS else other_wallet
@pytest.mark.nightly
@pytest.mark.ape
class TestApeContainer(ClusterTestBase):
# TODO: Without PATCH operation,
# since it requires specific permissions that do not apply when testing all operations at once
@pytest.mark.sanity
@allure.title("Deny operations via APE by role (role={role}, obj_size={object_size})")
@pytest.mark.parametrize("role", [ape.Role.OWNER, ape.Role.OTHERS], indirect=True)
@pytest.mark.parametrize("objects", [4], indirect=True)
def test_deny_operations_via_ape_by_role(
self,
denied_wallet: WalletInfo,
allowed_wallet: WalletInfo,
frostfs_cli: FrostfsCli,
container: str,
objects: list[str],
role: ape.Role,
test_file: TestFile,
rpc_endpoint: str,
):
with reporter.step(f"Deny all operations for {role} via APE"):
deny_rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS, ape.Condition.by_role(role.value))
frostfs_cli.ape_manager.add(
rpc_endpoint, deny_rule.chain_id, target_name=container, target_type="container", rule=deny_rule.as_string()
)
with reporter.step("Wait for one block"):
self.wait_for_blocks()
        with reporter.step("Assert denied role has no access to public container"):
            # the access checks attempt to remove the object, so we .pop() to hand each check an object that still exists
assert_no_access_to_container(denied_wallet, container, objects.pop(), test_file, self.shell, self.cluster)
        with reporter.step("Assert allowed role has full access to public container"):
            assert_full_access_to_container(allowed_wallet, container, objects.pop(), test_file, self.shell, self.cluster)
        with reporter.step("Remove deny rule from APE"):
frostfs_cli.ape_manager.remove(rpc_endpoint, deny_rule.chain_id, target_name=container, target_type="container")
with reporter.step("Wait for one block"):
self.wait_for_blocks()
        with reporter.step("Assert allowed role has full access to public container"):
            assert_full_access_to_container(allowed_wallet, container, objects.pop(), test_file, self.shell, self.cluster)
        with reporter.step("Assert denied role has full access to public container"):
assert_full_access_to_container(denied_wallet, container, objects.pop(), test_file, self.shell, self.cluster)
# TODO: Without PATCH operation,
# since it requires specific permissions that do not apply when testing all operations at once
@allure.title("Deny operations for others via APE excluding single pubkey (obj_size={object_size})")
@pytest.mark.parametrize("objects", [2], indirect=True)
    def test_deny_operations_excluding_pubkey(
self,
frostfs_cli: FrostfsCli,
default_wallet: WalletInfo,
other_wallet: WalletInfo,
other_wallet_2: WalletInfo,
container: str,
objects: list[str],
rpc_endpoint: str,
test_file: TestFile,
):
with reporter.step("Add deny APE rules for others except single wallet"):
rule_conditions = [
ape.Condition.by_role(ape.Role.OTHERS),
ape.Condition.by_key(
wallet_utils.get_wallet_public_key(other_wallet_2.path, other_wallet_2.password),
match_type=ape.MatchType.NOT_EQUAL,
),
]
rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS, rule_conditions)
frostfs_cli.ape_manager.add(rpc_endpoint, rule.chain_id, target_name=container, target_type="container", rule=rule.as_string())
with reporter.step("Wait for one block"):
self.wait_for_blocks()
with reporter.step("Assert others have no access to public container"):
            # the access checks attempt to remove the object, so we .pop() to hand each check an object that still exists
assert_no_access_to_container(other_wallet, container, objects[0], test_file, self.shell, self.cluster)
        with reporter.step("Assert owner has full access to public container"):
            assert_full_access_to_container(default_wallet, container, objects.pop(), test_file, self.shell, self.cluster)
        with reporter.step("Assert allowed wallet has full access to public container"):
assert_full_access_to_container(other_wallet_2, container, objects.pop(), test_file, self.shell, self.cluster)
@allure.title("Replication works with APE deny rules on OWNER and OTHERS (obj_size={object_size})")
@pytest.mark.parametrize(
"container_request",
[ContainerRequest(f"REP %NODE_COUNT% IN X CBF 1 SELECT %NODE_COUNT% FROM * AS X", APE_EVERYONE_ALLOW_ALL, "custom")],
indirect=True,
)
def test_replication_works_with_deny_rules(
self,
default_wallet: WalletInfo,
frostfs_cli: FrostfsCli,
container: str,
rpc_endpoint: str,
test_file: TestFile,
):
with reporter.step("Put object to container"):
oid = put_object_to_random_node(default_wallet, test_file, container, self.shell, self.cluster)
with reporter.step("Wait for object replication after upload"):
wait_object_replication(container, oid, len(self.cluster.cluster_nodes), self.shell, self.cluster.storage_nodes)
with reporter.step("Add deny APE rules for owner and others"):
rule_conditions = [
ape.Condition.by_role(ape.Role.OWNER),
ape.Condition.by_role(ape.Role.OTHERS),
]
for rule_condition in rule_conditions:
rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS, rule_condition)
frostfs_cli.ape_manager.add(
rpc_endpoint, rule.chain_id, target_name=container, target_type="container", rule=rule.as_string()
)
with reporter.step("Wait for one block"):
self.wait_for_blocks()
with reporter.step("Drop object"):
drop_object(self.cluster.storage_nodes[0], container, oid)
with reporter.step("Wait for dropped object to be replicated"):
wait_object_replication(container, oid, len(self.cluster.storage_nodes), self.shell, self.cluster.storage_nodes)
# TODO: Without PATCH operation,
# since it requires specific permissions that do not apply when testing all operations at once
@allure.title("Deny operations via APE by role (role=ir, obj_size={object_size})")
@pytest.mark.parametrize("container_request", [OWNER_ALLOW_ALL], indirect=True)
@pytest.mark.parametrize("objects", [3], indirect=True)
def test_deny_operations_via_ape_by_role_ir(
self, frostfs_cli: FrostfsCli, ir_wallet: WalletInfo, container: str, objects: list[str], rpc_endpoint: str, test_file: TestFile
):
default_ir_access = {
ape.ObjectOperations.PUT: False,
ape.ObjectOperations.GET: True,
ape.ObjectOperations.HEAD: True,
ape.ObjectOperations.GET_RANGE: True,
ape.ObjectOperations.GET_RANGE_HASH: True,
ape.ObjectOperations.SEARCH: True,
ape.ObjectOperations.PATCH: False,
ape.ObjectOperations.DELETE: False,
}
with reporter.step("Assert IR wallet access in default state"):
assert_access_to_container(default_ir_access, ir_wallet, container, objects.pop(), test_file, self.shell, self.cluster)
with reporter.step("Add deny APE rule with deny all operations for IR role"):
rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS, [ape.Condition.by_role(ape.Role.IR.value)])
frostfs_cli.ape_manager.add(rpc_endpoint, rule.chain_id, target_name=container, target_type="container", rule=rule.as_string())
with reporter.step("Wait for one block"):
self.wait_for_blocks()
with reporter.step("Assert IR wallet ignores APE rules"):
assert_access_to_container(default_ir_access, ir_wallet, container, objects.pop(), test_file, self.shell, self.cluster)
with reporter.step("Remove APE rule"):
frostfs_cli.ape_manager.remove(rpc_endpoint, rule.chain_id, target_name=container, target_type="container")
with reporter.step("Wait for one block"):
self.wait_for_blocks()
with reporter.step("Assert IR wallet access is restored"):
assert_access_to_container(default_ir_access, ir_wallet, container, objects.pop(), test_file, self.shell, self.cluster)
# TODO: Without PATCH operation,
# since it requires specific permissions that do not apply when testing all operations at once
@allure.title("Deny operations via APE by role (role=container, obj_size={object_size})")
@pytest.mark.parametrize("container_request", [OWNER_ALLOW_ALL], indirect=True)
@pytest.mark.parametrize("objects", [3], indirect=True)
def test_deny_operations_via_ape_by_role_container(
self,
frostfs_cli: FrostfsCli,
container_node_wallet: WalletInfo,
container: str,
objects: list[str],
rpc_endpoint: str,
test_file: TestFile,
):
access_matrix = {
ape.ObjectOperations.PUT: True,
ape.ObjectOperations.GET: True,
ape.ObjectOperations.HEAD: True,
ape.ObjectOperations.GET_RANGE: True,
ape.ObjectOperations.GET_RANGE_HASH: True,
ape.ObjectOperations.SEARCH: True,
ape.ObjectOperations.PATCH: True,
ape.ObjectOperations.DELETE: True,
}
with reporter.step("Assert CONTAINER wallet access in default state"):
assert_access_to_container(access_matrix, container_node_wallet, container, objects.pop(), test_file, self.shell, self.cluster)
rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS, ape.Condition.by_role(ape.Role.CONTAINER.value))
        with reporter.step("Add APE rule with deny all operations for CONTAINER role"):
frostfs_cli.ape_manager.add(rpc_endpoint, rule.chain_id, target_name=container, target_type="container", rule=rule.as_string())
with reporter.step("Wait for one block"):
self.wait_for_blocks()
with reporter.step("Assert CONTAINER wallet ignores APE rule"):
assert_access_to_container(access_matrix, container_node_wallet, container, objects.pop(), test_file, self.shell, self.cluster)
with reporter.step("Remove APE rule"):
frostfs_cli.ape_manager.remove(rpc_endpoint, rule.chain_id, target_name=container, target_type="container")
with reporter.step("Wait for one block"):
self.wait_for_blocks()
with reporter.step("Assert CONTAINER wallet access after rule was removed"):
assert_access_to_container(access_matrix, container_node_wallet, container, objects.pop(), test_file, self.shell, self.cluster)
@allure.title("Deny PATCH operation via APE (object_size={object_size})")
@pytest.mark.parametrize("objects", [1], indirect=True)
def test_patch_object_with_deny_rule(
self,
frostfs_cli: FrostfsCli,
grpc_client: GrpcClientWrapper,
grpc_client_with_other_wallet: GrpcClientWrapper,
grpc_client_with_container_wallet: GrpcClientWrapper,
grpc_client_with_ir_wallet: GrpcClientWrapper,
container: str,
objects: list[str],
test_file: TestFile,
):
patch_params = {
"cid": container,
"oid": objects[0],
"endpoint": self.cluster.default_rpc_endpoint,
"ranges": ["300:200"],
"payloads": [test_file],
"new_attrs": "owner=true",
"timeout": "200s",
}
with reporter.step("Check that PATCH is available with owner wallet"):
patched_oid = grpc_client.object.patch(**patch_params)
assert patched_oid != patch_params["oid"], "OID of patched object must be different from original one"
patch_params["oid"] = patched_oid
with reporter.step("Check that PATCH is available with another wallet"):
patch_params["ranges"] = ["100:50"]
patch_params["new_attrs"] = "other=true"
patched_oid = grpc_client_with_other_wallet.object.patch(**patch_params)
assert patched_oid != patch_params["oid"], "OID of patched object must be different from original one"
patch_params["oid"] = patched_oid
with reporter.step("Check that PATCH is available with container wallet"):
patch_params["ranges"] = ["600:0"]
patch_params["new_attrs"] = "container=true"
patched_oid = grpc_client_with_container_wallet.object.patch(**patch_params)
assert patched_oid != patch_params["oid"], "OID of patched object must be different from original one"
patch_params["oid"] = patched_oid
with reporter.step("Check that PATCH is available with ir wallet"):
patch_params["ranges"] = ["0:1000"]
patch_params["new_attrs"] = "ir=true"
patched_oid = grpc_client_with_ir_wallet.object.patch(**patch_params)
assert patched_oid != patch_params["oid"], "OID of patched object must be different from original one"
patch_params["oid"] = patched_oid
rule = ape.Rule(ape.Verb.DENY, ape.ObjectOperations.PATCH)
with reporter.step("Add APE rule with deny PATCH operation"):
frostfs_cli.ape_manager.add(
self.cluster.default_rpc_endpoint,
rule.chain_id,
target_name=container,
target_type="container",
rule=rule.as_string(),
)
with reporter.step("Wait for one block"):
self.wait_for_blocks(1)
with reporter.step("Check that PATCH is not allowed with owner wallet"):
patch_params["ranges"] = ["300:200"]
patch_params["new_attrs"] = "owner_2=false"
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
grpc_client.object.patch(**patch_params)
with reporter.step("Check that PATCH is not allowed with another wallet"):
patch_params["ranges"] = ["100:50"]
patch_params["new_attrs"] = "other_2=false"
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
grpc_client_with_other_wallet.object.patch(**patch_params)
with reporter.step("Check that PATCH is allowed with container wallet as rule is ignored"):
patch_params["ranges"] = ["600:0"]
patch_params["new_attrs"] = "container_2=true"
patched_oid = grpc_client_with_container_wallet.object.patch(**patch_params)
assert patched_oid != patch_params["oid"], "OID of patched object must be different from original one"
patch_params["oid"] = patched_oid
        with reporter.step("Check that PATCH is not allowed with ir wallet"):
patch_params["ranges"] = ["0:1000"]
patch_params["new_attrs"] = "ir_2=true"
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
grpc_client_with_ir_wallet.object.patch(**patch_params)
with reporter.step("Remove APE rule"):
frostfs_cli.ape_manager.remove(
self.cluster.default_rpc_endpoint,
rule.chain_id,
target_name=container,
target_type="container",
)
with reporter.step("Wait for one block"):
self.wait_for_blocks(1)
with reporter.step("Check that PATCH is available with owner wallet"):
patch_params["ranges"] = ["300:200"]
patch_params["new_attrs"] = "owner_3=true"
patched_oid = grpc_client.object.patch(**patch_params)
assert patched_oid != patch_params["oid"], "OID of patched object must be different from original one"
patch_params["oid"] = patched_oid
with reporter.step("Check that PATCH is available with another wallet"):
patch_params["ranges"] = ["100:50"]
patch_params["new_attrs"] = "other_3=true"
patched_oid = grpc_client_with_other_wallet.object.patch(**patch_params)
assert patched_oid != patch_params["oid"], "OID of patched object must be different from original one"
patch_params["oid"] = patched_oid
with reporter.step("Check that PATCH is available with container wallet"):
patch_params["ranges"] = ["600:0"]
patch_params["new_attrs"] = "container_3=true"
patched_oid = grpc_client_with_container_wallet.object.patch(**patch_params)
assert patched_oid != patch_params["oid"], "OID of patched object must be different from original one"
patch_params["oid"] = patched_oid
with reporter.step("Check that PATCH is available with ir wallet"):
patch_params["ranges"] = ["0:1000"]
patch_params["new_attrs"] = "ir_3=true"
patched_oid = grpc_client_with_ir_wallet.object.patch(**patch_params)
assert patched_oid != patch_params["oid"], "OID of patched object must be different from original one"
patch_params["oid"] = patched_oid
attrs = {"owner", "other", "container", "ir", "container_2", "owner_3", "other_3", "container_3", "ir_3"}
with reporter.step("Ensure that all attributes match expected values"):
object_info: dict = grpc_client.object.head(container, patch_params["oid"], self.cluster.default_rpc_endpoint)
object_attrs: dict = object_info["header"]["attributes"]
assert attrs <= {k for k in object_attrs.keys()}, f"Received attributes do not match expected ones: {object_attrs}"
assert all(
v == "true" for k, v in object_attrs.items() if k in attrs
), f"Received attributes do not match expected ones: {object_attrs}"

View file

@ -1,397 +0,0 @@
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.resources.error_patterns import OBJECT_ACCESS_DENIED
from frostfs_testlib.steps.cli.object import get_object_from_random_node, head_object, put_object_to_random_node
from frostfs_testlib.storage.dataclasses import ape
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils.file_utils import TestFile
from ....helpers.bearer_token import create_bearer_token
from ....helpers.container_access import (
ALL_OBJECT_OPERATIONS,
FULL_ACCESS,
assert_access_to_container,
assert_full_access_to_container,
assert_no_access_to_container,
)
from ....helpers.container_request import OWNER_ALLOW_ALL
from ....helpers.object_access import OBJECT_ACCESS_DENIED
@pytest.mark.nightly
@pytest.mark.ape
class TestApeFilters(ClusterTestBase):
# SPEC: https://github.com/nspcc-dev/neofs-spec/blob/master/01-arch/07-acl.md
HEADER = {"check_key": "check_value"}
OTHER_HEADER = {"check_key": "other_value"}
ATTRIBUTES = {
"key_one": "check_value",
"x_key": "xvalue",
"check_key": "check_value",
}
OTHER_ATTRIBUTES = {
"key_one": "check_value",
"x_key": "other_value",
"check_key": "other_value",
}
OBJECT_COUNT = 5
RESOURCE_OPERATIONS = [
ape.ObjectOperations.GET,
ape.ObjectOperations.HEAD,
ape.ObjectOperations.PUT,
]
@pytest.fixture
def objects_with_attributes(self, default_wallet: WalletInfo, file_path: TestFile, container: str):
return [
put_object_to_random_node(
default_wallet, file_path, container, self.shell, self.cluster, attributes={**self.ATTRIBUTES, "key": val}
)
for val in range(self.OBJECT_COUNT)
]
@pytest.fixture
def objects_with_other_attributes(self, default_wallet: WalletInfo, file_path: TestFile, container: str):
return [
put_object_to_random_node(
default_wallet, file_path, container, self.shell, self.cluster, attributes={**self.OTHER_ATTRIBUTES, "key": val}
)
for val in range(self.OBJECT_COUNT)
]
@pytest.fixture
def objects_without_attributes(self, default_wallet: WalletInfo, file_path: TestFile, container: str):
return [put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster) for _ in range(self.OBJECT_COUNT)]
@pytest.mark.sanity
@allure.title("Operations with request filter (match_type={match_type}, obj_size={object_size})")
@pytest.mark.parametrize("match_type", [ape.MatchType.EQUAL, ape.MatchType.NOT_EQUAL])
@pytest.mark.skip("https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/1243")
def test_ape_filters_request(
self,
frostfs_cli: FrostfsCli,
temp_directory: str,
other_wallet: WalletInfo,
container: str,
objects_with_attributes: list[str],
objects_with_other_attributes: list[str],
objects_without_attributes: list[str],
match_type: ape.MatchType,
file_path: TestFile,
rpc_endpoint: str,
):
with reporter.step("Deny all operations for others via APE with request condition"):
request_condition = ape.Condition('"frostfs:xheader/check_key"', '"check_value"', ape.ConditionType.REQUEST, match_type)
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
deny_rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS, [request_condition, role_condition])
frostfs_cli.ape_manager.add(
rpc_endpoint, deny_rule.chain_id, target_name=container, target_type="container", rule=deny_rule.as_string()
)
with reporter.step("Wait for one block"):
self.wait_for_blocks()
with reporter.step("Create bearer token with everything allowed for others role"):
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
rule = ape.Rule(ape.Verb.ALLOW, ALL_OBJECT_OPERATIONS, role_condition)
bearer = create_bearer_token(frostfs_cli, temp_directory, container, rule, rpc_endpoint)
        # The filter denies requests where "check_key {match_type} ATTRIBUTE", so when match_type
        # is EQUAL, requests with "check_key=OTHER_ATTRIBUTE" are allowed while requests with
        # "check_key=ATTRIBUTE" are denied, and vice versa
allow_headers = self.OTHER_HEADER if match_type == ape.MatchType.EQUAL else self.HEADER
deny_headers = self.HEADER if match_type == ape.MatchType.EQUAL else self.OTHER_HEADER
# We test on 3 groups of objects with various headers,
# but APE rule should ignore object headers and only work based on request headers
for oids in [objects_with_attributes, objects_with_other_attributes, objects_without_attributes]:
with reporter.step("Check others has full access when sending request without headers"):
assert_full_access_to_container(other_wallet, container, oids.pop(), file_path, self.shell, self.cluster)
with reporter.step("Check others has full access when sending request with allowed headers"):
assert_full_access_to_container(
other_wallet, container, oids.pop(), file_path, self.shell, self.cluster, xhdr=allow_headers
)
with reporter.step("Check others has no access when sending request with denied headers"):
assert_no_access_to_container(other_wallet, container, oids.pop(), file_path, self.shell, self.cluster, xhdr=deny_headers)
with reporter.step("Check others has full access when sending request with denied headers and using bearer token"):
assert_full_access_to_container(
other_wallet, container, oids.pop(), file_path, self.shell, self.cluster, bearer, deny_headers
)
@allure.title("Operations with deny user headers filter (match_type={match_type}, obj_size={object_size})")
@pytest.mark.parametrize("match_type", [ape.MatchType.EQUAL, ape.MatchType.NOT_EQUAL])
@pytest.mark.skip("https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/1300")
def test_ape_deny_filters_object(
self,
frostfs_cli: FrostfsCli,
temp_directory: str,
other_wallet: WalletInfo,
container: str,
objects_with_attributes: list[str],
objects_with_other_attributes: list[str],
objects_without_attributes: list[str],
match_type: ape.MatchType,
rpc_endpoint: str,
file_path: TestFile,
):
allow_objects = objects_with_other_attributes if match_type == ape.MatchType.EQUAL else objects_with_attributes
deny_objects = objects_with_attributes if match_type == ape.MatchType.EQUAL else objects_with_other_attributes
        # When there is no attribute on the object, it behaves like "", and "" is not equal to "<some_value>",
        # so objects without attributes are treated the same as deny_objects
no_attributes_access = {
ape.MatchType.EQUAL: FULL_ACCESS,
ape.MatchType.NOT_EQUAL: {
ape.ObjectOperations.PUT: False,
ape.ObjectOperations.GET: False,
ape.ObjectOperations.HEAD: False,
ape.ObjectOperations.GET_RANGE: True,
ape.ObjectOperations.GET_RANGE_HASH: True,
ape.ObjectOperations.SEARCH: True,
ape.ObjectOperations.DELETE: False, # Denied by restricted PUT
},
}
allowed_access = {
ape.MatchType.EQUAL: FULL_ACCESS,
ape.MatchType.NOT_EQUAL: {
                ape.ObjectOperations.PUT: False,  # because the access checks PUT an object without attributes
ape.ObjectOperations.GET: True,
ape.ObjectOperations.HEAD: True,
ape.ObjectOperations.GET_RANGE: True,
ape.ObjectOperations.GET_RANGE_HASH: True,
ape.ObjectOperations.SEARCH: True,
ape.ObjectOperations.DELETE: False, # Because delete needs to put a tombstone without attributes
},
}
with reporter.step("Deny operations for others via APE with resource condition"):
resource_condition = ape.Condition('"check_key"', '"check_value"', ape.ConditionType.RESOURCE, match_type)
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
deny_rule = ape.Rule(ape.Verb.DENY, self.RESOURCE_OPERATIONS, [resource_condition, role_condition])
frostfs_cli.ape_manager.add(
rpc_endpoint, deny_rule.chain_id, target_name=container, target_type="container", rule=deny_rule.as_string()
)
with reporter.step("Wait for one block"):
self.wait_for_blocks()
with reporter.step("Create bearer token with everything allowed for others role"):
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
rule = ape.Rule(ape.Verb.ALLOW, ALL_OBJECT_OPERATIONS, role_condition)
bearer = create_bearer_token(frostfs_cli, temp_directory, container, rule, rpc_endpoint)
with reporter.step("Create bearer token with allowed put for others role"):
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
rule = ape.Rule(ape.Verb.ALLOW, ape.ObjectOperations.PUT, role_condition)
bearer_put = create_bearer_token(frostfs_cli, temp_directory, container, rule, rpc_endpoint)
# We will attempt requests with various headers,
# but APE rule should ignore request headers and validate only object headers
for xhdr in (self.HEADER, self.OTHER_HEADER, None):
with reporter.step("Check others access to objects without attributes"):
assert_access_to_container(
no_attributes_access[match_type],
other_wallet,
container,
objects_without_attributes.pop(),
file_path,
self.shell,
self.cluster,
xhdr=xhdr,
)
with reporter.step("Check others have full access to objects without deny attribute"):
assert_access_to_container(
allowed_access[match_type], other_wallet, container, allow_objects.pop(), file_path, self.shell, self.cluster, xhdr=xhdr
)
with reporter.step("Check others have no access to objects with deny attribute"):
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
head_object(other_wallet, container, deny_objects[0], self.shell, rpc_endpoint, xhdr=xhdr)
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
get_object_from_random_node(other_wallet, container, deny_objects[0], self.shell, self.cluster, xhdr=xhdr)
with reporter.step("Check others have access to objects with deny attribute and using bearer token"):
assert_full_access_to_container(
other_wallet, container, deny_objects.pop(), file_path, self.shell, self.cluster, bearer, xhdr
)
allow_attribute = self.OTHER_HEADER if match_type == ape.MatchType.EQUAL else self.HEADER
with reporter.step("Check others can PUT objects without denied attribute"):
put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, attributes=allow_attribute)
deny_attribute = self.HEADER if match_type == ape.MatchType.EQUAL else self.OTHER_HEADER
with reporter.step("Check others can not PUT objects with denied attribute"):
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, attributes=deny_attribute)
with reporter.step("Check others can PUT objects with denied attribute and using bearer token"):
put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, bearer_put, attributes=deny_attribute)
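For reference, a minimal standalone sketch of how the deny rule used above is assembled; the names mirror the testlib calls in this test, while the exact chain_id/as_string() serialization is testlib-internal and left out:

from frostfs_testlib.storage.dataclasses import ape

# Resource condition: object attribute "check_key" (NOT_)EQUAL "check_value".
# The role condition limits the rule to requests made under the OTHERS role.
resource_condition = ape.Condition('"check_key"', '"check_value"', ape.ConditionType.RESOURCE, ape.MatchType.EQUAL)
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
deny_rule = ape.Rule(ape.Verb.DENY, [ape.ObjectOperations.GET, ape.ObjectOperations.HEAD], [resource_condition, role_condition])
# The rule is then bound to the container target, exactly as in the test:
# frostfs_cli.ape_manager.add(rpc_endpoint, deny_rule.chain_id,
#     target_name=container, target_type="container", rule=deny_rule.as_string())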
@allure.title("Operations with allow APE rule with resource filters (match_type={match_type}, obj_size={object_size})")
@pytest.mark.parametrize("match_type", [ape.MatchType.EQUAL, ape.MatchType.NOT_EQUAL])
@pytest.mark.parametrize("object_size, container_request", [("simple", OWNER_ALLOW_ALL)], indirect=True)
def test_ape_allow_filters_object(
self,
frostfs_cli: FrostfsCli,
other_wallet: WalletInfo,
container: str,
objects_with_attributes: list[str],
objects_with_other_attributes: list[str],
objects_without_attributes: list[str],
match_type: ape.MatchType,
rpc_endpoint: str,
file_path: TestFile,
temp_directory: str,
):
if match_type == ape.MatchType.EQUAL:
allow_objects = objects_with_attributes
deny_objects = objects_with_other_attributes
allow_attribute = self.HEADER
deny_attribute = self.OTHER_HEADER
no_attributes_match_context = pytest.raises(Exception, match=OBJECT_ACCESS_DENIED)
else:
allow_objects = objects_with_other_attributes
deny_objects = objects_with_attributes
allow_attribute = self.OTHER_HEADER
deny_attribute = self.HEADER
no_attributes_match_context = expect_not_raises()
with reporter.step("Allow operations for others except few operations by resource condition via APE"):
resource_condition = ape.Condition('"check_key"', '"check_value"', ape.ConditionType.RESOURCE, match_type)
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
deny_rule = ape.Rule(ape.Verb.ALLOW, self.RESOURCE_OPERATIONS, [resource_condition, role_condition])
frostfs_cli.ape_manager.add(
rpc_endpoint, deny_rule.chain_id, target_name=container, target_type="container", rule=deny_rule.as_string()
)
with reporter.step("Wait for one block"):
self.wait_for_blocks()
with reporter.step("Check GET, PUT and HEAD operations with objects without attributes for OTHERS role"):
oid = objects_without_attributes.pop()
with no_attributes_match_context:
assert head_object(other_wallet, container, oid, self.shell, rpc_endpoint)
with no_attributes_match_context:
assert get_object_from_random_node(other_wallet, container, oid, self.shell, self.cluster)
with no_attributes_match_context:
assert put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster)
with reporter.step("Create bearer token with everything allowed for others role"):
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
rule = ape.Rule(ape.Verb.ALLOW, ALL_OBJECT_OPERATIONS, role_condition)
bearer = create_bearer_token(frostfs_cli, temp_directory, container, rule, rpc_endpoint)
with reporter.step("Check others can get and put objects without attributes and using bearer token"):
oid = objects_without_attributes[0]
with expect_not_raises():
head_object(other_wallet, container, oid, self.shell, rpc_endpoint, bearer)
with expect_not_raises():
get_object_from_random_node(other_wallet, container, oid, self.shell, self.cluster, bearer)
with expect_not_raises():
put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, bearer)
with reporter.step("Check others can get and put objects with attributes matching the filter"):
oid = allow_objects.pop()
with expect_not_raises():
head_object(other_wallet, container, oid, self.shell, rpc_endpoint)
with expect_not_raises():
get_object_from_random_node(other_wallet, container, oid, self.shell, self.cluster)
with expect_not_raises():
put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, attributes=allow_attribute)
with reporter.step("Check others cannot get and put objects without attributes matching the filter"):
oid = deny_objects[0]
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
head_object(other_wallet, container, oid, self.shell, rpc_endpoint)
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
get_object_from_random_node(other_wallet, container, oid, self.shell, self.cluster)
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, attributes=deny_attribute)
with reporter.step("Check others can get and put objects without attributes matching the filter with bearer token"):
oid = deny_objects.pop()
with expect_not_raises():
head_object(other_wallet, container, oid, self.shell, rpc_endpoint, bearer)
with expect_not_raises():
get_object_from_random_node(other_wallet, container, oid, self.shell, self.cluster, bearer)
with expect_not_raises():
put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, bearer, attributes=allow_attribute)
@allure.title("PUT and GET object using bearer with objectID in filter (obj_size={object_size}, match_type=NOT_EQUAL)")
@pytest.mark.parametrize("container_request", [OWNER_ALLOW_ALL], indirect=True)
def test_ape_filter_object_id_not_equals(
self,
frostfs_cli: FrostfsCli,
default_wallet: WalletInfo,
other_wallet: WalletInfo,
container: str,
temp_directory: str,
file_path: TestFile,
):
with reporter.step("Put object to container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step("Create bearer token with objectID filter"):
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
object_condition = ape.Condition.by_object_id(oid, ape.ConditionType.RESOURCE, ape.MatchType.NOT_EQUAL)
rule = ape.Rule(ape.Verb.ALLOW, ALL_OBJECT_OPERATIONS, [role_condition, object_condition])
bearer = create_bearer_token(frostfs_cli, temp_directory, container, rule, self.cluster.default_rpc_endpoint)
with reporter.step("Others should be able to put object using bearer token"):
with expect_not_raises():
put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, bearer)
with reporter.step("Others should not be able to get object matching the filter"):
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
get_object_from_random_node(other_wallet, container, oid, self.shell, self.cluster, bearer)
@allure.title("PUT and GET object using bearer with objectID in filter (obj_size={object_size}, match_type=EQUAL)")
@pytest.mark.parametrize("container_request", [OWNER_ALLOW_ALL], indirect=True)
def test_ape_filter_object_id_equals(
self,
frostfs_cli: FrostfsCli,
default_wallet: WalletInfo,
other_wallet: WalletInfo,
container: str,
temp_directory: str,
file_path: TestFile,
):
with reporter.step("Put object to container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step("Create bearer token with objectID filter"):
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
object_condition = ape.Condition.by_object_id(oid, ape.ConditionType.RESOURCE, ape.MatchType.EQUAL)
rule = ape.Rule(ape.Verb.ALLOW, ALL_OBJECT_OPERATIONS, [role_condition, object_condition])
bearer = create_bearer_token(frostfs_cli, temp_directory, container, rule, self.cluster.default_rpc_endpoint)
with reporter.step("Others should not be able to put object using bearer token"):
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
put_object_to_random_node(other_wallet, file_path, container, self.shell, self.cluster, bearer)
with reporter.step("Others should be able to get object matching the filter"):
with expect_not_raises():
get_object_from_random_node(other_wallet, container, oid, self.shell, self.cluster, bearer)
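Taken together, the two tests above pin down the objectID filter semantics: ALLOW + EQUAL grants access only to the named object (so PUT of any new object is denied, since it gets a different ID), while ALLOW + NOT_EQUAL grants access to everything except it (PUT succeeds, GET of the named object is denied). A minimal sketch of the two conditions; the oid value here is hypothetical:

oid = "EXAMPLE_OBJECT_ID"  # hypothetical object ID returned by a previous PUT
only_this_object = ape.Condition.by_object_id(oid, ape.ConditionType.RESOURCE, ape.MatchType.EQUAL)
anything_but_this = ape.Condition.by_object_id(oid, ape.ConditionType.RESOURCE, ape.MatchType.NOT_EQUAL)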

View file

@ -1,313 +0,0 @@
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.resources.error_patterns import OBJECT_ACCESS_DENIED
from frostfs_testlib.storage.dataclasses import ape
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import TestFile
from ....helpers.bearer_token import create_bearer_token
from ....helpers.container_access import (
ALL_OBJECT_OPERATIONS,
assert_access_to_container,
assert_full_access_to_container,
assert_no_access_to_container,
)
@pytest.mark.nightly
@pytest.mark.sanity
@pytest.mark.bearer
@pytest.mark.ape
@pytest.mark.parametrize("user_tag", ["ApeBearer"], indirect=True) # provide dedicated user with no APE side-policies
class TestApeBearer(ClusterTestBase):
# TODO: PATCH operation is excluded, since it requires specific permissions
# that do not apply when testing all operations at once
@allure.title("Operations with BearerToken (role={role}, obj_size={object_size})")
@pytest.mark.parametrize("role", [ape.Role.OWNER, ape.Role.OTHERS], indirect=True)
@pytest.mark.parametrize("objects", [4], indirect=True)
def test_bearer_token_operations(
self,
container: str,
objects: list[str],
frostfs_cli: FrostfsCli,
temp_directory: str,
test_wallet: WalletInfo,
role: ape.Role,
test_file: TestFile,
rpc_endpoint: str,
):
with reporter.step(f"Check {role} has full access to container without bearer token"):
assert_full_access_to_container(test_wallet, container, objects.pop(), test_file, self.shell, self.cluster)
with reporter.step(f"Deny all operations for everyone via APE"):
rule = ape.Rule(ape.Verb.DENY, ALL_OBJECT_OPERATIONS)
frostfs_cli.ape_manager.add(rpc_endpoint, rule.chain_id, target_name=container, target_type="container", rule=rule.as_string())
with reporter.step("Wait for one block"):
self.wait_for_blocks()
with reporter.step(f"Create bearer token with all operations allowed"):
bearer = create_bearer_token(
frostfs_cli,
temp_directory,
container,
rule=ape.Rule(ape.Verb.ALLOW, ALL_OBJECT_OPERATIONS),
endpoint=rpc_endpoint,
)
with reporter.step(f"Check {role} without token has no access to all operations with container"):
assert_no_access_to_container(test_wallet, container, objects.pop(), test_file, self.shell, self.cluster)
with reporter.step(f"Check {role} with token has access to all operations with container"):
assert_full_access_to_container(test_wallet, container, objects.pop(), test_file, self.shell, self.cluster, bearer)
with reporter.step(f"Remove deny rule from APE"):
frostfs_cli.ape_manager.remove(rpc_endpoint, rule.chain_id, target_name=container, target_type="container")
with reporter.step("Wait for one block"):
self.wait_for_blocks()
with reporter.step(f"Check {role} without token has access to all operations with container"):
assert_full_access_to_container(test_wallet, container, objects.pop(), test_file, self.shell, self.cluster)
@allure.title("Patch operation with BearerToken (object_size={object_size})")
@pytest.mark.parametrize("objects", [1], indirect=True)
def test_patch_object_with_bearer_token(
self,
frostfs_cli: FrostfsCli,
grpc_client_with_other_wallet: GrpcClientWrapper,
container: str,
objects: list[str],
test_file: TestFile,
temp_directory: str,
):
oid = objects[0]
with reporter.step("Check if the patch is available with another wallet"):
patched_oid = grpc_client_with_other_wallet.object.patch(
container,
oid,
self.cluster.default_rpc_endpoint,
ranges=["100:300"],
payloads=[test_file],
new_attrs="allow-patch=true",
timeout="200s",
)
assert patched_oid != oid, "OID of patched object must be different from original one"
oid = patched_oid
rule = ape.Rule(ape.Verb.DENY, ape.ObjectOperations.PATCH)
with reporter.step("Deny PATCH operation for everyone via APE"):
frostfs_cli.ape_manager.add(
self.cluster.default_rpc_endpoint,
rule.chain_id,
target_name=container,
target_type="container",
rule=rule.as_string(),
)
with reporter.step("Wait for one block"):
self.wait_for_blocks(1)
with reporter.step("Check that patch is not allowed with another wallet"):
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
grpc_client_with_other_wallet.object.patch(
container,
oid,
self.cluster.default_rpc_endpoint,
ranges=["100:300"],
payloads=[test_file],
new_attrs="deny-patch=true",
timeout="200s",
)
with reporter.step("Create bearer token with all operations allowed"):
bearer = create_bearer_token(
frostfs_cli,
temp_directory,
container,
rule=ape.Rule(ape.Verb.ALLOW, ALL_OBJECT_OPERATIONS),
endpoint=self.cluster.default_rpc_endpoint,
)
with reporter.step("Check that patch is available with another wallet with BearerToken"):
patched_oid = grpc_client_with_other_wallet.object.patch(
container,
oid,
self.cluster.default_rpc_endpoint,
bearer=bearer,
ranges=["100:300"],
payloads=[test_file],
new_attrs="bearer-patch=true",
timeout="200s",
)
assert patched_oid != oid, "OID of patched object must be different from original one"
oid = patched_oid
with reporter.step(f"Remove deny rule from APE"):
frostfs_cli.ape_manager.remove(
self.cluster.default_rpc_endpoint,
rule.chain_id,
target_name=container,
target_type="container",
)
with reporter.step("Wait for one block"):
self.wait_for_blocks(1)
with reporter.step("Check if the patch is available with another wallet"):
patched_oid = grpc_client_with_other_wallet.object.patch(
container,
oid,
self.cluster.default_rpc_endpoint,
bearer=bearer,
ranges=["100:300"],
payloads=[test_file],
new_attrs="allow-patch-2=true",
timeout="200s",
)
assert patched_oid != oid, "OID of patched object must be different from original one"
oid = patched_oid
attrs = {"allow-patch", "bearer-patch", "allow-patch-2"}
with reporter.step("Ensure that all attributes match expected values"):
object_info: dict = grpc_client_with_other_wallet.object.head(container, oid, self.cluster.default_rpc_endpoint)
object_attrs: dict = object_info["header"]["attributes"]
assert attrs <= object_attrs.keys(), f"Received attributes do not match expected ones: {object_attrs}"
assert all(
v == "true" for k, v in object_attrs.items() if k in attrs
), f"Received attributes do not match expected ones: {object_attrs}"
@allure.title("BearerToken for compound operations (obj_size={object_size})")
@pytest.mark.parametrize("objects", [4], indirect=True)
def test_bearer_token_compound_operations(
self,
frostfs_cli: FrostfsCli,
temp_directory: str,
default_wallet: WalletInfo,
other_wallet: WalletInfo,
container: str,
objects: list[str],
rpc_endpoint: str,
test_file: TestFile,
):
"""
A Bearer Token COMPLETELY overrides the chains set for the specific target.
Thus, any restrictions or permissions must be explicitly defined in the BT.
"""
wallets_map = {
ape.Role.OWNER: default_wallet,
ape.Role.OTHERS: other_wallet,
}
access_map = {
ape.Role.OWNER: {
ape.ObjectOperations.PUT: True,
ape.ObjectOperations.GET: True,
ape.ObjectOperations.HEAD: True,
ape.ObjectOperations.GET_RANGE: True,
ape.ObjectOperations.GET_RANGE_HASH: True,
ape.ObjectOperations.SEARCH: True,
ape.ObjectOperations.DELETE: False,
},
ape.Role.OTHERS: {
ape.ObjectOperations.PUT: True,
ape.ObjectOperations.GET: True,
ape.ObjectOperations.HEAD: True,
ape.ObjectOperations.GET_RANGE: False,
ape.ObjectOperations.GET_RANGE_HASH: False,
ape.ObjectOperations.SEARCH: False,
ape.ObjectOperations.DELETE: True,
},
}
bt_access_map = {
ape.Role.OWNER: {
ape.ObjectOperations.PUT: True,
ape.ObjectOperations.GET: False,
ape.ObjectOperations.HEAD: True,
ape.ObjectOperations.GET_RANGE: True,
ape.ObjectOperations.GET_RANGE_HASH: False,
ape.ObjectOperations.SEARCH: False,
ape.ObjectOperations.DELETE: True,
},
# A Bearer Token COMPLETELY overrides the chains set for the specific target.
# Thus, any restrictions or permissions must be explicitly defined in the BT.
ape.Role.OTHERS: {
ape.ObjectOperations.PUT: False,
ape.ObjectOperations.GET: False,
ape.ObjectOperations.HEAD: False,
ape.ObjectOperations.GET_RANGE: False,
ape.ObjectOperations.GET_RANGE_HASH: False,
ape.ObjectOperations.SEARCH: False,
ape.ObjectOperations.DELETE: False,
},
}
# Operations that we will deny for each role via APE
deny_map = {
ape.Role.OWNER: [ape.ObjectOperations.DELETE],
ape.Role.OTHERS: [
ape.ObjectOperations.SEARCH,
ape.ObjectOperations.GET_RANGE_HASH,
ape.ObjectOperations.GET_RANGE,
],
}
# Operations that we will allow for each role with bearer token
bearer_map = {
ape.Role.OWNER: [
ape.ObjectOperations.PUT,
ape.ObjectOperations.HEAD,
ape.ObjectOperations.GET_RANGE,
# Delete also requires PUT (to create a tombstone) and HEAD (to get a simple object's header)
ape.ObjectOperations.DELETE,
],
ape.Role.OTHERS: [
ape.ObjectOperations.GET,
ape.ObjectOperations.GET_RANGE,
ape.ObjectOperations.GET_RANGE_HASH,
],
}
conditions_map = {
ape.Role.OWNER: ape.Condition.by_role(ape.Role.OWNER),
ape.Role.OTHERS: ape.Condition.by_role(ape.Role.OTHERS),
}
verb_map = {ape.Role.OWNER: ape.Verb.ALLOW, ape.Role.OTHERS: ape.Verb.DENY}
for role, operations in deny_map.items():
with reporter.step(f"Add APE deny rule for {role}"):
rule = ape.Rule(ape.Verb.DENY, operations, conditions_map[role])
frostfs_cli.ape_manager.add(
rpc_endpoint, rule.chain_id, target_name=container, target_type="container", rule=rule.as_string()
)
with reporter.step("Wait for one block"):
self.wait_for_blocks()
for role, wallet in wallets_map.items():
with reporter.step(f"Assert access to container without bearer token for {role}"):
assert_access_to_container(access_map[role], wallet, container, objects.pop(), test_file, self.shell, self.cluster)
bearer_tokens = {}
for role in wallets_map.keys():
with reporter.step(f"Create bearer token for {role}"):
rule = ape.Rule(verb_map[role], bearer_map[role], conditions_map[role])
bt = create_bearer_token(frostfs_cli, temp_directory, container, rule, rpc_endpoint)
bearer_tokens[role] = bt
for role, wallet in wallets_map.items():
with reporter.step(f"Assert access to container with bearer token for {role}"):
assert_access_to_container(
bt_access_map[role], wallet, container, objects.pop(), test_file, self.shell, self.cluster, bearer_tokens[role]
)
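Because the bearer token fully replaces the container chains, bt_access_map above can be derived mechanically from verb_map and bearer_map instead of being hand-written. A sketch of that derivation (it assumes, as bearer_map already does, that DELETE's auxiliary PUT/HEAD operations are granted alongside it):

def derive_bt_access(verb: ape.Verb, bearer_ops: list, all_ops: list) -> dict:
    # ALLOW bearer: only the operations listed in the token pass.
    # DENY bearer: nothing passes, since no chain outside the token applies.
    if verb == ape.Verb.ALLOW:
        return {op: op in bearer_ops for op in all_ops}
    return {op: False for op in all_ops}

# derive_bt_access(verb_map[role], bearer_map[role], list(access_map[role]))
# reproduces bt_access_map[role] for both roles above.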

View file

@ -1,98 +0,0 @@
import json
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import search_nodes_with_container
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses import ape
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.storage.grpc_operations.client_wrappers import CliClientWrapper
from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
from frostfs_testlib.testing.parallel import parallel
@pytest.fixture(scope="session")
def ir_wallet(cluster: Cluster) -> WalletInfo:
return WalletInfo.from_node(cluster.ir_nodes[0])
@pytest.fixture(scope="session")
def storage_wallet(cluster: Cluster) -> WalletInfo:
return WalletInfo.from_node(cluster.storage_nodes[0])
@pytest.fixture(scope="session")
def role(request: pytest.FixtureRequest):
return request.param
@pytest.fixture(scope="session")
def test_wallet(default_wallet: WalletInfo, other_wallet: WalletInfo, role: ape.Role):
role_to_wallet_map = {
ape.Role.OWNER: default_wallet,
ape.Role.OTHERS: other_wallet,
}
assert role in role_to_wallet_map, f"Missing wallet with role {role}"
return role_to_wallet_map[role]
@pytest.fixture(scope="function", params=[5])
def objects(
container: str,
default_wallet: WalletInfo,
client_shell: Shell,
cluster: Cluster,
test_file: str,
request: pytest.FixtureRequest,
):
object_count = request.param
with reporter.step("Add test objects to container"):
put_results = parallel(
[put_object_to_random_node] * object_count,
wallet=default_wallet,
path=test_file,
cid=container,
shell=client_shell,
cluster=cluster,
)
objects_oids = [put_result.result() for put_result in put_results]
return objects_oids
@pytest.fixture
def container_nodes(default_wallet: WalletInfo, container: str, client_shell: Shell, cluster: Cluster) -> list[ClusterNode]:
cid = container
container_holder_nodes = search_nodes_with_container(default_wallet, cid, client_shell, cluster.default_rpc_endpoint, cluster)
report_data = {node.id: node.host_ip for node in container_holder_nodes}
reporter.attach(json.dumps(report_data, indent=2), "container_nodes.json")
return container_holder_nodes
@pytest.fixture
def container_node_wallet(container_nodes: list[ClusterNode]) -> WalletInfo:
return WalletInfo.from_node(container_nodes[0].storage_node)
@pytest.fixture
def grpc_client_with_container_wallet(client_shell: Shell, container_node_wallet: WalletInfo) -> GrpcClientWrapper:
return CliClientWrapper(FrostfsCli(client_shell, FROSTFS_CLI_EXEC, container_node_wallet.config_path))
@pytest.fixture(scope="session")
def grpc_client_with_other_wallet(client_shell: Shell, other_wallet: WalletInfo) -> GrpcClientWrapper:
return CliClientWrapper(FrostfsCli(client_shell, FROSTFS_CLI_EXEC, other_wallet.config_path))
@pytest.fixture(scope="session")
def grpc_client_with_ir_wallet(client_shell: Shell, ir_wallet: WalletInfo) -> GrpcClientWrapper:
return CliClientWrapper(FrostfsCli(client_shell, FROSTFS_CLI_EXEC, ir_wallet.config_path))
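A usage sketch (hypothetical test body) showing how these fixtures compose; the object.head call mirrors the one used earlier in this suite:

def test_example(container: str, objects: list[str], grpc_client_with_other_wallet: GrpcClientWrapper, cluster: Cluster):
    oid = objects[0]  # pre-populated by the `objects` fixture via parallel PUTs
    info = grpc_client_with_other_wallet.object.head(container, oid, cluster.default_rpc_endpoint)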

View file

@ -0,0 +1,86 @@
from dataclasses import dataclass
from typing import Dict, List, Optional
import allure
import pytest
from common import (
ASSETS_DIR,
IR_WALLET_CONFIG,
IR_WALLET_PATH,
STORAGE_WALLET_CONFIG,
STORAGE_WALLET_PATH,
WALLET_CONFIG,
)
from file_helper import generate_file
from python_keywords.acl import EACLRole
from python_keywords.container import create_container
from python_keywords.neofs_verbs import put_object
from wallet import init_wallet
from wellknown_acl import PUBLIC_ACL
OBJECT_COUNT = 5
@dataclass
class Wallet:
wallet_path: Optional[str] = None
config_path: Optional[str] = None
@dataclass
class Wallets:
wallets: Dict[EACLRole, List[Wallet]]
def get_wallet(self, role: EACLRole = EACLRole.USER) -> Wallet:
return self.wallets[role][0]
def get_wallets_list(self, role: EACLRole = EACLRole.USER) -> List[Wallet]:
return self.wallets[role]
@pytest.fixture(scope="module")
def wallets(prepare_wallet_and_deposit):
yield Wallets(
wallets={
EACLRole.USER: [
Wallet(wallet_path=prepare_wallet_and_deposit, config_path=WALLET_CONFIG)
],
EACLRole.OTHERS: [
Wallet(wallet_path=init_wallet(ASSETS_DIR)[0], config_path=WALLET_CONFIG),
Wallet(wallet_path=init_wallet(ASSETS_DIR)[0], config_path=WALLET_CONFIG),
],
EACLRole.SYSTEM: [
Wallet(wallet_path=IR_WALLET_PATH, config_path=IR_WALLET_CONFIG),
Wallet(wallet_path=STORAGE_WALLET_PATH, config_path=STORAGE_WALLET_CONFIG),
],
}
)
@pytest.fixture(scope="module")
def file_path():
yield generate_file()
@pytest.fixture(scope="function")
def eacl_container_with_objects(wallets, client_shell, file_path):
user_wallet = wallets.get_wallet()
with allure.step("Create eACL public container"):
cid = create_container(user_wallet.wallet_path, basic_acl=PUBLIC_ACL, shell=client_shell)
with allure.step("Add test objects to container"):
objects_oids = [
put_object(
user_wallet.wallet_path,
file_path,
cid,
attributes={"key1": "val1", "key": val, "key2": "abc"},
shell=client_shell,
)
for val in range(OBJECT_COUNT)
]
yield cid, objects_oids, file_path
# with allure.step('Delete eACL public container'):
# delete_container(user_wallet, cid)
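Usage sketch: tests in this module resolve wallets by eACL role through the fixture above, for example:

user_wallet = wallets.get_wallet()                       # EACLRole.USER by default
other_wallet = wallets.get_wallet(role=EACLRole.OTHERS)  # first OTHERS wallet
ir_wallet, storage_wallet = wallets.get_wallets_list(role=EACLRole.SYSTEM)[:2]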

View file

@ -0,0 +1,243 @@
import logging
from typing import Optional
import allure
import pytest
from common import (
ASSETS_DIR,
COMPLEX_OBJ_SIZE,
FREE_STORAGE,
IR_WALLET_CONFIG,
IR_WALLET_PASS,
IR_WALLET_PATH,
SIMPLE_OBJ_SIZE,
)
from epoch import tick_epoch
from file_helper import generate_file
from grpc_responses import OBJECT_ACCESS_DENIED, OBJECT_NOT_FOUND
from neofs_testlib.shell import Shell
from python_keywords.acl import (
EACLAccess,
EACLOperation,
EACLRole,
EACLRule,
create_eacl,
form_bearertoken_file,
set_eacl,
)
from python_keywords.container import create_container
from python_keywords.neofs_verbs import put_object
from python_keywords.payment_neogo import neofs_deposit, transfer_mainnet_gas
from python_keywords.storage_group import (
delete_storagegroup,
get_storagegroup,
list_storagegroup,
put_storagegroup,
verify_get_storage_group,
verify_list_storage_group,
)
from wallet import init_wallet
logger = logging.getLogger("NeoLogger")
deposit = 30
@pytest.mark.parametrize(
"object_size",
[SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE],
ids=["simple object", "complex object"],
)
@pytest.mark.storage_group
class TestStorageGroup:
@pytest.fixture(autouse=True)
def prepare_two_wallets(self, prepare_wallet_and_deposit):
self.main_wallet = prepare_wallet_and_deposit
self.other_wallet, _, _ = init_wallet(ASSETS_DIR)
if not FREE_STORAGE:
transfer_mainnet_gas(self.other_wallet, deposit + 1)
neofs_deposit(self.other_wallet, deposit)
@allure.title("Test Storage Group in Private Container")
def test_storagegroup_basic_private_container(self, client_shell, object_size):
cid = create_container(self.main_wallet, shell=client_shell)
file_path = generate_file(object_size)
oid = put_object(self.main_wallet, file_path, cid, shell=client_shell)
objects = [oid]
storage_group = put_storagegroup(self.main_wallet, cid, objects)
self.expect_success_for_storagegroup_operations(
self.main_wallet, cid, objects, object_size, client_shell
)
self.expect_failure_for_storagegroup_operations(
self.other_wallet, cid, objects, storage_group
)
self.storagegroup_operations_by_system_ro_container(
self.main_wallet, cid, objects, object_size, client_shell
)
@allure.title("Test Storage Group in Public Container")
def test_storagegroup_basic_public_container(self, client_shell, object_size):
cid = create_container(self.main_wallet, basic_acl="public-read-write", shell=client_shell)
file_path = generate_file(object_size)
oid = put_object(self.main_wallet, file_path, cid, shell=client_shell)
objects = [oid]
self.expect_success_for_storagegroup_operations(
self.main_wallet, cid, objects, object_size, client_shell
)
self.expect_success_for_storagegroup_operations(
self.other_wallet, cid, objects, object_size, client_shell
)
self.storagegroup_operations_by_system_ro_container(
self.main_wallet, cid, objects, object_size, client_shell
)
@allure.title("Test Storage Group in Read-Only Container")
def test_storagegroup_basic_ro_container(self, client_shell, object_size):
cid = create_container(self.main_wallet, basic_acl="public-read", shell=client_shell)
file_path = generate_file(object_size)
oid = put_object(self.main_wallet, file_path, cid, shell=client_shell)
objects = [oid]
self.expect_success_for_storagegroup_operations(
self.main_wallet, cid, objects, object_size, client_shell
)
self.storagegroup_operations_by_other_ro_container(
self.main_wallet, self.other_wallet, cid, objects, object_size, client_shell
)
self.storagegroup_operations_by_system_ro_container(
self.main_wallet, cid, objects, object_size, client_shell
)
@allure.title("Test Storage Group with Bearer Allow")
def test_storagegroup_bearer_allow(self, client_shell, object_size):
cid = create_container(
self.main_wallet, basic_acl="eacl-public-read-write", shell=client_shell
)
file_path = generate_file(object_size)
oid = put_object(self.main_wallet, file_path, cid, shell=client_shell)
objects = [oid]
self.expect_success_for_storagegroup_operations(
self.main_wallet, cid, objects, object_size, client_shell
)
storage_group = put_storagegroup(self.main_wallet, cid, objects)
eacl_deny = [
EACLRule(access=EACLAccess.DENY, role=role, operation=op)
for op in EACLOperation
for role in EACLRole
]
set_eacl(self.main_wallet, cid, create_eacl(cid, eacl_deny))
self.expect_failure_for_storagegroup_operations(
self.main_wallet, cid, objects, storage_group
)
bearer_file = form_bearertoken_file(
self.main_wallet,
cid,
[
EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.USER)
for op in EACLOperation
],
)
self.expect_success_for_storagegroup_operations(
self.main_wallet, cid, objects, object_size, client_shell, bearer_file
)
@allure.title("Test to check Storage Group lifetime")
def test_storagegroup_lifetime(self, client_shell, object_size):
cid = create_container(self.main_wallet, shell=client_shell)
file_path = generate_file(object_size)
oid = put_object(self.main_wallet, file_path, cid, shell=client_shell)
objects = [oid]
storage_group = put_storagegroup(self.main_wallet, cid, objects, lifetime="1")
tick_epoch()
tick_epoch()
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
get_storagegroup(self.main_wallet, cid, storage_group)
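The lifetime test relies on epoch arithmetic: a storage group created with lifetime="1" is assumed to expire once the current epoch passes its creation epoch plus the lifetime, so two tick_epoch() calls guarantee OBJECT_NOT_FOUND. A sketch with hypothetical values:

creation_epoch, lifetime = 10, 1              # hypothetical values
expiration_epoch = creation_epoch + lifetime  # assumed expiry rule
# after two tick_epoch() calls the current epoch is 12 > expiration_epoch,
# so get_storagegroup(...) must fail with OBJECT_NOT_FOUND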
@staticmethod
@allure.step("Run Storage Group Operations And Expect Success")
def expect_success_for_storagegroup_operations(
wallet: str,
cid: str,
obj_list: list,
object_size: int,
shell: Shell,
bearer_token: Optional[str] = None,
):
"""
This func verifies that the given wallet is allowed to
Put, List, Get and Delete the Storage Group which contains
the Object.
"""
storage_group = put_storagegroup(wallet, cid, obj_list, bearer_token)
verify_list_storage_group(wallet, cid, storage_group, bearer_token)
verify_get_storage_group(
wallet, cid, storage_group, obj_list, object_size, shell, bearer_token
)
delete_storagegroup(wallet, cid, storage_group, bearer_token)
@staticmethod
@allure.step("Run Storage Group Operations And Expect Failure")
def expect_failure_for_storagegroup_operations(
wallet: str, cid: str, obj_list: list, storagegroup: str
):
"""
This func verifies that the given wallet is not allowed to
Put, List, Get or Delete the Storage Group which contains
the Object.
"""
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
put_storagegroup(wallet, cid, obj_list)
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
list_storagegroup(wallet, cid)
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
get_storagegroup(wallet, cid, storagegroup)
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
delete_storagegroup(wallet, cid, storagegroup)
@staticmethod
@allure.step("Run Storage Group Operations On Other's Behalf In RO Container")
def storagegroup_operations_by_other_ro_container(
owner_wallet: str,
other_wallet: str,
cid: str,
obj_list: list,
object_size: int,
shell: Shell,
):
storage_group = put_storagegroup(owner_wallet, cid, obj_list)
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
put_storagegroup(other_wallet, cid, obj_list)
verify_list_storage_group(other_wallet, cid, storage_group)
verify_get_storage_group(other_wallet, cid, storage_group, obj_list, object_size, shell)
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
delete_storagegroup(other_wallet, cid, storage_group)
@staticmethod
@allure.step("Run Storage Group Operations On Systems's Behalf In RO Container")
def storagegroup_operations_by_system_ro_container(
wallet: str, cid: str, obj_list: list, object_size: int, shell: Shell
):
"""
In this func we create a Storage Group on behalf of the Inner Ring key
and include an Object created on behalf of some user. We expect
that the System key is allowed to perform all operations except PUT and DELETE.
"""
if not FREE_STORAGE:
transfer_mainnet_gas(IR_WALLET_PATH, deposit + 1, wallet_password=IR_WALLET_PASS)
neofs_deposit(IR_WALLET_PATH, deposit, wallet_password=IR_WALLET_PASS)
storage_group = put_storagegroup(wallet, cid, obj_list)
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
put_storagegroup(IR_WALLET_PATH, cid, obj_list, wallet_config=IR_WALLET_CONFIG)
verify_list_storage_group(
IR_WALLET_PATH, cid, storage_group, wallet_config=IR_WALLET_CONFIG
)
verify_get_storage_group(
IR_WALLET_PATH,
cid,
storage_group,
obj_list,
object_size,
shell,
wallet_config=IR_WALLET_CONFIG,
)
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
delete_storagegroup(IR_WALLET_PATH, cid, storage_group, wallet_config=IR_WALLET_CONFIG)
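Summarizing the scenarios above as an expected-access matrix, derived from the assertions in the tests ("system" is the Inner Ring key, which is never allowed to PUT or DELETE a storage group):

SG_EXPECTED_ACCESS = {
    "private":           {"owner": "full", "other": "none",      "system": "read-only"},
    "public-read-write": {"owner": "full", "other": "full",      "system": "read-only"},
    "public-read":       {"owner": "full", "other": "read-only", "system": "read-only"},
}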

View file

@ -0,0 +1,135 @@
import allure
import pytest
from python_keywords.acl import EACLRole
from python_keywords.container import create_container
from python_keywords.container_access import (
check_full_access_to_container,
check_no_access_to_container,
check_read_only_container,
)
from python_keywords.neofs_verbs import put_object
from wellknown_acl import PRIVATE_ACL_F, PUBLIC_ACL_F, READONLY_ACL_F
@pytest.mark.sanity
@pytest.mark.acl
@pytest.mark.acl_basic
class TestACLBasic:
@pytest.fixture(scope="function")
def public_container(self, client_shell, wallets):
user_wallet = wallets.get_wallet()
with allure.step("Create public container"):
cid_public = create_container(
user_wallet.wallet_path, basic_acl=PUBLIC_ACL_F, shell=client_shell
)
yield cid_public
# with allure.step('Delete public container'):
# delete_container(user_wallet.wallet_path, cid_public)
@pytest.fixture(scope="function")
def private_container(self, client_shell, wallets):
user_wallet = wallets.get_wallet()
with allure.step("Create private container"):
cid_private = create_container(
user_wallet.wallet_path, basic_acl=PRIVATE_ACL_F, shell=client_shell
)
yield cid_private
# with allure.step('Delete private container'):
# delete_container(user_wallet.wallet_path, cid_private)
@pytest.fixture(scope="function")
def read_only_container(self, client_shell, wallets):
user_wallet = wallets.get_wallet()
with allure.step("Create public readonly container"):
cid_read_only = create_container(
user_wallet.wallet_path, basic_acl=READONLY_ACL_F, shell=client_shell
)
yield cid_read_only
# with allure.step('Delete public readonly container'):
# delete_container(user_wallet.wallet_path, cid_read_only)
@allure.title("Test basic ACL on public container")
def test_basic_acl_public(self, wallets, client_shell, public_container, file_path):
"""
Test basic ACL set during public container creation.
"""
user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(role=EACLRole.OTHERS)
cid = public_container
for wallet, desc in ((user_wallet, "owner"), (other_wallet, "other users")):
with allure.step("Add test objects to container"):
# We create new objects for each wallet because check_full_access_to_container
# deletes the object
owner_object_oid = put_object(
user_wallet.wallet_path,
file_path,
cid,
shell=client_shell,
attributes={"created": "owner"},
)
other_object_oid = put_object(
other_wallet.wallet_path,
file_path,
cid,
shell=client_shell,
attributes={"created": "other"},
)
with allure.step(f"Check {desc} has full access to public container"):
check_full_access_to_container(
wallet.wallet_path, cid, owner_object_oid, file_path, shell=client_shell
)
check_full_access_to_container(
wallet.wallet_path, cid, other_object_oid, file_path, shell=client_shell
)
@allure.title("Test basic ACL on private container")
def test_basic_acl_private(self, wallets, client_shell, private_container, file_path):
"""
Test basic ACL set during private container creation.
"""
user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(role=EACLRole.OTHERS)
cid = private_container
with allure.step("Add test objects to container"):
owner_object_oid = put_object(
user_wallet.wallet_path, file_path, cid, shell=client_shell
)
with allure.step("Check only owner has full access to private container"):
with allure.step("Check no one except owner has access to operations with container"):
check_no_access_to_container(
other_wallet.wallet_path, cid, owner_object_oid, file_path, shell=client_shell
)
with allure.step("Check owner has full access to private container"):
check_full_access_to_container(
user_wallet.wallet_path, cid, owner_object_oid, file_path, shell=client_shell
)
@allure.title("Test basic ACL on readonly container")
def test_basic_acl_readonly(self, wallets, client_shell, read_only_container, file_path):
"""
Test basic ACL Operations for Read-Only Container.
"""
user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(role=EACLRole.OTHERS)
cid = read_only_container
with allure.step("Add test objects to container"):
object_oid = put_object(user_wallet.wallet_path, file_path, cid, shell=client_shell)
with allure.step("Check other has read-only access to operations with container"):
check_read_only_container(
other_wallet.wallet_path, cid, object_oid, file_path, shell=client_shell
)
with allure.step("Check owner has full access to public container"):
check_full_access_to_container(
user_wallet.wallet_path, cid, object_oid, file_path, shell=client_shell
)
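For orientation, a sketch of what the three check_* helpers used above verify; the semantics are inferred from the helper names and the surrounding tests, so treat the exact operation lists as an assumption:

CHECK_SEMANTICS = {
    "check_full_access_to_container": "PUT/GET/HEAD/RANGE/SEARCH/DELETE all succeed (the object is deleted at the end)",
    "check_no_access_to_container": "every object operation is denied",
    "check_read_only_container": "read operations succeed; PUT and DELETE are denied",
}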

View file

@ -0,0 +1,221 @@
import allure
import pytest
from python_keywords.acl import (
EACLAccess,
EACLOperation,
EACLRole,
EACLRule,
create_eacl,
form_bearertoken_file,
set_eacl,
wait_for_cache_expired,
)
from python_keywords.container_access import (
check_custom_access_to_container,
check_full_access_to_container,
check_no_access_to_container,
)
@pytest.mark.sanity
@pytest.mark.acl
@pytest.mark.acl_bearer
class TestACLBearer:
@pytest.mark.parametrize("role", [EACLRole.USER, EACLRole.OTHERS])
def test_bearer_token_operations(
self, wallets, client_shell, eacl_container_with_objects, role
):
allure.dynamic.title(f"Testcase to validate NeoFS operations with {role.value} BearerToken")
cid, objects_oids, file_path = eacl_container_with_objects
user_wallet = wallets.get_wallet()
deny_wallet = wallets.get_wallet(role)
with allure.step(f"Check {role.value} has full access to container without bearer token"):
check_full_access_to_container(
deny_wallet.wallet_path,
cid,
objects_oids.pop(),
file_path,
wallet_config=deny_wallet.config_path,
shell=client_shell,
)
with allure.step(f"Set deny all operations for {role.value} via eACL"):
eacl = [
EACLRule(access=EACLAccess.DENY, role=role, operation=op) for op in EACLOperation
]
eacl_file = create_eacl(cid, eacl, shell=client_shell)
set_eacl(user_wallet.wallet_path, cid, eacl_file, shell=client_shell)
wait_for_cache_expired()
with allure.step(f"Create bearer token for {role.value} with all operations allowed"):
bearer_token = form_bearertoken_file(
user_wallet.wallet_path,
cid,
[
EACLRule(operation=op, access=EACLAccess.ALLOW, role=role)
for op in EACLOperation
],
shell=client_shell,
)
with allure.step(
f"Check {role.value} without token has no access to all operations with container"
):
check_no_access_to_container(
deny_wallet.wallet_path,
cid,
objects_oids.pop(),
file_path,
wallet_config=deny_wallet.config_path,
shell=client_shell,
)
with allure.step(
f"Check {role.value} with token has access to all operations with container"
):
check_full_access_to_container(
deny_wallet.wallet_path,
cid,
objects_oids.pop(),
file_path,
bearer=bearer_token,
wallet_config=deny_wallet.config_path,
shell=client_shell,
)
with allure.step(f"Set allow all operations for {role.value} via eACL"):
eacl = [
EACLRule(access=EACLAccess.ALLOW, role=role, operation=op) for op in EACLOperation
]
eacl_file = create_eacl(cid, eacl, shell=client_shell)
set_eacl(user_wallet.wallet_path, cid, eacl_file, shell=client_shell)
wait_for_cache_expired()
with allure.step(
f"Check {role.value} without token has access to all operations with container"
):
check_full_access_to_container(
deny_wallet.wallet_path,
cid,
objects_oids.pop(),
file_path,
wallet_config=deny_wallet.config_path,
shell=client_shell,
)
@allure.title("BearerToken Operations for compound Operations")
def test_bearer_token_compound_operations(
self, wallets, client_shell, eacl_container_with_objects
):
cid, objects_oids, file_path = eacl_container_with_objects
user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(role=EACLRole.OTHERS)
# Operations that we will deny for each role via eACL
deny_map = {
EACLRole.USER: [EACLOperation.DELETE],
EACLRole.OTHERS: [
EACLOperation.SEARCH,
EACLOperation.GET_RANGE_HASH,
EACLOperation.GET_RANGE,
],
}
# Operations that we will allow for each role with bearer token
bearer_map = {
EACLRole.USER: [
EACLOperation.DELETE,
EACLOperation.PUT,
EACLOperation.GET_RANGE,
],
EACLRole.OTHERS: [
EACLOperation.GET,
EACLOperation.GET_RANGE,
EACLOperation.GET_RANGE_HASH,
],
}
deny_map_with_bearer = {
EACLRole.USER: [
op for op in deny_map[EACLRole.USER] if op not in bearer_map[EACLRole.USER]
],
EACLRole.OTHERS: [
op for op in deny_map[EACLRole.OTHERS] if op not in bearer_map[EACLRole.OTHERS]
],
}
eacl_deny = []
for role, operations in deny_map.items():
eacl_deny += [
EACLRule(access=EACLAccess.DENY, role=role, operation=op) for op in operations
]
set_eacl(
user_wallet.wallet_path,
cid,
eacl_table_path=create_eacl(cid, eacl_deny, shell=client_shell),
shell=client_shell,
)
wait_for_cache_expired()
with allure.step("Check rule consistency without bearer"):
check_custom_access_to_container(
user_wallet.wallet_path,
cid,
objects_oids.pop(),
file_path,
deny_operations=deny_map[EACLRole.USER],
wallet_config=user_wallet.config_path,
shell=client_shell,
)
check_custom_access_to_container(
other_wallet.wallet_path,
cid,
objects_oids.pop(),
file_path,
deny_operations=deny_map[EACLRole.OTHERS],
wallet_config=other_wallet.config_path,
shell=client_shell,
)
with allure.step("Check rule consistency using bearer token"):
bearer_token_user = form_bearertoken_file(
user_wallet.wallet_path,
cid,
[
EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.USER)
for op in bearer_map[EACLRole.USER]
],
shell=client_shell,
)
bearer_token_other = form_bearertoken_file(
user_wallet.wallet_path,
cid,
[
EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.OTHERS)
for op in bearer_map[EACLRole.OTHERS]
],
shell=client_shell,
)
check_custom_access_to_container(
user_wallet.wallet_path,
cid,
objects_oids.pop(),
file_path,
deny_operations=deny_map_with_bearer[EACLRole.USER],
bearer=bearer_token_user,
wallet_config=user_wallet.config_path,
shell=client_shell,
)
check_custom_access_to_container(
other_wallet.wallet_path,
cid,
objects_oids.pop(),
file_path,
deny_operations=deny_map_with_bearer[EACLRole.OTHERS],
bearer=bearer_token_other,
wallet_config=other_wallet.config_path,
shell=client_shell,
)
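Note the contrast with the APE suite earlier in this document: under eACL a bearer token does not replace the table, it only lifts the specific denies it allows, which is exactly what deny_map_with_bearer computes. The same rule as a compact sketch:

effective_deny = {
    role: [op for op in deny_map[role] if op not in bearer_map[role]]
    for role in (EACLRole.USER, EACLRole.OTHERS)
}
# effective_deny == deny_map_with_bearer from the test above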

View file

@ -0,0 +1,617 @@
import allure
import pytest
from common import NEOFS_NETMAP_DICT
from failover_utils import wait_object_replication_on_nodes
from neofs_testlib.hosting import Hosting
from neofs_testlib.shell import Shell
from python_keywords.acl import (
EACLAccess,
EACLOperation,
EACLRole,
EACLRule,
create_eacl,
set_eacl,
wait_for_cache_expired,
)
from python_keywords.container import create_container
from python_keywords.container_access import (
check_full_access_to_container,
check_no_access_to_container,
)
from python_keywords.neofs_verbs import put_object
from python_keywords.node_management import drop_object
from python_keywords.object_access import (
can_delete_object,
can_get_head_object,
can_get_object,
can_get_range_hash_of_object,
can_get_range_of_object,
can_put_object,
can_search_object,
)
from wellknown_acl import PUBLIC_ACL
@pytest.mark.sanity
@pytest.mark.acl
@pytest.mark.acl_extended
class TestEACLContainer:
NODE_COUNT = len(NEOFS_NETMAP_DICT)
@pytest.fixture(scope="function")
def eacl_full_placement_container_with_object(self, wallets, file_path, shell: Shell):
user_wallet = wallets.get_wallet()
with allure.step("Create eACL public container with full placement rule"):
full_placement_rule = (
f"REP {self.NODE_COUNT} IN X CBF 1 SELECT {self.NODE_COUNT} FROM * AS X"
)
cid = create_container(
user_wallet.wallet_path,
full_placement_rule,
basic_acl=PUBLIC_ACL,
shell=shell,
)
with allure.step("Add test object to container"):
oid = put_object(user_wallet.wallet_path, file_path, cid, shell=shell)
wait_object_replication_on_nodes(user_wallet.wallet_path, cid, oid, self.NODE_COUNT)
yield cid, oid, file_path
@pytest.mark.parametrize("deny_role", [EACLRole.USER, EACLRole.OTHERS])
def test_extended_acl_deny_all_operations(
self, wallets, client_shell, eacl_container_with_objects, deny_role
):
user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(EACLRole.OTHERS)
deny_role_wallet = other_wallet if deny_role == EACLRole.OTHERS else user_wallet
not_deny_role_wallet = user_wallet if deny_role == EACLRole.OTHERS else other_wallet
deny_role_str = "all others" if deny_role == EACLRole.OTHERS else "user"
not_deny_role_str = "user" if deny_role == EACLRole.OTHERS else "all others"
allure.dynamic.title(f"Testcase to deny NeoFS operations for {deny_role_str}.")
cid, object_oids, file_path = eacl_container_with_objects
with allure.step(f"Deny all operations for {deny_role_str} via eACL"):
eacl_deny = [
EACLRule(access=EACLAccess.DENY, role=deny_role, operation=op)
for op in EACLOperation
]
set_eacl(
user_wallet.wallet_path,
cid,
create_eacl(cid, eacl_deny, shell=client_shell),
shell=client_shell,
)
wait_for_cache_expired()
with allure.step(f"Check only {not_deny_role_str} has full access to container"):
with allure.step(
f"Check {deny_role_str} has not access to any operations with container"
):
check_no_access_to_container(
deny_role_wallet.wallet_path,
cid,
object_oids[0],
file_path,
shell=client_shell,
)
with allure.step(
f"Check {not_deny_role_wallet} has full access to eACL public container"
):
check_full_access_to_container(
not_deny_role_wallet.wallet_path,
cid,
object_oids.pop(),
file_path,
shell=client_shell,
)
with allure.step(f"Allow all operations for {deny_role_str} via eACL"):
eacl_allow = [
EACLRule(access=EACLAccess.ALLOW, role=deny_role, operation=op)
for op in EACLOperation
]
set_eacl(
user_wallet.wallet_path,
cid,
create_eacl(cid, eacl_allow, shell=client_shell),
shell=client_shell,
)
wait_for_cache_expired()
with allure.step(f"Check all have full access to eACL public container"):
check_full_access_to_container(
user_wallet.wallet_path,
cid,
object_oids.pop(),
file_path,
shell=client_shell,
)
check_full_access_to_container(
other_wallet.wallet_path,
cid,
object_oids.pop(),
file_path,
shell=client_shell,
)
@allure.title("Testcase to allow NeoFS operations for only one other pubkey.")
def test_extended_acl_deny_all_operations_exclude_pubkey(
self, wallets, client_shell, eacl_container_with_objects
):
user_wallet = wallets.get_wallet()
other_wallet, other_wallet_allow = wallets.get_wallets_list(EACLRole.OTHERS)[0:2]
cid, object_oids, file_path = eacl_container_with_objects
with allure.step("Deny all operations for others except single wallet via eACL"):
eacl = [
EACLRule(
access=EACLAccess.ALLOW,
role=other_wallet_allow.wallet_path,
operation=op,
)
for op in EACLOperation
]
eacl += [
EACLRule(access=EACLAccess.DENY, role=EACLRole.OTHERS, operation=op)
for op in EACLOperation
]
set_eacl(
user_wallet.wallet_path,
cid,
create_eacl(cid, eacl, shell=client_shell),
shell=client_shell,
)
wait_for_cache_expired()
with allure.step("Check only owner and allowed other have full access to public container"):
with allure.step("Check other has not access to operations with container"):
check_no_access_to_container(
other_wallet.wallet_path,
cid,
object_oids[0],
file_path,
shell=client_shell,
)
with allure.step("Check owner has full access to public container"):
check_full_access_to_container(
user_wallet.wallet_path,
cid,
object_oids.pop(),
file_path,
shell=client_shell,
)
with allure.step("Check allowed other has full access to public container"):
check_full_access_to_container(
other_wallet_allow.wallet_path,
cid,
object_oids.pop(),
file_path,
shell=client_shell,
)
@allure.title("Testcase to validate NeoFS replication with eACL deny rules.")
def test_extended_acl_deny_replication(
self,
wallets,
client_shell,
hosting: Hosting,
eacl_full_placement_container_with_object,
file_path,
):
user_wallet = wallets.get_wallet()
cid, oid, file_path = eacl_full_placement_container_with_object
with allure.step("Deny all operations for user via eACL"):
eacl_deny = [
EACLRule(access=EACLAccess.DENY, role=EACLRole.USER, operation=op)
for op in EACLOperation
]
eacl_deny += [
EACLRule(access=EACLAccess.DENY, role=EACLRole.OTHERS, operation=op)
for op in EACLOperation
]
set_eacl(
user_wallet.wallet_path,
cid,
create_eacl(cid, eacl_deny, shell=client_shell),
shell=client_shell,
)
wait_for_cache_expired()
with allure.step("Drop object to check replication"):
drop_object(hosting, node_name=[*NEOFS_NETMAP_DICT][0], cid=cid, oid=oid)
storage_wallet_path = NEOFS_NETMAP_DICT[[*NEOFS_NETMAP_DICT][0]]["wallet_path"]
with allure.step("Wait for dropped object replicated"):
wait_object_replication_on_nodes(storage_wallet_path, cid, oid, self.NODE_COUNT)
@allure.title("Testcase to validate NeoFS system operations with extended ACL")
def test_extended_actions_system(self, wallets, client_shell, eacl_container_with_objects):
user_wallet = wallets.get_wallet()
ir_wallet, storage_wallet = wallets.get_wallets_list(role=EACLRole.SYSTEM)[:2]
cid, object_oids, file_path = eacl_container_with_objects
with allure.step("Check IR and STORAGE rules compliance"):
assert not can_put_object(
ir_wallet.wallet_path,
cid,
file_path,
shell=client_shell,
wallet_config=ir_wallet.config_path,
)
assert can_put_object(
storage_wallet.wallet_path,
cid,
file_path,
shell=client_shell,
wallet_config=storage_wallet.config_path,
)
assert can_get_object(
ir_wallet.wallet_path,
cid,
object_oids[0],
file_path,
shell=client_shell,
wallet_config=ir_wallet.config_path,
)
assert can_get_object(
storage_wallet.wallet_path,
cid,
object_oids[0],
file_path,
shell=client_shell,
wallet_config=storage_wallet.config_path,
)
assert can_get_head_object(
ir_wallet.wallet_path,
cid,
object_oids[0],
shell=client_shell,
wallet_config=ir_wallet.config_path,
)
assert can_get_head_object(
storage_wallet.wallet_path,
cid,
object_oids[0],
shell=client_shell,
wallet_config=storage_wallet.config_path,
)
assert can_search_object(
ir_wallet.wallet_path,
cid,
shell=client_shell,
oid=object_oids[0],
wallet_config=ir_wallet.config_path,
)
assert can_search_object(
storage_wallet.wallet_path,
cid,
shell=client_shell,
oid=object_oids[0],
wallet_config=storage_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_range_of_object(
wallet=ir_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=client_shell,
wallet_config=ir_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_range_of_object(
wallet=storage_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=client_shell,
wallet_config=storage_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_range_hash_of_object(
wallet=ir_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=client_shell,
wallet_config=ir_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_range_hash_of_object(
wallet=storage_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=client_shell,
wallet_config=storage_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_delete_object(
wallet=ir_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=client_shell,
wallet_config=ir_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_delete_object(
wallet=storage_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=client_shell,
wallet_config=storage_wallet.config_path,
)
with allure.step("Deny all operations for SYSTEM via eACL"):
set_eacl(
user_wallet.wallet_path,
cid,
create_eacl(
cid=cid,
rules_list=[
EACLRule(access=EACLAccess.DENY, role=EACLRole.SYSTEM, operation=op)
for op in EACLOperation
],
shell=client_shell,
),
shell=client_shell,
)
wait_for_cache_expired()
with allure.step("Check IR and STORAGE rules compliance with deny eACL"):
assert not can_put_object(
wallet=ir_wallet.wallet_path,
cid=cid,
file_name=file_path,
shell=client_shell,
wallet_config=ir_wallet.config_path,
)
assert not can_put_object(
wallet=storage_wallet.wallet_path,
cid=cid,
file_name=file_path,
shell=client_shell,
wallet_config=storage_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_object(
wallet=ir_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
file_name=file_path,
shell=client_shell,
wallet_config=ir_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_object(
wallet=storage_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
file_name=file_path,
shell=client_shell,
wallet_config=storage_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_head_object(
wallet=ir_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=client_shell,
wallet_config=ir_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_head_object(
wallet=storage_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=client_shell,
wallet_config=storage_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_search_object(
wallet=ir_wallet.wallet_path,
cid=cid,
shell=client_shell,
oid=object_oids[0],
wallet_config=ir_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_search_object(
wallet=storage_wallet.wallet_path,
cid=cid,
shell=client_shell,
oid=object_oids[0],
wallet_config=storage_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_range_of_object(
wallet=ir_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=client_shell,
wallet_config=ir_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_range_of_object(
wallet=storage_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=client_shell,
wallet_config=storage_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_range_hash_of_object(
wallet=ir_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=client_shell,
wallet_config=ir_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_range_hash_of_object(
wallet=storage_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=client_shell,
wallet_config=storage_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_delete_object(
wallet=ir_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=client_shell,
wallet_config=ir_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_delete_object(
wallet=storage_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=client_shell,
wallet_config=storage_wallet.config_path,
)
with allure.step("Allow all operations for SYSTEM via eACL"):
set_eacl(
user_wallet.wallet_path,
cid,
create_eacl(
cid=cid,
rules_list=[
EACLRule(access=EACLAccess.ALLOW, role=EACLRole.SYSTEM, operation=op)
for op in EACLOperation
],
shell=client_shell,
),
shell=client_shell,
)
wait_for_cache_expired()
with allure.step("Check IR and STORAGE rules compliance with allow eACL"):
assert not can_put_object(
wallet=ir_wallet.wallet_path,
cid=cid,
file_name=file_path,
shell=client_shell,
wallet_config=ir_wallet.config_path,
)
assert can_put_object(
wallet=storage_wallet.wallet_path,
cid=cid,
file_name=file_path,
shell=client_shell,
wallet_config=storage_wallet.config_path,
)
assert can_get_object(
wallet=ir_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
file_name=file_path,
shell=client_shell,
wallet_config=ir_wallet.config_path,
)
assert can_get_object(
wallet=storage_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
file_name=file_path,
shell=client_shell,
wallet_config=storage_wallet.config_path,
)
assert can_get_head_object(
wallet=ir_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=client_shell,
wallet_config=ir_wallet.config_path,
)
assert can_get_head_object(
wallet=storage_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=client_shell,
wallet_config=storage_wallet.config_path,
)
assert can_search_object(
wallet=ir_wallet.wallet_path,
cid=cid,
shell=client_shell,
oid=object_oids[0],
wallet_config=ir_wallet.config_path,
)
assert can_search_object(
wallet=storage_wallet.wallet_path,
cid=cid,
shell=client_shell,
oid=object_oids[0],
wallet_config=storage_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_range_of_object(
wallet=ir_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=client_shell,
wallet_config=ir_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_range_of_object(
wallet=storage_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=client_shell,
wallet_config=storage_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_range_hash_of_object(
wallet=ir_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=client_shell,
wallet_config=ir_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_range_hash_of_object(
wallet=storage_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=client_shell,
wallet_config=storage_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_delete_object(
wallet=ir_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=client_shell,
wallet_config=ir_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_delete_object(
wallet=storage_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=client_shell,
wallet_config=storage_wallet.config_path,
)
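The long can_*/pytest.raises sequences above all follow one pattern; a compact helper sketch, assuming every can_* predicate either returns a bool or raises AssertionError (both behaviors the test already relies on):

import pytest

def assert_allowed(check, allowed: bool, **kwargs):
    # `check` is one of the can_* predicates imported at the top of this file
    if allowed:
        assert check(**kwargs)
    else:
        with pytest.raises(AssertionError):
            assert check(**kwargs)

# e.g. assert_allowed(can_put_object, False, wallet=ir_wallet.wallet_path, cid=cid,
#     file_name=file_path, shell=client_shell, wallet_config=ir_wallet.config_path)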

View file

@ -0,0 +1,543 @@
import allure
import pytest
from python_keywords.acl import (
EACLAccess,
EACLFilter,
EACLFilters,
EACLHeaderType,
EACLMatchType,
EACLOperation,
EACLRole,
EACLRule,
create_eacl,
form_bearertoken_file,
set_eacl,
wait_for_cache_expired,
)
from python_keywords.container import create_container, delete_container
from python_keywords.container_access import (
check_full_access_to_container,
check_no_access_to_container,
)
from python_keywords.neofs_verbs import put_object
from python_keywords.object_access import can_get_head_object, can_get_object, can_put_object
from wellknown_acl import PUBLIC_ACL
@pytest.mark.sanity
@pytest.mark.acl
@pytest.mark.acl_bearer
@pytest.mark.acl_filters
class TestEACLFilters:
# SPEC: https://github.com/nspcc-dev/neofs-spec/blob/master/01-arch/07-acl.md
ATTRIBUTE = {"check_key": "check_value"}
OTHER_ATTRIBUTE = {"check_key": "other_value"}
SET_HEADERS = {
"key_one": "check_value",
"x_key": "xvalue",
"check_key": "check_value",
}
OTHER_HEADERS = {
"key_one": "check_value",
"x_key": "other_value",
"check_key": "other_value",
}
REQ_EQUAL_FILTER = EACLFilter(
key="check_key", value="check_value", header_type=EACLHeaderType.REQUEST
)
NOT_REQ_EQUAL_FILTER = EACLFilter(
key="check_key",
value="other_value",
match_type=EACLMatchType.STRING_NOT_EQUAL,
header_type=EACLHeaderType.REQUEST,
)
OBJ_EQUAL_FILTER = EACLFilter(
key="check_key", value="check_value", header_type=EACLHeaderType.OBJECT
)
NOT_OBJ_EQUAL_FILTER = EACLFilter(
key="check_key",
value="other_value",
match_type=EACLMatchType.STRING_NOT_EQUAL,
header_type=EACLHeaderType.OBJECT,
)
OBJECT_COUNT = 5
OBJECT_ATTRIBUTES_FILTER_SUPPORTED_OPERATIONS = [
EACLOperation.GET,
EACLOperation.HEAD,
EACLOperation.PUT,
]
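    # Only GET, HEAD and PUT are exercised with object-attribute filters,
    # since these are the operations where the object header is available
    # for matching (see the eACL spec linked above).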
@pytest.fixture(scope="function")
def eacl_container_with_objects(self, wallets, client_shell, file_path):
user_wallet = wallets.get_wallet()
with allure.step("Create eACL public container"):
cid = create_container(
user_wallet.wallet_path, basic_acl=PUBLIC_ACL, shell=client_shell
)
with allure.step("Add test objects to container"):
objects_with_header = [
put_object(
user_wallet.wallet_path,
file_path,
cid,
shell=client_shell,
attributes={**self.SET_HEADERS, "key": val},
)
for val in range(self.OBJECT_COUNT)
]
objects_with_other_header = [
put_object(
user_wallet.wallet_path,
file_path,
cid,
shell=client_shell,
attributes={**self.OTHER_HEADERS, "key": val},
)
for val in range(self.OBJECT_COUNT)
]
objects_without_header = [
put_object(user_wallet.wallet_path, file_path, cid, shell=client_shell)
for _ in range(self.OBJECT_COUNT)
]
yield cid, objects_with_header, objects_with_other_header, objects_without_header, file_path
with allure.step("Delete eACL public container"):
delete_container(user_wallet.wallet_path, cid, shell=client_shell)
@pytest.mark.parametrize(
"match_type", [EACLMatchType.STRING_EQUAL, EACLMatchType.STRING_NOT_EQUAL]
)
def test_extended_acl_filters_request(
self, wallets, client_shell, eacl_container_with_objects, match_type
):
allure.dynamic.title(f"Validate NeoFS operations with request filter: {match_type.name}")
user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(EACLRole.OTHERS)
(
cid,
objects_with_header,
objects_with_other_header,
objects_without_header,
file_path,
) = eacl_container_with_objects
with allure.step("Deny all operations for other with eACL request filter"):
equal_filter = EACLFilter(**self.REQ_EQUAL_FILTER.__dict__)
equal_filter.match_type = match_type
eacl_deny = [
EACLRule(
access=EACLAccess.DENY,
role=EACLRole.OTHERS,
filters=EACLFilters([equal_filter]),
operation=op,
)
for op in EACLOperation
]
set_eacl(
user_wallet.wallet_path,
cid,
create_eacl(cid, eacl_deny, shell=client_shell),
shell=client_shell,
)
wait_for_cache_expired()
# Filter denies requests where "check_key {match_type} ATTRIBUTE", so when match_type
# is STRING_EQUAL, then requests with "check_key=OTHER_ATTRIBUTE" will be allowed while
# requests with "check_key=ATTRIBUTE" will be denied, and vice versa
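        # Example: with STRING_EQUAL, a request sent with x-header
        # check_key=check_value matches the filter and is denied, while a
        # request with check_key=other_value (or no header at all) passes.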
allow_headers = (
self.OTHER_ATTRIBUTE if match_type == EACLMatchType.STRING_EQUAL else self.ATTRIBUTE
)
deny_headers = (
self.ATTRIBUTE if match_type == EACLMatchType.STRING_EQUAL else self.OTHER_ATTRIBUTE
)
# We test on 3 groups of objects with various headers,
# but eACL rule should ignore object headers and
# work only based on request headers
        for oids in (
objects_with_header,
objects_with_other_header,
objects_without_header,
):
with allure.step("Check other has full access when sending request without headers"):
check_full_access_to_container(
                    other_wallet.wallet_path, cid, oids.pop(), file_path, shell=client_shell
)
with allure.step(
"Check other has full access when sending request with allowed headers"
):
check_full_access_to_container(
other_wallet.wallet_path,
cid,
                    oids.pop(),
file_path,
shell=client_shell,
xhdr=allow_headers,
)
with allure.step("Check other has no access when sending request with denied headers"):
check_no_access_to_container(
other_wallet.wallet_path,
cid,
                    oids.pop(),
file_path,
shell=client_shell,
xhdr=deny_headers,
)
with allure.step(
"Check other has full access when sending request "
"with denied headers and using bearer token"
):
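                # The bearer token carries its own eACL table signed by the
                # container owner; for requests presenting it, that table takes
                # precedence over the container eACL.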
bearer_token_other = form_bearertoken_file(
user_wallet.wallet_path,
cid,
[
EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.OTHERS)
for op in EACLOperation
],
shell=client_shell,
)
check_full_access_to_container(
other_wallet.wallet_path,
cid,
                    oids.pop(),
file_path,
shell=client_shell,
xhdr=deny_headers,
bearer=bearer_token_other,
)
@pytest.mark.parametrize(
"match_type", [EACLMatchType.STRING_EQUAL, EACLMatchType.STRING_NOT_EQUAL]
)
def test_extended_acl_deny_filters_object(
self, wallets, client_shell, eacl_container_with_objects, match_type
):
allure.dynamic.title(
f"Validate NeoFS operations with deny user headers filter: {match_type.name}"
)
user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(EACLRole.OTHERS)
(
cid,
objects_with_header,
objects_with_other_header,
objs_without_header,
file_path,
) = eacl_container_with_objects
with allure.step("Deny all operations for other with object filter"):
equal_filter = EACLFilter(**self.OBJ_EQUAL_FILTER.__dict__)
equal_filter.match_type = match_type
eacl_deny = [
EACLRule(
access=EACLAccess.DENY,
role=EACLRole.OTHERS,
filters=EACLFilters([equal_filter]),
operation=op,
)
for op in self.OBJECT_ATTRIBUTES_FILTER_SUPPORTED_OPERATIONS
]
set_eacl(
user_wallet.wallet_path,
cid,
create_eacl(cid, eacl_deny, shell=client_shell),
shell=client_shell,
)
wait_for_cache_expired()
allow_objects = (
objects_with_other_header
if match_type == EACLMatchType.STRING_EQUAL
else objects_with_header
)
deny_objects = (
objects_with_header
if match_type == EACLMatchType.STRING_EQUAL
else objects_with_other_header
)
# We will attempt requests with various headers,
# but eACL rule should ignore request headers and validate
# only object headers
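        # Example: with STRING_EQUAL, objects stored with attribute
        # check_key=check_value match the filter and are denied, regardless
        # of which x-headers the request itself carries.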
for xhdr in (self.ATTRIBUTE, self.OTHER_ATTRIBUTE, None):
with allure.step("Check other have full access to objects without attributes"):
check_full_access_to_container(
other_wallet.wallet_path,
cid,
objs_without_header.pop(),
file_path,
shell=client_shell,
xhdr=xhdr,
)
with allure.step("Check other have full access to objects without deny attribute"):
check_full_access_to_container(
other_wallet.wallet_path,
cid,
allow_objects.pop(),
file_path,
shell=client_shell,
xhdr=xhdr,
)
with allure.step("Check other have no access to objects with deny attribute"):
with pytest.raises(AssertionError):
assert can_get_head_object(
other_wallet.wallet_path,
cid,
deny_objects[0],
shell=client_shell,
xhdr=xhdr,
)
with pytest.raises(AssertionError):
assert can_get_object(
other_wallet.wallet_path,
cid,
deny_objects[0],
file_path,
shell=client_shell,
xhdr=xhdr,
)
with allure.step(
"Check other have access to objects with deny attribute and using bearer token"
):
bearer_token_other = form_bearertoken_file(
user_wallet.wallet_path,
cid,
[
EACLRule(
operation=op,
access=EACLAccess.ALLOW,
role=EACLRole.OTHERS,
)
for op in EACLOperation
],
shell=client_shell,
)
check_full_access_to_container(
other_wallet.wallet_path,
cid,
deny_objects.pop(),
file_path,
shell=client_shell,
xhdr=xhdr,
bearer=bearer_token_other,
)
allow_attribute = (
self.OTHER_ATTRIBUTE if match_type == EACLMatchType.STRING_EQUAL else self.ATTRIBUTE
)
with allure.step("Check other can PUT objects without denied attribute"):
assert can_put_object(
other_wallet.wallet_path,
cid,
file_path,
shell=client_shell,
attributes=allow_attribute,
)
assert can_put_object(other_wallet.wallet_path, cid, file_path, shell=client_shell)
deny_attribute = (
self.ATTRIBUTE if match_type == EACLMatchType.STRING_EQUAL else self.OTHER_ATTRIBUTE
)
with allure.step("Check other can not PUT objects with denied attribute"):
with pytest.raises(AssertionError):
assert can_put_object(
other_wallet.wallet_path,
cid,
file_path,
shell=client_shell,
attributes=deny_attribute,
)
with allure.step(
"Check other can PUT objects with denied attribute and using bearer token"
):
bearer_token_other_for_put = form_bearertoken_file(
user_wallet.wallet_path,
cid,
[
EACLRule(
operation=EACLOperation.PUT,
access=EACLAccess.ALLOW,
role=EACLRole.OTHERS,
)
],
shell=client_shell,
)
assert can_put_object(
other_wallet.wallet_path,
cid,
file_path,
shell=client_shell,
attributes=deny_attribute,
bearer=bearer_token_other_for_put,
)
@pytest.mark.parametrize(
"match_type", [EACLMatchType.STRING_EQUAL, EACLMatchType.STRING_NOT_EQUAL]
)
def test_extended_acl_allow_filters_object(
self, wallets, client_shell, eacl_container_with_objects, match_type
):
allure.dynamic.title(
"Testcase to validate NeoFS operation with allow eACL user headers filters:"
f"{match_type.name}"
)
user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(EACLRole.OTHERS)
(
cid,
objects_with_header,
objects_with_other_header,
objects_without_header,
file_path,
) = eacl_container_with_objects
with allure.step(
"Deny all operations for others except few operations allowed by object filter"
):
equal_filter = EACLFilter(**self.OBJ_EQUAL_FILTER.__dict__)
equal_filter.match_type = match_type
eacl = [
EACLRule(
access=EACLAccess.ALLOW,
role=EACLRole.OTHERS,
filters=EACLFilters([equal_filter]),
operation=op,
)
for op in self.OBJECT_ATTRIBUTES_FILTER_SUPPORTED_OPERATIONS
] + [
EACLRule(access=EACLAccess.DENY, role=EACLRole.OTHERS, operation=op)
for op in self.OBJECT_ATTRIBUTES_FILTER_SUPPORTED_OPERATIONS
]
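            # Record order matters here: the filtered ALLOW records come first,
            # so matching objects are permitted before the blanket DENY records
            # are evaluated (the first matching record wins).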
set_eacl(
user_wallet.wallet_path,
cid,
create_eacl(cid, eacl, shell=client_shell),
shell=client_shell,
)
wait_for_cache_expired()
if match_type == EACLMatchType.STRING_EQUAL:
allow_objects = objects_with_header
deny_objects = objects_with_other_header
allow_attribute = self.ATTRIBUTE
deny_attribute = self.OTHER_ATTRIBUTE
else:
allow_objects = objects_with_other_header
deny_objects = objects_with_header
allow_attribute = self.OTHER_ATTRIBUTE
deny_attribute = self.ATTRIBUTE
with allure.step(f"Check other cannot get and put objects without attributes"):
oid = objects_without_header.pop()
with pytest.raises(AssertionError):
assert can_get_head_object(other_wallet.wallet_path, cid, oid, shell=client_shell)
with pytest.raises(AssertionError):
assert can_get_object(
other_wallet.wallet_path, cid, oid, file_path, shell=client_shell
)
with pytest.raises(AssertionError):
assert can_put_object(other_wallet.wallet_path, cid, file_path, shell=client_shell)
with allure.step(
"Check other can get and put objects without attributes and using bearer token"
):
bearer_token_other = form_bearertoken_file(
user_wallet.wallet_path,
cid,
[
EACLRule(
operation=op,
access=EACLAccess.ALLOW,
role=EACLRole.OTHERS,
)
for op in EACLOperation
],
shell=client_shell,
)
assert can_get_head_object(
other_wallet.wallet_path,
cid,
objects_without_header[0],
shell=client_shell,
bearer=bearer_token_other,
)
assert can_get_object(
other_wallet.wallet_path,
cid,
objects_without_header[0],
file_path,
shell=client_shell,
bearer=bearer_token_other,
)
assert can_put_object(
other_wallet.wallet_path,
cid,
file_path,
shell=client_shell,
bearer=bearer_token_other,
)
with allure.step(f"Check other can get objects with attributes matching the filter"):
oid = allow_objects.pop()
assert can_get_head_object(other_wallet.wallet_path, cid, oid, shell=client_shell)
assert can_get_object(other_wallet.wallet_path, cid, oid, file_path, shell=client_shell)
assert can_put_object(
other_wallet.wallet_path,
cid,
file_path,
shell=client_shell,
attributes=allow_attribute,
)
with allure.step("Check other cannot get objects without attributes matching the filter"):
with pytest.raises(AssertionError):
assert can_get_head_object(
other_wallet.wallet_path, cid, deny_objects[0], shell=client_shell
)
with pytest.raises(AssertionError):
assert can_get_object(
other_wallet.wallet_path, cid, deny_objects[0], file_path, shell=client_shell
)
with pytest.raises(AssertionError):
assert can_put_object(
other_wallet.wallet_path,
cid,
file_path,
attributes=deny_attribute,
shell=client_shell,
)
with allure.step(
"Check other can get objects without attributes matching the filter "
"and using bearer token"
):
oid = deny_objects.pop()
assert can_get_head_object(
other_wallet.wallet_path, cid, oid, shell=client_shell, bearer=bearer_token_other
)
assert can_get_object(
other_wallet.wallet_path,
cid,
oid,
file_path,
shell=client_shell,
bearer=bearer_token_other,
)
assert can_put_object(
other_wallet.wallet_path,
cid,
file_path,
shell=client_shell,
attributes=deny_attribute,
bearer=bearer_token_other,
)

View file

@ -1,52 +0,0 @@
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
from frostfs_testlib.shell.interfaces import Shell
from frostfs_testlib.steps.cli.object import put_object
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.utils.file_utils import generate_file
@pytest.fixture(scope="session")
def frostfs_cli_on_first_node(cluster: Cluster) -> FrostfsCli:
node = cluster.cluster_nodes[0]
shell = node.host.get_shell()
return FrostfsCli(shell, FROSTFS_CLI_EXEC, node.storage_node.get_remote_wallet_config_path())
@pytest.fixture
def object_id(
default_wallet: WalletInfo,
frostfs_cli_on_first_node: FrostfsCli,
simple_object_size: ObjectSize,
cluster: Cluster,
client_shell: Shell,
container: str,
) -> str:
test_file = generate_file(simple_object_size.value)
with reporter.step("Allow PutObject on first node via local override"):
frostfs_cli_on_first_node.control.add_rule(
endpoint=cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="allowPutObject",
rule=f"allow Object.Put *",
)
with reporter.step("Put objects in container on the first node"):
object_id = put_object(default_wallet, test_file, container, client_shell, cluster.storage_nodes[0].get_rpc_endpoint())
with reporter.step("Remove PutObject local override from first node"):
frostfs_cli_on_first_node.control.remove_rule(
endpoint=cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="allowPutObject",
)
return object_id

View file

@ -1,212 +0,0 @@
from time import sleep
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.resources.error_patterns import RULE_ACCESS_DENIED_CONTAINER
from frostfs_testlib.shell.interfaces import Shell
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.ape import Operations
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.parallel import parallel
from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils import datetime_utils
from ...helpers.container_request import APE_EVERYONE_ALLOW_ALL, ContainerRequest, MultipleContainersRequest
REP2 = ContainerRequest("REP 2", ape_rules=APE_EVERYONE_ALLOW_ALL, short_name="REP2_allow_all_ape")
def remove_local_overrides_on_node(node: ClusterNode):
target = "Chain ID"
shell: Shell = node.host.get_shell()
remote_config: str = node.storage_node.get_remote_wallet_config_path()
cli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=remote_config)
with reporter.step(f"Check local overrides on {node.storage_node.id} node"):
rules = cli.control.list_rules(
endpoint=node.storage_node.get_control_endpoint(), target_name="root", target_type="namespace"
).stdout
if target in rules:
with reporter.step("Delete rules"):
chain_ids = [i.split(" ")[2].strip() for i in rules.split("\n") if "Chain ID" in i]
for chain_id in chain_ids:
cli.control.remove_rule(
endpoint=node.storage_node.get_control_endpoint(),
target_type="namespace",
target_name="root",
chain_id=chain_id,
)
with reporter.step("Wait for one block"):
sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
@pytest.fixture(scope="session")
def remove_local_ape_overrides(cluster: Cluster) -> None:
yield
with reporter.step("Check local overrides on nodes."):
parallel(remove_local_overrides_on_node, cluster.cluster_nodes)
@pytest.mark.ape
@pytest.mark.ape_local
@pytest.mark.ape_container
@pytest.mark.ape_namespace
class TestApeLocalOverrideContainer(ClusterTestBase):
@allure.title("LocalOverride: Deny to GetContainer in root tenant")
def test_local_override_deny_to_get_container_root(
self,
frostfs_cli_on_first_node: FrostfsCli,
frostfs_cli: FrostfsCli,
container: str,
remove_local_ape_overrides: None,
):
with reporter.step("Create a namespace rule for the first node"):
frostfs_cli_on_first_node.control.add_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="namespace",
target_name="root",
chain_id="denyContainerGet",
rule="deny Container.Get *",
)
with reporter.step("Check get the container property on the first node, expected denied error"):
with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_CONTAINER.format(operation=Operations.GET_CONTAINER)):
frostfs_cli.container.get(self.cluster.storage_nodes[0].get_rpc_endpoint(), container)
with reporter.step("Check get the container property on the second node, expected allow"):
with expect_not_raises():
frostfs_cli.container.get(self.cluster.storage_nodes[1].get_rpc_endpoint(), container)
with reporter.step("Delete a rule"):
frostfs_cli_on_first_node.control.remove_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="namespace",
target_name="root",
chain_id="denyContainerGet",
)
with reporter.step("Check get the container property on the first node, expected allow"):
with expect_not_raises():
frostfs_cli.container.get(self.cluster.storage_nodes[0].get_rpc_endpoint(), container)
@allure.title("LocalOverride: Deny to PutContainer in root tenant")
def test_local_override_deny_to_put_container_root(
self,
frostfs_cli_on_first_node: FrostfsCli,
frostfs_cli: FrostfsCli,
remove_local_ape_overrides: None,
):
with reporter.step("Create a namespace rule for the first node"):
frostfs_cli_on_first_node.control.add_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="namespace",
target_name="root",
chain_id="denyContainerPut",
rule="deny Container.Put *",
)
with reporter.step("Check create container on the first node, expected access denied error"):
with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_CONTAINER.format(operation=Operations.PUT_CONTAINER)):
frostfs_cli.container.create(
rpc_endpoint=self.cluster.storage_nodes[0].get_rpc_endpoint(),
policy="REP 1",
)
with reporter.step("Check create a container on the second node, expected allow"):
with expect_not_raises():
frostfs_cli.container.create(
rpc_endpoint=self.cluster.storage_nodes[1].get_rpc_endpoint(),
policy="REP 1",
)
with reporter.step("Delete a rule"):
frostfs_cli_on_first_node.control.remove_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="namespace",
target_name="root",
chain_id="denyContainerPut",
)
with reporter.step("Check create a container on the first node, expected allow"):
with expect_not_raises():
frostfs_cli.container.create(
rpc_endpoint=self.cluster.storage_nodes[0].get_rpc_endpoint(),
policy="REP 1",
)
@allure.title("LocalOverride: Deny to ListContainer in root tenant")
def test_local_override_deny_to_list_container_root(
self,
frostfs_cli_on_first_node: FrostfsCli,
frostfs_cli: FrostfsCli,
remove_local_ape_overrides: None,
):
with reporter.step("Create a namespace rule for the first node"):
frostfs_cli_on_first_node.control.add_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="namespace",
target_name="root",
chain_id="denyContainerList",
rule="deny Container.List *",
)
with reporter.step("Check list the container properties on the first node, expected access denied error"):
with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_CONTAINER.format(operation=Operations.LIST_CONTAINER)):
frostfs_cli.container.list(rpc_endpoint=self.cluster.storage_nodes[0].get_rpc_endpoint(), ttl=1)
with reporter.step("Check list the container properties on the second node, expected allow"):
with expect_not_raises():
frostfs_cli.container.list(rpc_endpoint=self.cluster.storage_nodes[1].get_rpc_endpoint(), ttl=1)
with reporter.step("Delete a rule"):
frostfs_cli_on_first_node.control.remove_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="namespace",
target_name="root",
chain_id="denyContainerList",
)
with reporter.step("Check display a list of containers on the first node, expected allow"):
with expect_not_raises():
frostfs_cli.container.list(rpc_endpoint=self.cluster.storage_nodes[0].get_rpc_endpoint(), ttl=1)
@allure.title("LocalOverride: Deny to DeleteContainer in root tenant")
@pytest.mark.parametrize("multiple_containers_request", [MultipleContainersRequest([REP2, REP2])], indirect=True)
def test_local_override_deny_to_delete_container_root(
self,
frostfs_cli_on_first_node: FrostfsCli,
frostfs_cli: FrostfsCli,
remove_local_ape_overrides: None,
containers: list[str],
):
with reporter.step("Create a namespace rule for the first node"):
frostfs_cli_on_first_node.control.add_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="namespace",
target_name="root",
chain_id="denyContainerDelete",
rule="deny Container.Delete *",
)
with reporter.step("Check delete first container from the first node, expected access denied error"):
with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_CONTAINER.format(operation=Operations.DELETE_CONTAINER)):
frostfs_cli.container.delete(self.cluster.storage_nodes[0].get_rpc_endpoint(), containers[0], ttl=1)
with reporter.step("Check delete a second container from the second node, expected allow"):
with expect_not_raises():
frostfs_cli.container.delete(self.cluster.storage_nodes[1].get_rpc_endpoint(), containers[1], ttl=1)
with reporter.step("Delete a rule"):
frostfs_cli_on_first_node.control.remove_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="namespace",
target_name="root",
chain_id="denyContainerDelete",
)
with reporter.step("Check delete first container from the first node, expected allow"):
with expect_not_raises():
frostfs_cli.container.delete(self.cluster.storage_nodes[0].get_rpc_endpoint(), containers[0], ttl=1)

View file

@ -1,304 +0,0 @@
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.resources.error_patterns import NO_RULE_FOUND_OBJECT
from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils.file_utils import TestFile
from ...helpers.container_request import ContainerRequest
REP1_MSK = ContainerRequest("REP 1 IN MOW CBF 1 SELECT 1 FROM MSK AS MOW FILTER SubDivCode EQ MOW AS MSK", short_name="REP1_MSK_no_ape")
@pytest.mark.ape
@pytest.mark.ape_local
@pytest.mark.ape_object
@pytest.mark.ape_allow
@pytest.mark.parametrize("container_request", [REP1_MSK], indirect=True)
@pytest.mark.parametrize("user_tag", ["ApeLocalOverrideAllow"], indirect=True) # provide dedicated user with no APE side-policies
class TestApeLocalOverrideAllow(ClusterTestBase):
@allure.title("LocalOverride: Allow to GetObject in root tenant")
def test_local_override_allow_to_get_object_root(
self,
frostfs_cli_on_first_node: FrostfsCli,
grpc_client: GrpcClientWrapper,
container: str,
object_id: str,
):
with reporter.step("Create local override on first node"):
frostfs_cli_on_first_node.control.add_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="allowGetObject",
rule=f"allow Object.Get *",
)
with reporter.step("Check get object in container on the first node, expected allow"):
with expect_not_raises():
grpc_client.object.get(container, object_id, self.cluster.storage_nodes[0].get_rpc_endpoint())
with reporter.step("Check get object in container on the second node, epxected access denied error"):
with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT):
grpc_client.object.get(container, object_id, self.cluster.storage_nodes[1].get_rpc_endpoint())
with reporter.step("Delete a rule"):
frostfs_cli_on_first_node.control.remove_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="allowGetObject",
)
@allure.title("LocalOverride: Allow to PutObject in root tenant")
@pytest.mark.parametrize("object_size", ["simple"], indirect=True)
def test_local_override_allow_to_put_object_root(
self,
frostfs_cli_on_first_node: FrostfsCli,
grpc_client: GrpcClientWrapper,
container: str,
test_file: TestFile,
):
with reporter.step("Create local override on first node"):
frostfs_cli_on_first_node.control.add_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="allowPutObject",
rule=f"allow Object.Put *",
)
with reporter.step("Check put object in container on the first node, expected allow"):
with expect_not_raises():
grpc_client.object.put(test_file, container, self.cluster.storage_nodes[0].get_rpc_endpoint())
with reporter.step("Check put object in container on the second node, epxected access denied error"):
with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT):
grpc_client.object.put(test_file, container, self.cluster.storage_nodes[1].get_rpc_endpoint())
with reporter.step("Delete a rule"):
frostfs_cli_on_first_node.control.remove_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="allowPutObject",
)
@allure.title("LocalOverride: Allow to HeadObject in root tenant")
def test_local_override_allow_to_head_object_root(
self,
frostfs_cli_on_first_node: FrostfsCli,
grpc_client: GrpcClientWrapper,
container: str,
object_id: str,
):
with reporter.step("Create local override on first node"):
frostfs_cli_on_first_node.control.add_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="allowHeadObject",
rule=f"allow Object.Head *",
)
with reporter.step("Check head object in container on the first node, expected allow"):
with expect_not_raises():
grpc_client.object.head(container, object_id, self.cluster.storage_nodes[0].get_rpc_endpoint())
with reporter.step("Check head object in container on the second node, expected access denied error"):
with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT):
grpc_client.object.head(container, object_id, self.cluster.storage_nodes[1].get_rpc_endpoint())
with reporter.step("Delete a rule"):
frostfs_cli_on_first_node.control.remove_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="allowHeadObject",
)
@allure.title("LocalOverride: Allow to SearchObject in root tenant")
def test_local_override_allow_to_search_object_root(
self,
frostfs_cli_on_first_node: FrostfsCli,
grpc_client: GrpcClientWrapper,
container: str,
):
with reporter.step("Create local override on first node"):
frostfs_cli_on_first_node.control.add_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="allowSearchObject",
rule=f"allow Object.Search *",
)
with reporter.step("Check search object in container on the first node, expected allow"):
with expect_not_raises():
grpc_client.object.search(container, self.cluster.storage_nodes[0].get_rpc_endpoint())
with reporter.step("Check search object from container on the second node, expected access denied error"):
with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT):
grpc_client.object.search(container, self.cluster.storage_nodes[1].get_rpc_endpoint())
with reporter.step("Delete a rule"):
frostfs_cli_on_first_node.control.remove_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="allowSearchObject",
)
@allure.title("LocalOverride: Allow to RangeObject in root tenant")
def test_local_override_allow_to_range_object_root(
self,
frostfs_cli_on_first_node: FrostfsCli,
grpc_client: GrpcClientWrapper,
container: str,
object_id: str,
):
with reporter.step("Create local override on first node"):
frostfs_cli_on_first_node.control.add_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="allowRangeObject",
rule=f"allow Object.Range *",
)
with reporter.step("Check get range object in container on the first node, expected allow"):
with expect_not_raises():
grpc_client.object.range(container, object_id, "0:10", self.cluster.storage_nodes[0].get_rpc_endpoint())
with reporter.step("Check get range object in container on the second node, expected access denied error"):
with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT):
grpc_client.object.range(container, object_id, "0:10", self.cluster.storage_nodes[1].get_rpc_endpoint())
with reporter.step("Delete a rule"):
frostfs_cli_on_first_node.control.remove_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="allowRangeObject",
)
@allure.title("LocalOverride: Allow to HashObject in root tenant")
def test_local_override_allow_to_hash_object_root(
self,
frostfs_cli_on_first_node: FrostfsCli,
grpc_client: GrpcClientWrapper,
container: str,
object_id: str,
):
with reporter.step("Create local override on first node"):
frostfs_cli_on_first_node.control.add_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="allowHashObject",
rule=f"allow Object.Hash *",
)
with reporter.step("Check get range hash object in container on the first node, expected allow"):
with expect_not_raises():
grpc_client.object.hash(self.cluster.storage_nodes[0].get_rpc_endpoint(), container, object_id, range="0:10")
with reporter.step("Check get range hash object in container on the second node, expected access denied error"):
with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT):
grpc_client.object.hash(self.cluster.storage_nodes[1].get_rpc_endpoint(), container, object_id, range="0:10")
with reporter.step("Delete a rule"):
frostfs_cli_on_first_node.control.remove_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="allowHashObject",
)
@allure.title("LocalOverride: Allow to DeleteObject in root tenant")
def test_local_override_allow_to_delete_object_root(
self,
frostfs_cli_on_first_node: FrostfsCli,
grpc_client: GrpcClientWrapper,
container: str,
object_id: str,
):
with reporter.step("Create local override on first node"):
frostfs_cli_on_first_node.control.add_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="allowDeleteObject",
rule=f"allow Object.Head Object.Delete *",
)
with reporter.step("Check delete object from container on the second node, expected access denied error"):
with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT):
grpc_client.object.delete(container, object_id, self.cluster.storage_nodes[1].get_rpc_endpoint())
with reporter.step("Check delete object in container on the first node, expected allow"):
with expect_not_raises():
grpc_client.object.delete(container, object_id, self.cluster.storage_nodes[0].get_rpc_endpoint())
with reporter.step("Delete a rule"):
frostfs_cli_on_first_node.control.remove_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="allowDeleteObject",
)
@allure.title("LocalOverride: Allow to PatchObject in root tenant")
@pytest.mark.parametrize("object_size", ["simple"], indirect=True)
def test_local_override_allow_to_patch_object_root(
self,
frostfs_cli_on_first_node: FrostfsCli,
grpc_client: GrpcClientWrapper,
container: str,
object_id: str,
test_file: TestFile,
):
with reporter.step("Create local override on first node"):
frostfs_cli_on_first_node.control.add_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="allowPatchObject",
rule=f"allow Object.Patch *",
)
with reporter.step("Check patch object in container on the second node, epxected access denied error"):
with pytest.raises(RuntimeError, match=NO_RULE_FOUND_OBJECT):
grpc_client.object.patch(
container,
object_id,
self.cluster.storage_nodes[1].get_rpc_endpoint(),
ranges=["500:300"],
payloads=[test_file],
new_attrs="patched=false",
timeout="200s",
)
with reporter.step("Check patch object in container on the first node, expected allow"):
with expect_not_raises():
patched_oid = grpc_client.object.patch(
container,
object_id,
self.cluster.storage_nodes[0].get_rpc_endpoint(),
ranges=["100:200"],
payloads=[test_file],
new_attrs="patched=true",
timeout="200s",
)
assert patched_oid != object_id, "OID of patched object must be different from original one"
with reporter.step("Delete a rule"):
frostfs_cli_on_first_node.control.remove_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="allowPatchObject",
)

View file

@ -1,382 +0,0 @@
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.resources.error_patterns import RULE_ACCESS_DENIED_OBJECT
from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils.file_utils import TestFile
from ...helpers.container_request import APE_EVERYONE_ALLOW_ALL, ContainerRequest
REP2 = ContainerRequest("REP 2", ape_rules=APE_EVERYONE_ALLOW_ALL, short_name="REP2_allow_all_ape")
@pytest.mark.ape
@pytest.mark.ape_local
@pytest.mark.ape_object
@pytest.mark.ape_deny
class TestApeLocalOverrideDeny(ClusterTestBase):
@allure.title("LocalOverride: Deny to GetObject in root tenant")
@pytest.mark.parametrize("container_request", [REP2], indirect=True)
@pytest.mark.parametrize("object_size", ["simple"], indirect=True)
def test_local_override_deny_to_get_object_root(
self,
frostfs_cli_on_first_node: FrostfsCli,
grpc_client: GrpcClientWrapper,
container: str,
test_file: TestFile,
):
with reporter.step("Create local override on first node"):
frostfs_cli_on_first_node.control.add_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="denyGetObject",
rule=f"deny Object.Get /{container}/*",
)
with reporter.step("Put object in container on the first node"):
oid = grpc_client.object.put(test_file, container, self.cluster.storage_nodes[0].get_rpc_endpoint())
with reporter.step("Check get object from container on the first node, expected access denied error"):
with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_OBJECT):
grpc_client.object.get(container, oid, self.cluster.storage_nodes[0].get_rpc_endpoint())
with reporter.step("Check get object from container on the second node, expected allow"):
with expect_not_raises():
grpc_client.object.get(container, oid, self.cluster.storage_nodes[1].get_rpc_endpoint())
with reporter.step("Delete a rule"):
frostfs_cli_on_first_node.control.remove_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="denyGetObject",
)
with reporter.step("Check get object in container on the first node, expected allow"):
with expect_not_raises():
grpc_client.object.get(container, oid, self.cluster.storage_nodes[0].get_rpc_endpoint())
@allure.title("LocalOverride: Deny to PutObject in root tenant")
@pytest.mark.parametrize("container_request", [REP2], indirect=True)
@pytest.mark.parametrize("object_size", ["simple"], indirect=True)
def test_local_override_deny_to_put_object_root(
self,
frostfs_cli_on_first_node: FrostfsCli,
grpc_client: GrpcClientWrapper,
container: str,
test_file: TestFile,
):
with reporter.step("Create local override on first node"):
frostfs_cli_on_first_node.control.add_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="denyPutObject",
rule=f"deny Object.Put /{container}/*",
)
with reporter.step("Check put object from container on the first node, expected access denied error"):
with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_OBJECT):
grpc_client.object.put(test_file, container, self.cluster.storage_nodes[0].get_rpc_endpoint())
with reporter.step("Check put object from container on the second node, expected allow"):
with expect_not_raises():
grpc_client.object.put(test_file, container, self.cluster.storage_nodes[1].get_rpc_endpoint())
with reporter.step("Delete a rule"):
frostfs_cli_on_first_node.control.remove_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="denyPutObject",
)
with reporter.step("Check get object in container on the first node, expected allow"):
with expect_not_raises():
grpc_client.object.put(test_file, container, self.cluster.storage_nodes[0].get_rpc_endpoint())
@allure.title("LocalOverride: Deny to HeadObject in root tenant")
@pytest.mark.parametrize("container_request", [REP2], indirect=True)
@pytest.mark.parametrize("object_size", ["simple"], indirect=True)
def test_local_override_deny_to_head_object_root(
self,
frostfs_cli_on_first_node: FrostfsCli,
grpc_client: GrpcClientWrapper,
container: str,
test_file: TestFile,
):
with reporter.step("Create local override on first node"):
frostfs_cli_on_first_node.control.add_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="denyHeadObject",
rule=f"deny Object.Head /{container}/*",
)
with reporter.step("Put object in container on the first node"):
oid = grpc_client.object.put(test_file, container, self.cluster.storage_nodes[0].get_rpc_endpoint())
with reporter.step("Check head object from container on the first node, expected access denied error"):
with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_OBJECT):
grpc_client.object.head(container, oid, self.cluster.storage_nodes[0].get_rpc_endpoint())
with reporter.step("Check head object from container on the second node, expected allow"):
with expect_not_raises():
grpc_client.object.head(container, oid, self.cluster.storage_nodes[1].get_rpc_endpoint())
with reporter.step("Delete a rule"):
frostfs_cli_on_first_node.control.remove_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="denyHeadObject",
)
with reporter.step("Check head object in container on the first node, expected allow"):
with expect_not_raises():
grpc_client.object.head(container, oid, self.cluster.storage_nodes[0].get_rpc_endpoint())
@allure.title("LocalOverride: Deny to SearchObject in root tenant")
@pytest.mark.parametrize("container_request", [REP2], indirect=True)
def test_local_override_deny_to_search_object_root(
self,
frostfs_cli_on_first_node: FrostfsCli,
grpc_client: GrpcClientWrapper,
container: str,
):
with reporter.step("Create local override on first node"):
frostfs_cli_on_first_node.control.add_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="denySearchObject",
rule=f"deny Object.Search /{container}/*",
)
with reporter.step("Check search object from container on the first node, expected access denied error"):
with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_OBJECT):
grpc_client.object.search(container, self.cluster.storage_nodes[0].get_rpc_endpoint())
with reporter.step("Check search object from container on the second node, expected allow"):
with expect_not_raises():
grpc_client.object.search(container, self.cluster.storage_nodes[1].get_rpc_endpoint())
with reporter.step("Delete a rule"):
frostfs_cli_on_first_node.control.remove_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="denySearchObject",
)
with reporter.step("Check search object in container on the first node, expected allow"):
with expect_not_raises():
grpc_client.object.search(container, self.cluster.storage_nodes[0].get_rpc_endpoint())
@allure.title("LocalOverride: Deny to RangeObject in root tenant")
@pytest.mark.parametrize("container_request", [REP2], indirect=True)
@pytest.mark.parametrize("object_size", ["simple"], indirect=True)
def test_local_override_deny_to_range_object_root(
self,
frostfs_cli_on_first_node: FrostfsCli,
grpc_client: GrpcClientWrapper,
container: str,
test_file: TestFile,
):
with reporter.step("Create local override on first node"):
frostfs_cli_on_first_node.control.add_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="denyRangeObject",
rule=f"deny Object.Range /{container}/*",
)
with reporter.step("Put object in container on the first node"):
oid = grpc_client.object.put(test_file, container, self.cluster.storage_nodes[0].get_rpc_endpoint())
with reporter.step("Check range object from container on the first node, expected access denied error"):
with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_OBJECT):
grpc_client.object.range(container, oid, "0:10", self.cluster.storage_nodes[0].get_rpc_endpoint())
with reporter.step("Check get range object from container on the second node, expected allow"):
with expect_not_raises():
grpc_client.object.range(container, oid, "0:10", self.cluster.storage_nodes[1].get_rpc_endpoint())
with reporter.step("Delete a rule"):
frostfs_cli_on_first_node.control.remove_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="denyRangeObject",
)
with reporter.step("Check get range object in container on the first node, expected allow"):
with expect_not_raises():
grpc_client.object.range(container, oid, "0:10", self.cluster.storage_nodes[0].get_rpc_endpoint())
@allure.title("LocalOverride: Deny to HashObject in root tenant")
@pytest.mark.parametrize("container_request", [REP2], indirect=True)
@pytest.mark.parametrize("object_size", ["simple"], indirect=True)
def test_local_override_deny_to_hash_object_root(
self,
frostfs_cli_on_first_node: FrostfsCli,
grpc_client: GrpcClientWrapper,
container: str,
test_file: TestFile,
):
with reporter.step("Create local override on first node"):
frostfs_cli_on_first_node.control.add_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="denyHashObject",
rule=f"deny Object.Hash /{container}/*",
)
with reporter.step("Put object in container on the first node"):
oid = grpc_client.object.put(test_file, container, self.cluster.storage_nodes[0].get_rpc_endpoint())
with reporter.step("Check get range hash object from container on the first node, expected access denied error"):
with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_OBJECT):
grpc_client.object.hash(self.cluster.storage_nodes[0].get_rpc_endpoint(), container, oid, range="0:10")
with reporter.step("Check get range hash object from container on the second node, expected allow"):
with expect_not_raises():
grpc_client.object.hash(self.cluster.storage_nodes[1].get_rpc_endpoint(), container, oid, range="0:10")
with reporter.step("Delete a rule"):
frostfs_cli_on_first_node.control.remove_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="denyHashObject",
)
with reporter.step("Check get range hash object in container on the first node, expected allow"):
with expect_not_raises():
grpc_client.object.hash(self.cluster.storage_nodes[0].get_rpc_endpoint(), container, oid, range="0:10")
@allure.title("LocalOverride: Deny to DeleteObject in root tenant")
@pytest.mark.parametrize("container_request", [REP2], indirect=True)
@pytest.mark.parametrize("object_size", ["simple"], indirect=True)
def test_local_override_deny_to_delete_object_root(
self,
frostfs_cli_on_first_node: FrostfsCli,
grpc_client: GrpcClientWrapper,
container: str,
test_file: TestFile,
):
with reporter.step("Create local override on first node"):
frostfs_cli_on_first_node.control.add_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="denyDeleteObject",
rule=f"deny Object.Delete /{container}/*",
)
with reporter.step("Put objects in container on the first node"):
oid_1 = grpc_client.object.put(test_file, container, self.cluster.storage_nodes[0].get_rpc_endpoint())
oid_2 = grpc_client.object.put(test_file, container, self.cluster.storage_nodes[0].get_rpc_endpoint())
with reporter.step("Search object in container on the first node"):
search_object_in_container_1 = grpc_client.object.search(container, self.cluster.storage_nodes[0].get_rpc_endpoint())
assert oid_1 in search_object_in_container_1, f"Object {oid_1} was not found"
assert oid_2 in search_object_in_container_1, f"Object {oid_2} was not found"
with reporter.step("Search object from container on the second node"):
search_object_in_container_2 = grpc_client.object.search(container, self.cluster.storage_nodes[1].get_rpc_endpoint())
assert oid_1 in search_object_in_container_2, f"Object {oid_1} was not found"
assert oid_2 in search_object_in_container_2, f"Object {oid_2} was not found"
with reporter.step("Check delete object from container on the first node, expected access denied error"):
with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_OBJECT):
grpc_client.object.delete(container, oid_1, self.cluster.storage_nodes[0].get_rpc_endpoint())
with reporter.step("Check delete object from container on the second node, expected allow"):
with expect_not_raises():
grpc_client.object.delete(container, oid_2, self.cluster.storage_nodes[1].get_rpc_endpoint())
with reporter.step("Delete a rule"):
frostfs_cli_on_first_node.control.remove_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="denyDeleteObject",
)
with reporter.step("Check delete object in container on the first node, expected allow"):
with expect_not_raises():
grpc_client.object.delete(container, oid_1, self.cluster.storage_nodes[0].get_rpc_endpoint())
@allure.title("LocalOverride: Deny to PatchObject in root tenant")
@pytest.mark.parametrize("container_request", [REP2], indirect=True)
@pytest.mark.parametrize("object_size", ["simple"], indirect=True)
def test_local_override_deny_to_patch_object_root(
self,
frostfs_cli_on_first_node: FrostfsCli,
grpc_client: GrpcClientWrapper,
test_file: TestFile,
container: str,
object_id: str,
):
with reporter.step("Create local override on first node"):
frostfs_cli_on_first_node.control.add_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="denyPatchObject",
rule=f"deny Object.Patch /{container}/*",
)
with reporter.step("Check patch object from container on the first node, expected access denied error"):
with pytest.raises(RuntimeError, match=RULE_ACCESS_DENIED_OBJECT):
grpc_client.object.patch(
container,
object_id,
self.cluster.storage_nodes[0].get_rpc_endpoint(),
ranges=["0:350"],
payloads=[test_file],
new_attrs="patched_by_first_node=false",
timeout="200s",
)
with reporter.step("Check patch object from container on the second node, expected allow"):
with expect_not_raises():
patched_oid_1 = grpc_client.object.patch(
container,
object_id,
self.cluster.storage_nodes[1].get_rpc_endpoint(),
ranges=["200:400"],
payloads=[test_file],
new_attrs="patched_by_second_node=true",
timeout="200s",
)
assert patched_oid_1 != object_id, "OID of patched object must be different from original one"
with reporter.step("Delete a rule"):
frostfs_cli_on_first_node.control.remove_rule(
endpoint=self.cluster.storage_nodes[0].get_control_endpoint(),
target_type="container",
target_name=container,
chain_id="denyPatchObject",
)
with reporter.step("Check patch object in container on the first node, expected allow"):
with expect_not_raises():
patched_oid_2 = grpc_client.object.patch(
container,
patched_oid_1,
self.cluster.storage_nodes[0].get_rpc_endpoint(),
ranges=["600:0"],
payloads=[test_file],
new_attrs="patched_by_first_node=true",
timeout="200s",
)
            assert patched_oid_1 != patched_oid_2, "OID of the patched object must differ from the previously patched one"

View file

@ -1,102 +1,34 @@
import logging
import random
import os
import shutil
from datetime import datetime
from typing import Optional
import allure
import pytest
from frostfs_testlib import plugins, reporter
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.clients import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, S3HttpClient
from frostfs_testlib.clients.s3 import BucketContainerResolver, VersioningStatus
from frostfs_testlib.credentials.interfaces import CredentialsProvider, User
from frostfs_testlib.healthcheck.interfaces import Healthcheck
from frostfs_testlib.hosting import Hosting
from frostfs_testlib.resources import optionals
from frostfs_testlib.resources.common import COMPLEX_OBJECT_CHUNKS_COUNT, COMPLEX_OBJECT_TAIL_SIZE, SIMPLE_OBJECT_SIZE
from frostfs_testlib.shell import LocalShell, Shell
from frostfs_testlib.steps import s3_helper
from frostfs_testlib.steps.cli.container import DEFAULT_EC_PLACEMENT_RULE, DEFAULT_PLACEMENT_RULE, FROSTFS_CLI_EXEC
from frostfs_testlib.steps.cli.object import get_netmap_netinfo
from frostfs_testlib.steps.epoch import ensure_fresh_epoch
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.policy import PlacementPolicy
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.storage.grpc_operations.client_wrappers import CliClientWrapper
from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.parallel import parallel
from frostfs_testlib.testing.test_control import cached_fixture, run_optionally
from frostfs_testlib.utils import env_utils, string_utils, version_utils
from frostfs_testlib.utils.file_utils import TestFile, generate_file
from ..helpers.container_creation import create_container_with_ape, create_containers_with_ape
from ..helpers.container_request import EVERYONE_ALLOW_ALL, ContainerRequest, MultipleContainersRequest
from ..resources.common import TEST_CYCLES_COUNT
import wallet
import yaml
from binary_version_helper import get_local_binaries_versions, get_remote_binaries_versions
from common import (
ASSETS_DIR,
FREE_STORAGE,
HOSTING_CONFIG_FILE,
MAINNET_WALLET_PATH,
NEOFS_NETMAP_DICT,
)
from env_properties import save_env_properties
from neofs_testlib.hosting import Hosting
from neofs_testlib.reporter import AllureHandler, get_reporter
from neofs_testlib.shell import LocalShell, Shell
from payment_neogo import neofs_deposit, transfer_mainnet_gas
from python_keywords.node_management import node_healthcheck
logger = logging.getLogger("NeoLogger")
WALLTETS_IN_POOL = 2
# Add the logs check test to the run even if it does not match the mark selectors
def pytest_configure(config: pytest.Config):
markers = config.option.markexpr
if markers != "" and "sanity" not in markers:
config.option.markexpr = f"logs_after_session or ({markers})"
number_key = pytest.StashKey[str]()
start_time = pytest.StashKey[int]()
test_outcome = pytest.StashKey[str]()
# pytest hook. Do not rename
def pytest_collection_finish(session: pytest.Session):
items_total = len(session.items)
for number, item in enumerate(session.items, 1):
item.stash[number_key] = f"[{number}/{items_total}]"
item.stash[test_outcome] = ""
item.stash[start_time] = 0
# pytest hook. Do not rename
def pytest_runtest_setup(item: pytest.Item):
item.stash[start_time] = int(datetime.now().timestamp())
logger.info(f"STARTED {item.stash[number_key]}: {item.name}")
# pytest hook. Do not rename
def pytest_runtest_makereport(item: pytest.Item, call: pytest.CallInfo):
if call.excinfo is not None:
if call.excinfo.typename == "Skipped":
item.stash[start_time] = int(datetime.now().timestamp())
item.stash[test_outcome] += f"SKIPPED on {call.when}; "
else:
item.stash[test_outcome] += f"FAILED on {call.when}; "
if call.when == "teardown":
duration = int(datetime.now().timestamp()) - item.stash[start_time]
if not item.stash[test_outcome]:
outcome = "PASSED "
else:
outcome = item.stash[test_outcome]
logger.info(f"ENDED {item.stash[number_key]}: {item.name}: {outcome}(duration={duration}s)")
# pytest hook. Do not rename
def pytest_generate_tests(metafunc: pytest.Metafunc):
if (
TEST_CYCLES_COUNT <= 1
or metafunc.definition.get_closest_marker("logs_after_session")
or metafunc.definition.get_closest_marker("no_cycles")
):
return
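    # e.g. with TEST_CYCLES_COUNT=3 every selected test runs three times,
    # reported as "cycle 1", "cycle 2" and "cycle 3".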
metafunc.fixturenames.append("cycle")
metafunc.parametrize("cycle", range(1, TEST_CYCLES_COUNT + 1), ids=[f"cycle {cycle}" for cycle in range(1, TEST_CYCLES_COUNT + 1)])
@pytest.fixture(scope="session")
def configure_testlib():
get_reporter().register_handler(AllureHandler())
yield
@pytest.fixture(scope="session")
@ -104,6 +36,16 @@ def client_shell(configure_testlib) -> Shell:
yield LocalShell()
@pytest.fixture(scope="session")
def hosting(configure_testlib) -> Hosting:
with open(HOSTING_CONFIG_FILE, "r") as file:
hosting_config = yaml.full_load(file)
hosting_instance = Hosting()
hosting_instance.configure(hosting_config)
yield hosting_instance
@pytest.fixture(scope="session")
def require_multiple_hosts(hosting: Hosting):
"""Designates tests that require environment with multiple hosts.
@ -115,458 +57,69 @@ def require_multiple_hosts(hosting: Hosting):
yield
@pytest.fixture(scope="session")
def require_multiple_interfaces(cluster: Cluster):
"""
    Check that the cluster nodes expose the network interfaces required by tests.
    If the required interfaces are missing, dependent tests are skipped.
"""
interfaces = cluster.cluster_nodes[0].host.config.interfaces
if "internal1" not in interfaces or "data1" not in interfaces:
pytest.skip("This test requires multiple internal and data interfaces")
return
@pytest.fixture(scope="session")
@cached_fixture(optionals.OPTIONAL_CACHE_FIXTURES)
def max_object_size(cluster: Cluster, client_shell: Shell) -> int:
storage_node = cluster.storage_nodes[0]
wallet = WalletInfo.from_node(storage_node)
net_info = get_netmap_netinfo(wallet=wallet, endpoint=storage_node.get_rpc_endpoint(), shell=client_shell)
return net_info["maximum_object_size"]
@pytest.fixture(scope="session")
def simple_object_size(max_object_size: int) -> ObjectSize:
size = min(int(SIMPLE_OBJECT_SIZE), max_object_size)
return ObjectSize("simple", size)
@pytest.fixture(scope="session")
def complex_object_size(max_object_size: int) -> ObjectSize:
size = max_object_size * int(COMPLEX_OBJECT_CHUNKS_COUNT) + int(COMPLEX_OBJECT_TAIL_SIZE)
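    # A complex object therefore always spans COMPLEX_OBJECT_CHUNKS_COUNT
    # full-size parts plus a small tail, so it is guaranteed to be split on PUT.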
return ObjectSize("complex", size)
# By default we want all tests to be executed with both object sizes
# This can be overridden in chosen tests if needed
@pytest.fixture(
scope="session",
params=[
pytest.param("simple", marks=[pytest.mark.simple, pytest.mark.exclude_sanity]),
pytest.param("complex", marks=pytest.mark.complex),
],
)
def object_size(simple_object_size: ObjectSize, complex_object_size: ObjectSize, request: pytest.FixtureRequest) -> ObjectSize:
if request.param == "simple":
return simple_object_size
return complex_object_size
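# A hedged sketch of such an override in a chosen test; indirect parametrization
# replaces the fixture's own params for that test only (test name is hypothetical):
#
#     @pytest.mark.parametrize("object_size", ["simple"], indirect=True)
#     def test_simple_objects_only(object_size: ObjectSize):
#         ...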
@pytest.fixture()
def test_file(object_size: ObjectSize) -> TestFile:
return generate_file(object_size.value)
@pytest.fixture(scope="module")
def test_file_module(object_size: ObjectSize) -> TestFile:
return generate_file(object_size.value)
# Deprecated. Please migrate all to test_file
@pytest.fixture()
def file_path(test_file: TestFile) -> TestFile:
return test_file
@pytest.fixture(scope="session")
def rep_placement_policy() -> PlacementPolicy:
return PlacementPolicy("rep", DEFAULT_PLACEMENT_RULE)
@pytest.fixture(scope="session")
def ec_placement_policy() -> PlacementPolicy:
return PlacementPolicy("ec", DEFAULT_EC_PLACEMENT_RULE)
@pytest.fixture(scope="session")
@allure.title("Init Frostfs CLI")
def frostfs_cli(client_shell: Shell, wallet: WalletInfo) -> FrostfsCli:
return FrostfsCli(client_shell, FROSTFS_CLI_EXEC, wallet.config_path)
@pytest.fixture(scope="session")
@allure.title("Init Frostfs CLI remote")
def remote_frostfs_cli(cluster: Cluster) -> FrostfsCli:
node = cluster.cluster_nodes[0]
host = node.host
service_config = host.get_service_config(node.storage_node.name)
wallet_path = service_config.attributes["wallet_path"]
wallet_password = service_config.attributes["wallet_password"]
shell = host.get_shell()
wallet_config_path = f"/tmp/{node.storage_node.name}-config.yaml"
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
cli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config_path)
return cli
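# For clarity, the file written above lands at /tmp/<storage_node_name>-config.yaml
# on the node and would look roughly like this (wallet path is illustrative):
#
#     wallet: /path/to/storage/wallet01.json
#     password: ""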
@pytest.fixture(scope="session")
@allure.title("Init GrpcClientWrapper with local Frostfs CLI")
def grpc_client(frostfs_cli: FrostfsCli) -> GrpcClientWrapper:
return CliClientWrapper(frostfs_cli)
@pytest.fixture(scope="session")
@allure.title("Init GrpcClientWrapper with remote Frostfs CLI")
def remote_grpc_client(remote_frostfs_cli: FrostfsCli) -> GrpcClientWrapper:
return CliClientWrapper(remote_frostfs_cli)
# By default we want all tests to be executed with both storage policies.
# This can be overridden in chosen tests if needed (see the note below).
@pytest.fixture(scope="session", params=[pytest.param("rep", marks=pytest.mark.rep), pytest.param("ec", marks=pytest.mark.ec)])
def placement_policy(
rep_placement_policy: PlacementPolicy, ec_placement_policy: PlacementPolicy, request: pytest.FixtureRequest
) -> PlacementPolicy:
if request.param == "rep":
return rep_placement_policy
elif request.param == "ec":
return ec_placement_policy
return request.param
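# A hedged note on the two flavors: "rep" rules store full object replicas
# (e.g. "REP 2 ..."), while "ec" rules erasure-code the payload (e.g. "EC 3.1",
# i.e. 3 data chunks plus 1 parity chunk); the exact rules used here come from
# DEFAULT_PLACEMENT_RULE and DEFAULT_EC_PLACEMENT_RULE.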
@pytest.fixture(scope="session")
def cluster(temp_directory: str, hosting: Hosting, client_shell: Shell) -> Cluster:
cluster = Cluster(hosting)
if cluster.is_local_devenv():
cluster.create_wallet_configs(hosting)
ClusterTestBase.shell = client_shell
ClusterTestBase.cluster = cluster
yield cluster
@allure.title("[Session]: Provide S3 policy")
@pytest.fixture(scope="session")
def s3_policy(request: pytest.FixtureRequest):
policy = None
if "param" in request.__dict__:
policy = request.param
return policy
@pytest.fixture(scope="session")
@allure.title("[Session] Create healthcheck object")
def healthcheck(cluster: Cluster) -> Healthcheck:
healthcheck_cls = plugins.load_plugin("frostfs.testlib.healthcheck", cluster.cluster_nodes[0].host.config.healthcheck_plugin_name)
return healthcheck_cls()
@pytest.fixture(scope="session")
def cluster_state_controller_session(client_shell: Shell, cluster: Cluster, healthcheck: Healthcheck) -> ClusterStateController:
controller = ClusterStateController(client_shell, cluster, healthcheck)
return controller
@pytest.fixture
def cluster_state_controller(cluster_state_controller_session: ClusterStateController) -> ClusterStateController:
yield cluster_state_controller_session
cluster_state_controller_session.start_stopped_hosts()
cluster_state_controller_session.start_all_stopped_services()
@pytest.fixture(scope="session")
def credentials_provider(cluster: Cluster) -> CredentialsProvider:
return CredentialsProvider(cluster)
@allure.title("[Session]: Create S3 client")
@pytest.fixture(
scope="session",
params=[
pytest.param(AwsCliClient, marks=[pytest.mark.aws, pytest.mark.weekly, pytest.mark.exclude_sanity]),
pytest.param(Boto3ClientWrapper, marks=[pytest.mark.boto3, pytest.mark.nightly]),
],
)
def s3_client(
user: User,
s3_policy: Optional[str],
cluster: Cluster,
request: pytest.FixtureRequest,
credentials_provider: CredentialsProvider,
) -> S3ClientWrapper:
node = cluster.cluster_nodes[0]
credentials_provider.S3.provide(user, node, s3_policy)
s3_client_cls = request.param
client = s3_client_cls(user.s3_credentials.access_key, user.s3_credentials.secret_key, cluster.default_s3_gate_endpoint)
return client
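# A hedged note: because this fixture is parametrized with the client classes,
# collected test IDs include the class name (e.g. "test_x[AwsCliClient-...]");
# the buckets_pool fixture below relies on that when matching tests to the active client.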
@allure.title("[Session] Create S3 http client")
@pytest.fixture(scope="session")
def s3_http_client(
default_user: User, s3_policy: Optional[str], cluster: Cluster, credentials_provider: CredentialsProvider
) -> S3HttpClient:
node = cluster.cluster_nodes[0]
credentials_provider.S3.provide(default_user, node, s3_policy)
return S3HttpClient(
cluster.default_s3_gate_endpoint,
default_user.s3_credentials.access_key,
default_user.s3_credentials.secret_key,
)
@pytest.fixture
def versioning_status(request: pytest.FixtureRequest) -> VersioningStatus:
if "param" in request.__dict__:
return request.param
return VersioningStatus.UNDEFINED
@allure.title("[Session] Bulk create buckets for tests")
@pytest.fixture(scope="session")
def buckets_pool(s3_client: S3ClientWrapper, request: pytest.FixtureRequest):
test_buckets: list = []
s3_client_type = type(s3_client).__name__
for test in request.session.items:
if s3_client_type not in test.name:
continue
if "bucket" in test.fixturenames:
test_buckets.append(string_utils.unique_name("bucket-"))
if "two_buckets" in test.fixturenames:
test_buckets.append(string_utils.unique_name("bucket-"))
test_buckets.append(string_utils.unique_name("bucket-"))
if test_buckets:
parallel(s3_client.create_bucket, test_buckets)
return test_buckets
@allure.title("[Test] Create bucket")
@pytest.fixture
def bucket(buckets_pool: list[str], s3_client: S3ClientWrapper, versioning_status: VersioningStatus):
if buckets_pool:
bucket_name = buckets_pool.pop()
else:
bucket_name = s3_client.create_bucket()
if versioning_status:
s3_helper.set_bucket_versioning(s3_client, bucket_name, versioning_status)
return bucket_name
@allure.title("[Test] Create two buckets")
@pytest.fixture
def two_buckets(buckets_pool: list[str], s3_client: S3ClientWrapper) -> list[str]:
buckets: list[str] = []
for _ in range(2):
if buckets_pool:
buckets.append(buckets_pool.pop())
else:
buckets.append(s3_client.create_bucket())
return buckets
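# A hedged usage sketch: a test opts into the pre-created pool simply by
# requesting the fixtures (test names are hypothetical):
#
#     def test_one_bucket(s3_client: S3ClientWrapper, bucket: str): ...
#     def test_two_buckets(s3_client: S3ClientWrapper, two_buckets: list[str]): ...
#
# Names are created in parallel at session start; pops fall back to on-demand
# creation if the pool runs dry.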
@allure.title("[Autouse/Session] Collect binary versions")
@pytest.fixture(scope="session", autouse=True)
@run_optionally(optionals.OPTIONAL_AUTOUSE_FIXTURES_ENABLED)
def collect_binary_versions(hosting: Hosting, client_shell: Shell, request: pytest.FixtureRequest):
environment_dir = request.config.getoption("--alluredir")
if not environment_dir:
return None
@allure.title("Check binary versions")
def check_binary_versions(request, hosting: Hosting, client_shell: Shell):
local_versions = get_local_binaries_versions(client_shell)
remote_versions = get_remote_binaries_versions(hosting)
local_versions = version_utils.get_local_binaries_versions(client_shell)
remote_versions = version_utils.get_remote_binaries_versions(hosting)
remote_versions_keys = list(remote_versions.keys())
all_versions = {
**local_versions,
**{
f"{name}_{remote_versions_keys.index(host) + 1:02d}": version
for host, versions in remote_versions.items()
for name, version in versions.items()
},
}
file_path = f"{environment_dir}/environment.properties"
env_utils.save_env_properties(file_path, all_versions)
all_versions = {**local_versions, **remote_versions}
save_env_properties(request.config, all_versions)
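# An illustrative environment.properties produced by the new fixture
# (binary names and versions are hypothetical):
#
#     frostfs-cli=0.42.0
#     frostfs-node_01=0.42.0
#     frostfs-node_02=0.42.0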
@pytest.fixture(scope="session")
def rpc_endpoint(cluster: Cluster):
return cluster.default_rpc_endpoint
@allure.title("Prepare tmp directory")
def prepare_tmp_dir():
full_path = f"{os.getcwd()}/{ASSETS_DIR}"
shutil.rmtree(full_path, ignore_errors=True)
os.mkdir(full_path)
yield full_path
shutil.rmtree(full_path)
@reporter.step("Prepare default user with wallet")
@pytest.fixture(scope="session")
@cached_fixture(optionals.OPTIONAL_CACHE_FIXTURES)
def default_user(credentials_provider: CredentialsProvider, cluster: Cluster) -> User:
user = User(string_utils.unique_name("user-"))
node = cluster.cluster_nodes[0]
@pytest.fixture(scope="session", autouse=True)
@allure.title("Collect logs")
def collect_logs(prepare_tmp_dir, hosting: Hosting):
start_time = datetime.utcnow()
yield
end_time = datetime.utcnow()
credentials_provider.GRPC.provide(user, node)
# Dump logs to temp directory (because they might be too large to keep in RAM)
logs_dir = os.path.join(prepare_tmp_dir, "logs")
os.makedirs(logs_dir)
return user
for host in hosting.hosts:
host.dump_logs(logs_dir, since=start_time, until=end_time)
# Zip all files and attach to Allure because it is more convenient to download a single
# zip with all logs rather than mess with individual logs files per service or node
logs_zip_file_path = shutil.make_archive(logs_dir, "zip", logs_dir)
allure.attach.file(logs_zip_file_path, name="logs.zip", extension="zip")
@pytest.fixture(scope="session", autouse=True)
@allure.title("Run health check for all storage nodes")
def run_health_check(collect_logs, hosting: Hosting):
failed_nodes = []
for node_name in NEOFS_NETMAP_DICT.keys():
health_check = node_healthcheck(hosting, node_name)
if health_check.health_status != "READY" or health_check.network_status != "ONLINE":
failed_nodes.append(node_name)
if failed_nodes:
raise AssertionError(f"Nodes {failed_nodes} are not healthy")
@pytest.fixture(scope="session")
@cached_fixture(optionals.OPTIONAL_CACHE_FIXTURES)
def users_pool(credentials_provider: CredentialsProvider, cluster: Cluster) -> list[User]:
users = [User(string_utils.unique_name("user-")) for _ in range(WALLTETS_IN_POOL)]
parallel(credentials_provider.GRPC.provide, users, cluster_node=cluster.cluster_nodes[0])
return users
@allure.title("Prepare wallet and deposit")
def prepare_wallet_and_deposit(prepare_tmp_dir):
wallet_path, address, _ = wallet.init_wallet(ASSETS_DIR)
logger.info(f"Init wallet: {wallet_path}, address: {address}")
allure.attach.file(wallet_path, os.path.basename(wallet_path), allure.attachment_type.JSON)
if not FREE_STORAGE:
deposit = 30
transfer_mainnet_gas(wallet_path, deposit + 1, wallet_path=MAINNET_WALLET_PATH)
neofs_deposit(wallet_path, deposit)
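# A hedged reading of the amounts above: deposit + 1 GAS is transferred so that
# the full deposit of 30 can be made while roughly 1 GAS remains for transaction fees.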
@pytest.fixture(scope="session")
def user_tag(request: pytest.FixtureRequest) -> str:
tag = "default"
if "param" in request.__dict__:
tag = request.param
return tag
@pytest.fixture(scope="session")
@cached_fixture(optionals.OPTIONAL_CACHE_FIXTURES)
@reporter.step("Create {user_tag} user")
def user(user_tag: str) -> User:
user = User(string_utils.unique_name("user-"))
user.attributes["tag"] = user_tag
return user
@pytest.fixture(scope="session")
@cached_fixture(optionals.OPTIONAL_CACHE_FIXTURES)
def wallet(user: User, credentials_provider: CredentialsProvider, cluster: Cluster) -> WalletInfo:
credentials_provider.GRPC.provide(user, cluster.cluster_nodes[0])
return user.wallet
# TODO: Migrate tests to fixture wallet above
@reporter.step("Get wallet for default user")
@pytest.fixture(scope="session")
def default_wallet(wallet: WalletInfo) -> WalletInfo:
return wallet
@pytest.fixture(scope="session")
@cached_fixture(optionals.OPTIONAL_CACHE_FIXTURES)
def wallets_pool(users_pool: list[User]) -> list[WalletInfo]:
return [user.wallet for user in users_pool]
@pytest.fixture(scope="session")
def other_wallet(wallets_pool: list[WalletInfo]) -> WalletInfo:
if not wallets_pool:
raise RuntimeError("[other_wallet] No wallets in pool. Consider increasing WALLTETS_IN_POOL or review.")
return wallets_pool.pop()
@pytest.fixture(scope="session")
def other_wallet_2(wallets_pool: list[WalletInfo]) -> WalletInfo:
if not wallets_pool:
raise RuntimeError("[other_wallet2] No wallets in pool. Consider increasing WALLTETS_IN_POOL or review.")
return wallets_pool.pop()
@pytest.fixture()
@allure.title("Select random node for testing")
def node_under_test(cluster: Cluster) -> ClusterNode:
selected_node = random.choice(cluster.cluster_nodes)
reporter.attach(f"{selected_node}", "Selected node")
return selected_node
@allure.title("Init bucket container resolver")
@pytest.fixture()
def bucket_container_resolver(node_under_test: ClusterNode) -> BucketContainerResolver:
resolver_cls = plugins.load_plugin("frostfs.testlib.bucket_cid_resolver", node_under_test.host.config.product)
resolver: BucketContainerResolver = resolver_cls()
return resolver
@pytest.fixture(scope="session", params=[pytest.param(EVERYONE_ALLOW_ALL)])
def container_request(request: pytest.FixtureRequest) -> ContainerRequest:
if "param" in request.__dict__:
return request.param
container_marker = request.node.get_closest_marker("container")
# Let the default container be public for now
container_request = EVERYONE_ALLOW_ALL
if container_marker:
if len(container_marker.args) != 1:
raise RuntimeError(f"Something wrong with container marker: {container_marker}")
container_request = container_marker.args[0]
if not container_request:
raise RuntimeError(
f"""Container specification is empty.
Add @pytest.mark.parametrize("container_request", [ContainerRequest(...)], indirect=True) decorator."""
)
return container_request
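# A hedged usage sketch of the marker route above (the ContainerRequest argument
# shape is assumed from the helpers module):
#
#     @pytest.mark.container(ContainerRequest("REP 2 IN X CBF 1 SELECT 2 FROM * AS X"))
#     def test_with_custom_container(container: str):
#         ...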
@pytest.fixture(scope="session")
def multiple_containers_request(request: pytest.FixtureRequest) -> MultipleContainersRequest:
if "param" in request.__dict__:
return request.param
raise RuntimeError(
f"""Container specification is empty.
Add @pytest.mark.parametrize("multiple_containers_request", [[ContainerRequest(...), ..., ContainerRequest(...)]], indirect=True) decorator."""
)
@pytest.fixture
def container(
wallet: WalletInfo,
frostfs_cli: FrostfsCli,
client_shell: Shell,
cluster: Cluster,
rpc_endpoint: str,
container_request: ContainerRequest,
) -> str:
return create_container_with_ape(container_request, frostfs_cli, wallet, client_shell, cluster, rpc_endpoint)
@pytest.fixture
def containers(
wallet: WalletInfo,
frostfs_cli: FrostfsCli,
client_shell: Shell,
cluster: Cluster,
rpc_endpoint: str,
multiple_containers_request: MultipleContainersRequest,
) -> list[str]:
return create_containers_with_ape(frostfs_cli, wallet, client_shell, cluster, rpc_endpoint, multiple_containers_request)
@pytest.fixture()
def new_epoch(client_shell: Shell, cluster: Cluster) -> int:
return ensure_fresh_epoch(client_shell, cluster)
@pytest.fixture(scope="module")
def new_epoch_module_scope(client_shell: Shell, cluster: Cluster) -> int:
return ensure_fresh_epoch(client_shell, cluster)

View file

@ -1,8 +1,9 @@
import json
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.resources.wellknown_acl import PRIVATE_ACL_F
from frostfs_testlib.steps.cli.container import (
from epoch import tick_epoch
from python_keywords.container import (
create_container,
delete_container,
get_container,
@ -10,86 +11,86 @@ from frostfs_testlib.steps.cli.container import (
wait_for_container_creation,
wait_for_container_deletion,
)
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from ...helpers.utility import placement_policy_from_container
from utility import placement_policy_from_container
from wellknown_acl import PRIVATE_ACL_F
@pytest.mark.nightly
@pytest.mark.parametrize("name", ["", "test-container"], ids=["No name", "Set particular name"])
@pytest.mark.sanity
@pytest.mark.container
class TestContainer(ClusterTestBase):
PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
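# A hedged reading of this rule: keep 2 replicas (REP 2) on nodes drawn from
# selector X, which picks 2 nodes from the whole netmap (SELECT 2 FROM * AS X),
# with a container backup factor of 1 (CBF 1).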
def test_container_creation(client_shell, prepare_wallet_and_deposit, name):
scenario_title = f"with name {name}" if name else "without name"
allure.dynamic.title(f"User can create container {scenario_title}")
@allure.title("Create container (name={name})")
@pytest.mark.parametrize("name", ["", "test-container"], ids=["No name", "Set particular name"])
@pytest.mark.smoke
def test_container_creation(self, default_wallet: WalletInfo, name: str, rpc_endpoint: str):
wallet = default_wallet
wallet = prepare_wallet_and_deposit
with open(wallet) as file:
json_wallet = json.load(file)
cid = create_container(wallet, self.shell, rpc_endpoint, self.PLACEMENT_RULE, name=name)
placement_rule = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
cid = create_container(wallet, rule=placement_rule, name=name, shell=client_shell)
containers = list_containers(wallet, self.shell, rpc_endpoint)
assert cid in containers, f"Expected container {cid} in containers: {containers}"
containers = list_containers(wallet, shell=client_shell)
assert cid in containers, f"Expected container {cid} in containers: {containers}"
container_info: str = get_container(wallet, cid, self.shell, rpc_endpoint, False)
container_info = container_info.casefold() # To ignore case when comparing with expected values
container_info: str = get_container(wallet, cid, json_mode=False, shell=client_shell)
container_info = container_info.casefold() # To ignore case when comparing with expected values
info_to_check = {
f"owner ID: {wallet.get_address_from_json(0)}",
f"CID: {cid}",
}
if name:
info_to_check.add(f"Name={name}")
info_to_check = {
f"basic ACL: {PRIVATE_ACL_F} (private)",
f"owner ID: {json_wallet.get('accounts')[0].get('address')}",
f"container ID: {cid}",
}
if name:
info_to_check.add(f"Name={name}")
with reporter.step("Check container has correct information"):
expected_policy = self.PLACEMENT_RULE.casefold()
actual_policy = placement_policy_from_container(container_info)
assert actual_policy == expected_policy, f"Expected policy\n{expected_policy} but got policy\n{actual_policy}"
with allure.step("Check container has correct information"):
expected_policy = placement_rule.casefold()
actual_policy = placement_policy_from_container(container_info)
assert (
actual_policy == expected_policy
), f"Expected policy\n{expected_policy} but got policy\n{actual_policy}"
for info in info_to_check:
expected_info = info.casefold()
assert expected_info in container_info, f"Expected {expected_info} in container info:\n{container_info}"
for info in info_to_check:
expected_info = info.casefold()
assert (
expected_info in container_info
), f"Expected {expected_info} in container info:\n{container_info}"
with reporter.step("Delete container and check it was deleted"):
# Force delete to skip frostfs-cli verifications before removal.
# Because no APE rules are assigned to the container, those verifications would fail due to APE request denial.
delete_container(wallet, cid, self.shell, rpc_endpoint, force=True, await_mode=True)
self.tick_epoch()
wait_for_container_deletion(wallet, cid, self.shell, rpc_endpoint)
with allure.step("Delete container and check it was deleted"):
delete_container(wallet, cid, shell=client_shell)
tick_epoch()
wait_for_container_deletion(wallet, cid, shell=client_shell)
@allure.title("Delete container without force")
@pytest.mark.smoke
def test_container_deletion_no_force(self, container: str, default_wallet: WalletInfo, rpc_endpoint: str):
with reporter.step("Delete container and check it was deleted"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=True)
self.tick_epoch()
wait_for_container_deletion(default_wallet, container, self.shell, rpc_endpoint)
@allure.title("Parallel container creation and deletion")
@pytest.mark.exclude_sanity
def test_container_creation_deletion_parallel(self, default_wallet: WalletInfo, rpc_endpoint: str):
containers_count = 3
wallet = default_wallet
iteration_count = 10
@allure.title("Parallel container creation and deletion")
@pytest.mark.sanity
@pytest.mark.container
def test_container_creation_deletion_parallel(client_shell, prepare_wallet_and_deposit):
containers_count = 3
wallet = prepare_wallet_and_deposit
placement_rule = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
for _ in range(iteration_count):
cids: list[str] = []
with reporter.step(f"Create {containers_count} containers"):
for _ in range(containers_count):
cids.append(
create_container(wallet, self.shell, rpc_endpoint, self.PLACEMENT_RULE, await_mode=False, wait_for_creation=False)
)
cids: list[str] = []
with allure.step(f"Create {containers_count} containers"):
for _ in range(containers_count):
cids.append(
create_container(
wallet,
rule=placement_rule,
await_mode=False,
shell=client_shell,
wait_for_creation=False,
)
)
with reporter.step("Wait for containers occur in container list"):
for cid in cids:
wait_for_container_creation(wallet, cid, self.shell, rpc_endpoint, sleep_interval=containers_count)
with allure.step(f"Wait for containers occur in container list"):
for cid in cids:
wait_for_container_creation(
wallet, cid, sleep_interval=containers_count, shell=client_shell
)
with reporter.step("Delete containers and check they were deleted"):
for cid in cids:
# Force delete to skip frostfs-cli verifications before removal.
# Because no APE rules are assigned to the container, those verifications would fail due to APE request denial.
delete_container(wallet, cid, self.shell, rpc_endpoint, force=True, await_mode=True)
containers_list = list_containers(wallet, self.shell, rpc_endpoint)
assert cid not in containers_list, "Container not deleted"
with allure.step("Delete containers and check they were deleted"):
for cid in cids:
delete_container(wallet, cid, shell=client_shell)
tick_epoch()
wait_for_container_deletion(wallet, cid, shell=client_shell)

View file

@ -1,884 +0,0 @@
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.steps.cli.container import create_container, delete_container
from frostfs_testlib.steps.cli.object import delete_object, put_object_to_random_node
from frostfs_testlib.steps.node_management import get_netmap_snapshot
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.cli_utils import parse_netmap_output
from frostfs_testlib.utils.file_utils import generate_file
from ...helpers.container_request import PUBLIC_WITH_POLICY, ContainerRequest
from ...helpers.policy_validation import get_netmap_param, validate_object_policy
from ...resources.policy_error_patterns import NOT_ENOUGH_TO_SELECT, NOT_FOUND_FILTER, NOT_FOUND_SELECTOR, NOT_PARSE_POLICY
@pytest.mark.nightly
@pytest.mark.policy
class TestPolicy(ClusterTestBase):
@allure.title("[NEGATIVE] Placement policy: Can't parse placement policy")
def test_placement_policy_negative(self, default_wallet: WalletInfo, rpc_endpoint: str):
"""
Negative test for placement policy: Can't parse placement policy.
"""
placement_rule = "REP 1 IN SPB REP 1 in MSK CBF 1 SELECT 1 FROM SPBRU AS SPB SELECT 1 FROM MSKRU AS MSK FILTER SubDivCode EQ SPE AS SPBRU FILTER NOT (Country EQ Sweden OR CountryCode EQ FI) AND Location EQ Moskva AS MSKRU"
with reporter.step(f"Create container with policy {placement_rule}"):
with pytest.raises(Exception, match=NOT_PARSE_POLICY):
create_container(default_wallet, self.shell, rpc_endpoint, placement_rule)
@allure.title("[NEGATIVE] Placement policy: Not enough nodes to SELECT")
def test_placement_policy_negative_not_enough_nodes_to_select(self, default_wallet: WalletInfo, rpc_endpoint: str):
"""
Negative test for placement policy: Not enough nodes to SELECT.
"""
placement_rule = "REP 2 IN RU SELECT 2 FROM RUS AS RU FILTER Country EQ Russia AND SubDivCode EQ SPE AS RUS"
with reporter.step(f"Create container with policy {placement_rule}"):
with pytest.raises(Exception, match=NOT_ENOUGH_TO_SELECT):
create_container(default_wallet, self.shell, rpc_endpoint, placement_rule)
@allure.title("[NEGATIVE] Placement policy: Filter not found")
def test_placement_policy_negative_not_enough_nodes_to_filter(self, default_wallet: WalletInfo, rpc_endpoint: str):
"""
Negative test for placement policy: Filter not found.
"""
placement_rule = (
"REP 2 IN HALF CBF 1 SELECT 2 FROM GT15 AS HALF FILTER @NOTRU AND Price GT 15 AS GT15 FILTER CountryCode NE RU AS NOTRU"
)
with reporter.step(f"Create container with policy {placement_rule}"):
with pytest.raises(Exception, match=NOT_FOUND_FILTER):
create_container(default_wallet, self.shell, rpc_endpoint, placement_rule)
@allure.title("[NEGATIVE] Placement policy: SELECTOR not found")
def test_placement_policy_negative_not_enough_nodes_to_selector(self, default_wallet: WalletInfo, rpc_endpoint: str):
"""
Negative test for placement policy: SELECTOR not found.
"""
placement_rule = "REP 2 IN HALFIC CBF 1 SELECT 2 FROM GT15 AS HALF FILTER Price GT 15 AS GT15"
with reporter.step(f"Create container with policy {placement_rule}"):
with pytest.raises(Exception, match=NOT_FOUND_SELECTOR):
create_container(default_wallet, self.shell, rpc_endpoint, placement_rule)
@allure.title("Simple policy results with one node")
@pytest.mark.parametrize("container_request", [PUBLIC_WITH_POLICY("REP 1 REP 1 CBF 1")], indirect=True)
def test_simple_policy_results_with_one_node(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement simple policy results with one node.
"""
file_path = generate_file(simple_object_size.value)
expected_copies = 1
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, self.shell, self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with SELECT results with one node")
@pytest.mark.parametrize(
"container_request", [PUBLIC_WITH_POLICY("REP 1 IN AnyNode REP 1 IN AnyNode CBF 1 SELECT 1 FROM * AS AnyNode")], indirect=True
)
def test_policy_with_select_results_with_one_node(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with SELECT results with one node.
"""
file_path = generate_file(simple_object_size.value)
expected_copies = 1
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, self.shell, self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with SELECT and FILTER results with one node")
@pytest.mark.parametrize(
"container_request",
[
PUBLIC_WITH_POLICY(
"REP 1 IN MyRussianNodes REP 1 IN MyRussianNodes CBF 1 SELECT 1 FROM RussianNodes AS MyRussianNodes FILTER Country EQ 'Russia' AS RussianNodes"
)
],
indirect=True,
)
def test_policy_with_select_and_filter_results_with_one_node(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with SELECT and FILTER results with one node.
"""
file_path = generate_file(simple_object_size.value)
expected_copies = 1
placement_params = {"country": "Russia"}
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, self.shell, self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
node_address = self.cluster.node(resulting_copies[0]).get_interface(Interfaces.MGMT)
with reporter.step(f"Check the node is selected from {placement_params['country']}"):
assert (
placement_params["country"] == netmap[node_address]["country"]
), f"The node is selected from the wrong country. Got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with SELECT and Complex FILTER results with one node")
@pytest.mark.parametrize(
"container_request",
[
PUBLIC_WITH_POLICY(
"REP 1 IN RUS REP 1 IN RUS CBF 1 SELECT 1 FROM RU AS RUS FILTER Country NE Sweden AS NotSE FILTER @NotSE AND NOT (CountryCode EQ FI) AND Country EQ 'Russia' AS RU"
)
],
indirect=True,
)
def test_policy_with_select_and_complex_filter_results_with_one_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with SELECT and Complex FILTER results with one node.
"""
placement_params = {"country": ["Russia", "Sweden"], "country_code": "FI"}
file_path = generate_file(simple_object_size.value)
expected_copies = 1
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, self.shell, self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
node_address = self.cluster.node(resulting_copies[0]).get_interface(Interfaces.MGMT)
with reporter.step(f"Check the node is selected from {placement_params['country'][0]}"):
assert (
not (placement_params["country"][1] == netmap[node_address]["country"])
or not (placement_params["country_code"] == netmap[node_address]["country_code"])
and (placement_params["country"][0] == netmap[node_address]["country"])
), f"The node is selected from the wrong country or country code. Got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with Multi SELECTs and FILTERs results with one node")
@pytest.mark.parametrize(
"container_request",
[
PUBLIC_WITH_POLICY(
"REP 1 IN RU REP 1 IN EU REP 1 IN RU CBF 1 SELECT 1 FROM RUS AS RU SELECT 1 FROM EUR AS EU FILTER Country EQ Russia AS RUS FILTER NOT (@RUS) AND Country EQ Sweden OR CountryCode EQ FI AS EUR"
)
],
indirect=True,
)
def test_policy_with_multi_selects_and_filters_results_with_one_node(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with Multi SELECTs and FILTERs results with one node.
"""
placement_params = {"country": ["Sweden", "Russia"], "country_code": "FI"}
file_path = generate_file(simple_object_size.value)
expected_copies = 2
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, self.shell, self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
with reporter.step(f"Check two nodes are selected from any country"):
for storage_node in resulting_copies:
node_address = self.cluster.node(storage_node).get_interface(Interfaces.MGMT)
assert (placement_params["country"][1] == netmap[node_address]["country"]) or (
not (placement_params["country"][1] == netmap[node_address]["country"])
and (placement_params["country"][0] == netmap[node_address]["country"])
or (placement_params["country_code"] == netmap[node_address]["country_code"])
), f"The node is selected from the wrong country or country code. Got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with SELECT and FILTER results with UNIQUE nodes")
@pytest.mark.parametrize(
"container_request",
[
PUBLIC_WITH_POLICY(
"UNIQUE REP 1 IN MyRussianNodes REP 1 IN MyRussianNodes CBF 1 SELECT 1 FROM RussianNodes AS MyRussianNodes FILTER Country EQ 'Russia' AS RussianNodes"
)
],
indirect=True,
)
def test_policy_with_select_and_filter_results_with_unique_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with SELECT and FILTER results with UNIQUE nodes.
"""
placement_params = {"country": "Russia"}
file_path = generate_file(simple_object_size.value)
expected_copies = 2
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, self.shell, self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
with reporter.step(f"Check two nodes are selected from {placement_params['country']}"):
for storage_node in resulting_copies:
node_address = self.cluster.node(storage_node).get_interface(Interfaces.MGMT)
assert (
placement_params["country"] == netmap[node_address]["country"]
), f"The node is selected from the wrong country. Got {netmap[node_address]['country']}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Simple policy results with 25% of available nodes")
@pytest.mark.parametrize("container_request", [PUBLIC_WITH_POLICY("REP 1 CBF 1")], indirect=True)
def test_simple_policy_results_with_25_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy results with 25% of available nodes.
"""
file_path = generate_file(simple_object_size.value)
expected_copies = 1
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, self.shell, self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with SELECT results with 25% of available nodes")
@pytest.mark.parametrize("container_request", [PUBLIC_WITH_POLICY("REP 1 IN One CBF 1 SELECT 1 FROM * AS One")], indirect=True)
def test_policy_with_select_results_with_25_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with SELECT results with 25% of available nodes.
"""
file_path = generate_file(simple_object_size.value)
expected_copies = 1
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, self.shell, self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Simple policy results with 50% of available nodes")
@pytest.mark.parametrize("container_request", [PUBLIC_WITH_POLICY("REP 2 CBF 1")], indirect=True)
def test_simple_policy_results_with_50_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement simple policy results with 50% of available nodes.
"""
file_path = generate_file(simple_object_size.value)
expected_copies = 2
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, self.shell, self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with SELECT results with 50% of available nodes")
@pytest.mark.parametrize("container_request", [PUBLIC_WITH_POLICY("REP 2 IN HALF CBF 1 SELECT 2 FROM * AS HALF")], indirect=True)
def test_policy_with_select_results_with_50_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with SELECT results with 50% of available nodes.
"""
file_path = generate_file(simple_object_size.value)
expected_copies = 2
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, self.shell, self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Simple policy results with 75% of available nodes")
@pytest.mark.parametrize("container_request", [PUBLIC_WITH_POLICY("REP 1")], indirect=True)
def test_simple_policy_results_with_75_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement simple policy results with 75% of available nodes.
"""
file_path = generate_file(simple_object_size.value)
expected_copies = 1
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, self.shell, self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with SELECT results with 75% of available nodes")
@pytest.mark.parametrize(
"container_request", [PUBLIC_WITH_POLICY("REP 2 IN DS CBF 1 SELECT 3 IN DISTINCT Country FROM * AS DS")], indirect=True
)
def test_policy_with_select_results_with_75_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with SELECT results with 75% of available nodes.
"""
file_path = generate_file(simple_object_size.value)
expected_copies = 2
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, self.shell, self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Simple policy results with 100% of available nodes")
@pytest.mark.parametrize("container_request", [PUBLIC_WITH_POLICY("REP 4")], indirect=True)
def test_simple_policy_results_with_100_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement simple policy results with 100% of available nodes.
"""
file_path = generate_file(simple_object_size.value)
expected_copies = 4
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, self.shell, self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with SELECT results with 100% of available nodes")
@pytest.mark.parametrize("container_request", [PUBLIC_WITH_POLICY("REP 1 IN All SELECT 4 FROM * AS All")], indirect=True)
def test_policy_with_select_results_with_100_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with SELECT results with 100% of available nodes.
"""
file_path = generate_file(simple_object_size.value)
expected_copies = 1
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, self.shell, self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with SELECT and Complex FILTER results with 100% of available nodes")
@pytest.mark.parametrize(
"container_request",
[
PUBLIC_WITH_POLICY(
"REP 2 IN All SELECT 4 FROM AllNodes AS All FILTER Country EQ Russia OR Country EQ Sweden OR Country EQ Finland AS AllCountries FILTER @AllCountries AND Continent EQ Europe AS AllNodes"
)
],
indirect=True,
)
def test_policy_with_select_and_complex_filter_results_with_100_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with SELECT and Complex FILTER results with 100% of available nodes.
"""
placement_params = {"country": ["Russia", "Sweden", "Finland"], "continent": "Europe"}
file_path = generate_file(simple_object_size.value)
expected_copies = 2
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, self.shell, self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
with reporter.step(f"Check two nodes are selected from {' or '.join(placement_params['country'])}"):
for storage_node in resulting_copies:
node_address = self.cluster.node(storage_node).get_interface(Interfaces.MGMT)
assert (
netmap[node_address]["country"] in placement_params["country"]
), f"The node is selected from the wrong country or continent. Got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Simple policy results with UNIQUE nodes")
@pytest.mark.parametrize("container_request", [PUBLIC_WITH_POLICY("UNIQUE REP 1 REP 1 CBF 1")], indirect=True)
def test_simple_policy_results_with_unique_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement simple policy results with UNIQUE nodes.
"""
file_path = generate_file(simple_object_size.value)
expected_copies = 2
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, self.shell, self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with SELECT results with UNIQUE nodes")
@pytest.mark.parametrize(
"container_request",
[PUBLIC_WITH_POLICY("UNIQUE REP 1 IN AnyNode REP 1 IN AnyNode CBF 1 SELECT 1 FROM * AS AnyNode")],
indirect=True,
)
def test_policy_with_select_results_with_unique_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with SELECT results with UNIQUE nodes.
"""
file_path = generate_file(simple_object_size.value)
expected_copies = 2
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, self.shell, self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with SELECT and Complex FILTER results with UNIQUE nodes")
@pytest.mark.parametrize(
"container_request",
[
PUBLIC_WITH_POLICY(
"UNIQUE REP 1 IN RUS REP 1 IN RUS CBF 1 SELECT 1 FROM RU AS RUS FILTER Country NE Sweden AS NotSE FILTER @NotSE AND NOT (CountryCode EQ FI) AND Country EQ 'Russia' AS RU"
)
],
indirect=True,
)
def test_policy_with_select_and_complex_filter_results_with_unique_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with SELECT and Complex FILTER results with UNIQUE nodes.
"""
placement_params = {"country_code": "FI", "country": ["Sweden", "Russia"]}
file_path = generate_file(simple_object_size.value)
expected_copies = 2
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, self.shell, self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
with reporter.step(f"Check two nodes are selected not from {placement_params['country'][0]}"):
for storage_node in resulting_copies:
node_address = self.cluster.node(storage_node).get_interface(Interfaces.MGMT)
assert not (placement_params["country"][0] == netmap[node_address]["country"]) or (
not (placement_params["country_code"] == netmap[node_address]["country_code"])
and placement_params["country"][1] == netmap[node_address]["country"]
), f"The node is selected from the wrong country or with wrong country code. Got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with Multi SELECTs and FILTERs results with UNIQUE nodes")
@pytest.mark.parametrize(
"container_request",
[
PUBLIC_WITH_POLICY(
"UNIQUE REP 1 IN RU REP 1 IN EU REP 1 IN RU CBF 1 SELECT 1 FROM RUS AS RU SELECT 1 FROM EUR AS EU FILTER Country EQ Russia AS RUS FILTER NOT (@RUS) AND Country EQ Sweden OR CountryCode EQ FI AS EUR"
)
],
indirect=True,
)
def test_policy_with_multi_selects_and_filters_results_with_unique_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with Multi SELECTs and FILTERs results with UNIQUE nodes.
"""
placement_params = {"country": ["Russia", "Sweden"], "country_code": "FI"}
file_path = generate_file(simple_object_size.value)
expected_copies = 3
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, self.shell, self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
with reporter.step(f"Check three nodes are selected from any country"):
for storage_node in resulting_copies:
node_address = self.cluster.node(storage_node).get_interface(Interfaces.MGMT)
assert (
netmap[node_address]["country"] in placement_params["country"]
or netmap[node_address]["country_code"] == placement_params["country_code"]
), f"The node is selected from the wrong country or with wrong country code. Got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy: REP 1 IN SPB REP 1 IN MSK REP 3")
@pytest.mark.parametrize(
"container_request",
[
PUBLIC_WITH_POLICY(
"REP 1 IN SPB REP 1 IN MSK REP 3 CBF 1 SELECT 2 FROM LED AS SPB SELECT 2 FROM MOW AS MSK FILTER Location EQ 'Saint Petersburg (ex Leningrad)' AS LED FILTER Location EQ Moskva AS MOW"
)
],
indirect=True,
)
def test_policy_rep_1_in_spb_rep_1_in_msk_rep_3(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy: REP 1 IN SPB REP 1 IN MSK REP 3.
"""
placement_params = {"location": ["Saint Petersburg (ex Leningrad)", "Moskva"]}
file_path = generate_file(simple_object_size.value)
expected_copies = 3
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, self.shell, self.cluster.storage_nodes)
assert expected_copies <= len(resulting_copies) <= len(
self.cluster.storage_nodes
), f"Expected between {expected_copies} and {len(self.cluster.storage_nodes)} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
list_of_location = []
for storage_node in resulting_copies:
node_address = self.cluster.node(storage_node).get_interface(Interfaces.MGMT)
list_of_location.append(netmap[node_address]["location"])
with reporter.step(f"Check two or three nodes are selected from Russia and from any other country"):
assert (
placement_params["location"][0] in list_of_location
and placement_params["location"][1] in list_of_location
and len(resulting_copies) > 2
), f"The node is selected from the wrong location. Got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)


@@ -1,654 +0,0 @@
import itertools
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.shell.interfaces import Shell
from frostfs_testlib.steps.cli.container import delete_container
from frostfs_testlib.steps.cli.object import delete_object, put_object_to_random_node
from frostfs_testlib.steps.node_management import get_netmap_snapshot
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeNetmapInfo
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing import parallel
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils.cli_utils import parse_netmap_output
from frostfs_testlib.utils.file_utils import generate_file
from ...helpers.container_creation import create_container_with_ape
from ...helpers.container_request import PUBLIC_WITH_POLICY, ContainerRequest
from ...helpers.policy_validation import get_netmap_param, validate_object_policy
@pytest.mark.weekly
@pytest.mark.policy
@pytest.mark.policy_price
class TestPolicyWithPrice(ClusterTestBase):
@wait_for_success(1200, 60, title="Wait for the Price attribute to be set on all nodes", expected_result=True)
def await_for_price_attribute_on_nodes(self):
netmap = parse_netmap_output(get_netmap_snapshot(node=self.cluster.storage_nodes[0], shell=self.shell))
netmap = get_netmap_param(netmap)
for node in self.cluster.cluster_nodes:
node_address = node.get_interface(Interfaces.MGMT)
if netmap[node_address]["Price"] is None:
return False
return True
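wait_for_success above re-runs the decorated check until it returns the expected result or the overall timeout expires. A rough, illustrative equivalent of that polling loop (not the actual frostfs_testlib implementation):

import time

def poll_until(check, timeout: float = 1200, interval: float = 60, expected=True):
    # Re-run `check` every `interval` seconds until it returns `expected`,
    # failing once `timeout` seconds have elapsed.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if check() == expected:
            return
        time.sleep(interval)
    raise TimeoutError(f"check did not return {expected} within {timeout}s")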
@reporter.step("Set Pirce field on {cluster_node}")
def set_price_on_node(
self, cluster_node: ClusterNode, locode_price_map: dict[str, str], netmap: list[NodeNetmapInfo], config_manager: ConfigStateManager
):
node_address = cluster_node.get_interface(Interfaces.MGMT)
node_netmap = [netmap_entry for netmap_entry in netmap if netmap_entry.node == node_address]
assert node_netmap, f"No node found with address {node_address}: \n{netmap}"
price = locode_price_map[node_netmap[0].un_locode]
config_manager.set_on_node(cluster_node, StorageNode, {"node:attribute_5": f"Price:{price}"})
@pytest.fixture(scope="module")
def fill_field_price(self, cluster: Cluster, cluster_state_controller_session: ClusterStateController):
locode_price_map = {
"RU MOW": "15",
"RU LED": "10",
"SE STO": "65",
"FI HEL": "55",
}
netmap = parse_netmap_output(get_netmap_snapshot(node=self.cluster.storage_nodes[0], shell=self.shell))
config_manager = cluster_state_controller_session.manager(ConfigStateManager)
parallel(self.set_price_on_node, cluster.cluster_nodes, locode_price_map, netmap, config_manager)
cluster_state_controller_session.wait_after_storage_startup()
self.tick_epoch()
self.await_for_price_attribute_on_nodes()
yield
cluster_state_controller_session.manager(ConfigStateManager).revert_all()
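For reference, the fixture above writes the price as a node attribute (node:attribute_5), so after the services restart and an epoch tick each node advertises it in the netmap. A parsed get_netmap_param entry then looks roughly like this (values illustrative, taken from the locode_price_map):

# One entry of get_netmap_param(...), keyed by the node's management address:
{"un_locode": "RU MOW", "country": "Russia", "country_code": "RU", "Price": "15"}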
@pytest.fixture
def container(
self,
default_wallet: WalletInfo,
frostfs_cli: FrostfsCli,
client_shell: Shell,
cluster: Cluster,
rpc_endpoint: str,
container_request: ContainerRequest,
# In this set of tests, containers must be created after the fill_field_price fixture
fill_field_price,
) -> str:
return create_container_with_ape(container_request, frostfs_cli, default_wallet, client_shell, cluster, rpc_endpoint)
@allure.title("Policy with SELECT and FILTER results with 25% of available nodes")
@pytest.mark.parametrize(
"container_request",
[PUBLIC_WITH_POLICY("REP 1 IN Nodes25 SELECT 1 FROM LE10 AS Nodes25 FILTER Price LE 10 AS LE10")],
indirect=True,
)
def test_policy_with_select_and_filter_results_with_25_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with SELECT and FILTER results with 25% of available nodes.
"""
placement_params = {"Price": 10}
file_path = generate_file(simple_object_size.value)
expected_copies = 1
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
node_address = self.cluster.node(resulting_copies[0]).get_interface(Interfaces.MGMT)
with reporter.step(f"Check the node is selected with price <= {placement_params['Price']}"):
assert (
int(netmap[node_address]["Price"]) <= placement_params["Price"]
), f"The node is selected with the wrong price. Got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with select and complex filter results with 25% of available nodes")
@pytest.mark.parametrize(
"container_request",
[
PUBLIC_WITH_POLICY(
"REP 1 IN Nodes25 SELECT 1 FROM BET0AND10 AS Nodes25 FILTER Price LE 10 AS LE10 FILTER Price GT 0 AS GT0 FILTER @LE10 AND @GT0 AS BET0AND10"
)
],
indirect=True,
)
def test_policy_with_select_and_complex_filter_results_with_25_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with SELECT and Complex FILTER results with 25% of available nodes.
"""
placement_params = {"Price": [10, 0]}
file_path = generate_file(simple_object_size.value)
expected_copies = 1
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
with reporter.step(f"Check the node is selected with price between 1 and 10"):
for node in resulting_copies:
node_address = self.cluster.node(node).get_interface(Interfaces.MGMT)
assert (
int(netmap[node_address]["Price"]) > placement_params["Price"][1]
and int(netmap[node_address]["Price"]) <= placement_params["Price"][0]
), f"The node is selected with the wrong price. Got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with Multi SELECTs and FILTERs results with 25% of available nodes")
@pytest.mark.parametrize(
"container_request",
[
PUBLIC_WITH_POLICY(
"UNIQUE REP 1 IN One REP 1 IN One CBF 1 SELECT 1 FROM MINMAX AS One FILTER Price LT 15 AS LT15 FILTER Price GT 55 AS GT55 FILTER @LT15 OR @GT55 AS MINMAX"
)
],
indirect=True,
)
def test_policy_with_multi_selects_and_filters_results_with_25_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with Multi SELECTs and FILTERs results with 25% of available nodes.
"""
placement_params = {"Price": [15, 55]}
file_path = generate_file(simple_object_size.value)
expected_copies = 2
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
with reporter.step(f"Check two nodes are selected with max and min prices"):
for node in resulting_copies:
node_address = self.cluster.node(node).get_interface(Interfaces.MGMT)
assert (
int(netmap[node_address]["Price"]) > placement_params["Price"][1]
or int(netmap[node_address]["Price"]) < placement_params["Price"][0]
), f"The node is selected with the wrong price. Got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with SELECT and FILTER results with 50% of available nodes")
@pytest.mark.parametrize(
"container_request",
[PUBLIC_WITH_POLICY("REP 2 IN HALF CBF 1 SELECT 2 FROM GT15 AS HALF FILTER Price GT 15 AS GT15")],
indirect=True,
)
def test_policy_with_select_and_filter_results_with_50_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with SELECT and FILTER results with 50% of available nodes.
"""
placement_params = {"Price": 15}
file_path = generate_file(simple_object_size.value)
expected_copies = 2
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
with reporter.step(f"Check two nodes are selected with price > {placement_params['Price']}"):
for node in resulting_copies:
node_address = self.cluster.node(node).get_interface(Interfaces.MGMT)
assert (
int(netmap[node_address]["Price"]) > placement_params["Price"]
), f"The node is selected with the wrong price. Got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with SELECT and Complex FILTER results with 50% of available nodes")
@pytest.mark.parametrize(
"container_request",
[
PUBLIC_WITH_POLICY(
"REP 2 IN HALF CBF 2 SELECT 2 FROM GE15 AS HALF FILTER CountryCode NE RU AS NOTRU FILTER @NOTRU AND Price GE 15 AS GE15"
)
],
indirect=True,
)
def test_policy_with_select_and_complex_filter_results_with_50_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with SELECT and Complex FILTER results with 50% of available nodes.
"""
placement_params = {"Price": 15, "country_code": "RU"}
file_path = generate_file(simple_object_size.value)
expected_copies = 2
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
with reporter.step(f"Check two nodes are selected not with country code '{placement_params['country_code']}'"):
for node in resulting_copies:
node_address = self.cluster.node(node).get_interface(Interfaces.MGMT)
assert (
netmap[node_address]["country_code"] != placement_params["country_code"]
and int(netmap[node_address]["Price"]) >= placement_params["Price"]
), f"The node is selected with the wrong price or country code. Got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with Multi SELECTs and FILTERs results with 50% of available nodes")
@pytest.mark.parametrize(
"container_request",
[
PUBLIC_WITH_POLICY(
"REP 2 IN FH REP 1 IN SH CBF 2 SELECT 2 FROM LE55 AS FH SELECT 2 FROM GE15 AS SH FILTER 'UN-LOCODE' EQ 'RU LED' OR 'UN-LOCODE' EQ 'RU MOW' AS RU FILTER NOT (@RU) AS NOTRU FILTER @NOTRU AND Price GE 15 AS GE15 FILTER @RU AND Price LE 55 AS LE55"
)
],
indirect=True,
)
def test_policy_with_multi_selects_and_filters_results_with_50_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with Multi SELECTs and FILTERs results with 50% of available nodes.
"""
placement_params = {"un_locode": ["RU LED", "RU MOW"], "Price": [15, 55]}
file_path = generate_file(simple_object_size.value)
expected_copies = 3
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
with reporter.step(f"Check all nodes are selected"):
for node in resulting_copies:
node_address = self.cluster.node(node).get_interface(Interfaces.MGMT)
assert (
netmap[node_address]["un_locode"] in placement_params["un_locode"]
and int(netmap[node_address]["Price"]) <= placement_params["Price"][1]
) or (
netmap[node_address]["un_locode"] not in placement_params["un_locode"]
and int(netmap[node_address]["Price"]) >= placement_params["Price"][0]
), f"The node is selected with the wrong price or un_locode. Expected {placement_params} and got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
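The assertion above encodes the two SELECT branches of the policy: an 'RU LED'/'RU MOW' node priced at most 55, or any other node priced at least 15. Factoring that into a named predicate makes the intent easy to sanity-check in isolation (a sketch; the helper name is hypothetical):

def matches_fh_or_sh(attrs: dict, ru_locodes=("RU LED", "RU MOW")) -> bool:
    price = int(attrs["Price"])
    in_ru = attrs["un_locode"] in ru_locodes
    # FH: @RU AND Price LE 55; SH: NOT (@RU) AND Price GE 15
    return (in_ru and price <= 55) or (not in_ru and price >= 15)

assert matches_fh_or_sh({"un_locode": "RU LED", "Price": "10"})
assert not matches_fh_or_sh({"un_locode": "SE STO", "Price": "10"})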
@allure.title("Policy with SELECT and FILTER results with 75% of available nodes")
@pytest.mark.parametrize(
"container_request",
[PUBLIC_WITH_POLICY("REP 2 IN NODES75 SELECT 2 FROM LT65 AS NODES75 FILTER Price LT 65 AS LT65")],
indirect=True,
)
def test_policy_with_select_and_filter_results_with_75_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with SELECT and FILTER results with 75% of available nodes.
"""
placement_params = {"Price": 65}
file_path = generate_file(simple_object_size.value)
expected_copies = 2
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
with reporter.step(f"Check two nodes are selected with price < {placement_params['Price']}"):
for node in resulting_copies:
node_address = self.cluster.node(node).get_interface(Interfaces.MGMT)
assert (
int(netmap[node_address]["Price"]) < placement_params["Price"]
), f"The node is selected with the wrong price. Got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with SELECT and Complex FILTER results with 75% of available nodes")
@pytest.mark.parametrize(
"container_request",
[
PUBLIC_WITH_POLICY(
"REP 2 IN NODES75 SELECT 2 FROM LT65 AS NODES75 FILTER Continent NE America AS NOAM FILTER @NOAM AND Price LT 65 AS LT65"
)
],
indirect=True,
)
def test_policy_with_select_and_complex_filter_results_with_75_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with SELECT and Complex FILTER results with 75% of available nodes.
"""
placement_params = {"Price": 65, "continent": "America"}
file_path = generate_file(simple_object_size.value)
expected_copies = 2
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
with reporter.step(f"Check three nodes are selected not from {placement_params['continent']}"):
for node in resulting_copies:
node_address = self.cluster.node(node).get_interface(Interfaces.MGMT)
assert (
int(netmap[node_address]["Price"]) < placement_params["Price"]
and netmap[node_address]["continent"] != placement_params["continent"]
), f"The node is selected with the wrong price or continent. Got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with Multi SELECTs and FILTERs results with 75% of available nodes")
@pytest.mark.parametrize(
"container_request",
[
PUBLIC_WITH_POLICY(
"REP 3 IN EXPNSV REP 3 IN CHEAP SELECT 3 FROM GT10 AS EXPNSV SELECT 3 FROM LT65 AS CHEAP FILTER NOT (Continent EQ America) AS NOAM FILTER @NOAM AND Price LT 65 AS LT65 FILTER @NOAM AND Price GT 10 AS GT10"
)
],
indirect=True,
)
def test_policy_with_multi_selects_and_filters_results_with_75_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with Multi SELECTs and FILTERs results with 75% of available nodes.
"""
placement_params = {"Price": [65, 10], "continent": "America"}
file_path = generate_file(simple_object_size.value)
expected_copies = 4
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
with reporter.step(f"Check all nodes are selected"):
for node in resulting_copies:
node_address = self.cluster.node(node).get_interface(Interfaces.MGMT)
assert (
netmap[node_address]["continent"] != placement_params["continent"]
and (
int(netmap[node_address]["Price"]) > placement_params["Price"][1]
or int(netmap[node_address]["Price"]) < placement_params["Price"][0]
)
), f"The node is selected with the wrong price or continent. Got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with SELECT and FILTER results with 100% of available nodes")
@pytest.mark.parametrize(
"container_request", [PUBLIC_WITH_POLICY("REP 1 IN All SELECT 4 FROM AllNodes AS All FILTER Price GE 0 AS AllNodes")], indirect=True
)
def test_policy_with_select_and_filter_results_with_100_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with SELECT and FILTER results with 100% of available nodes.
"""
placement_params = {"Price": 0}
file_path = generate_file(simple_object_size.value)
expected_copies = 1
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
node_address = self.cluster.node(resulting_copies[0]).get_interface(Interfaces.MGMT)
with reporter.step(f"Check the node is selected with price >= {placement_params['Price']}"):
assert (
int(netmap[node_address]["Price"]) >= placement_params["Price"]
), f"The node is selected with the wrong price. Got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)
@allure.title("Policy with Multi SELECTs and FILTERs results with 100% of available nodes")
@pytest.mark.parametrize(
"container_request",
[
PUBLIC_WITH_POLICY(
"REP 4 IN AllOne REP 4 IN AllTwo CBF 4 SELECT 2 FROM GEZero AS AllOne SELECT 2 FROM AllCountries AS AllTwo FILTER Country EQ Russia OR Country EQ Sweden OR Country EQ Finland AS AllCountries FILTER Price GE 0 AS GEZero"
)
],
indirect=True,
)
def test_policy_with_multi_selects_and_filters_results_with_100_of_available_nodes(
self,
default_wallet: WalletInfo,
rpc_endpoint: str,
simple_object_size: ObjectSize,
container: str,
container_request: ContainerRequest,
):
"""
This test checks object's copies based on container's placement policy with Multi SELECTs and FILTERs results with 100% of available nodes.
"""
placement_params = {"country": ["Russia", "Sweden", "Finland"], "Price": 0}
file_path = generate_file(simple_object_size.value)
expected_copies = 4
with reporter.step(f"Check container policy"):
validate_object_policy(default_wallet, self.shell, container_request.policy, container, rpc_endpoint)
with reporter.step(f"Put object in container"):
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
with reporter.step(f"Check object expected copies"):
resulting_copies = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(resulting_copies) == expected_copies, f"Expected {expected_copies} copies, got {len(resulting_copies)}"
with reporter.step(f"Check the object appearance"):
netmap = parse_netmap_output(get_netmap_snapshot(node=resulting_copies[0], shell=self.shell))
netmap = get_netmap_param(netmap)
with reporter.step(f"Check all node are selected"):
for node in resulting_copies:
node_address = self.cluster.node(node).get_interface(Interfaces.MGMT)
assert (netmap[node_address]["country"] in placement_params["country"]) or (
int(netmap[node_address]["Price"]) >= placement_params["Price"]
), f"The node is selected from the wrong country or with wrong price. Got {netmap[node_address]}"
with reporter.step(f"Delete the object from the container"):
delete_object(default_wallet, container, oid, self.shell, rpc_endpoint)
with reporter.step(f"Delete the container"):
delete_container(default_wallet, container, self.shell, rpc_endpoint, await_mode=False)


@@ -1,30 +0,0 @@
import os
from datetime import datetime
import allure
import pytest
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.controllers import ShardsWatcher
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.utils.file_utils import TestFile, generate_file
@pytest.fixture()
@allure.title("Provide Shards watcher")
def shards_watcher(node_under_test: ClusterNode) -> ShardsWatcher:
watcher = ShardsWatcher(node_under_test)
return watcher
@pytest.fixture()
@allure.title("Test start time")
def test_start_time() -> datetime:
start_time = datetime.utcnow()
return start_time
@pytest.fixture()
@allure.title("Generate simple size file")
def simple_file(simple_object_size: ObjectSize) -> TestFile:
path_file = generate_file(size=simple_object_size.value)
return path_file


@@ -0,0 +1,101 @@
import logging
from random import choices
from time import sleep
import allure
import pytest
from failover_utils import wait_all_storage_node_returned, wait_object_replication_on_nodes
from file_helper import generate_file, get_file_hash
from iptables_helper import IpTablesHelper
from neofs_testlib.hosting import Hosting
from python_keywords.container import create_container
from python_keywords.neofs_verbs import get_object, put_object
from wellknown_acl import PUBLIC_ACL
logger = logging.getLogger("NeoLogger")
STORAGE_NODE_COMMUNICATION_PORT = "8080"
STORAGE_NODE_COMMUNICATION_PORT_TLS = "8082"
PORTS_TO_BLOCK = [STORAGE_NODE_COMMUNICATION_PORT, STORAGE_NODE_COMMUNICATION_PORT_TLS]
blocked_hosts = []
@pytest.fixture(autouse=True)
@allure.step("Restore network")
def restore_network(hosting: Hosting):
yield
not_empty = len(blocked_hosts) != 0
for host_address in list(blocked_hosts):
with allure.step(f"Restore network at host {host_address}"):
host = hosting.get_host_by_address(host_address)
IpTablesHelper.restore_input_traffic_to_port(host.get_shell(), PORTS_TO_BLOCK)
blocked_hosts.remove(host_address)
if not_empty:
wait_all_storage_node_returned(hosting)
@allure.title("Block Storage node traffic")
@pytest.mark.failover
@pytest.mark.failover_net
def test_block_storage_node_traffic(
prepare_wallet_and_deposit, client_shell, require_multiple_hosts, hosting: Hosting
):
"""
Block storage nodes traffic using iptables and wait for replication for objects.
"""
wallet = prepare_wallet_and_deposit
placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
wakeup_node_timeout = 10  # timeout to let nodes detect that traffic has been blocked
nodes_to_block_count = 2
source_file_path = generate_file()
cid = create_container(wallet, shell=client_shell, rule=placement_rule, basic_acl=PUBLIC_ACL)
oid = put_object(wallet, source_file_path, cid, shell=client_shell)
# TODO: we need to refactor wait_object_replication_on_nodes so that it returns
# storage node names rather than endpoints
node_endpoints = wait_object_replication_on_nodes(wallet, cid, oid, 2, shell=client_shell)
logger.info(f"Nodes are {node_endpoints}")
node_endpoints_to_block = node_endpoints
if nodes_to_block_count > len(node_endpoints):
# TODO: the intent of this logic is not clear, need to revisit
node_endpoints_to_block = choices(node_endpoints, k=2)
excluded_nodes = []
for node_endpoint in node_endpoints_to_block:
host_address = node_endpoint.split(":")[0]
host = hosting.get_host_by_address(host_address)
with allure.step(f"Block incoming traffic at host {host_address} on port {PORTS_TO_BLOCK}"):
blocked_hosts.append(host_address)
excluded_nodes.append(node_endpoint)
IpTablesHelper.drop_input_traffic_to_port(host.get_shell(), PORTS_TO_BLOCK)
sleep(wakeup_node_timeout)
with allure.step(f"Check object is not stored on node {node_endpoint}"):
new_nodes = wait_object_replication_on_nodes(
wallet, cid, oid, 2, shell=client_shell, excluded_nodes=excluded_nodes
)
assert node_endpoint not in new_nodes
with allure.step(f"Check object data is not corrupted"):
got_file_path = get_object(wallet, cid, oid, endpoint=new_nodes[0], shell=client_shell)
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
for node_endpoint in node_endpoints_to_block:
host_address = node_endpoint.split(":")[0]
host = hosting.get_host_by_address(host_address)
with allure.step(
f"Unblock incoming traffic at host {host_address} on port {PORTS_TO_BLOCK}"
):
IpTablesHelper.restore_input_traffic_to_port(host.get_shell(), PORTS_TO_BLOCK)
blocked_hosts.remove(host_address)
sleep(wakeup_node_timeout)
with allure.step(f"Check object data is not corrupted"):
new_nodes = wait_object_replication_on_nodes(wallet, cid, oid, 2, shell=client_shell)
got_file_path = get_object(wallet, cid, oid, shell=client_shell, endpoint=new_nodes[0])
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
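IpTablesHelper presumably wraps plain iptables rules; blocking and later restoring the storage ports by hand would look roughly like this sketch (an assumption about its internals, not the helper's actual code):

def drop_input_traffic_to_port(shell, ports):
    for port in ports:
        # Append a rule that silently drops inbound TCP traffic to the port.
        shell.exec(f"sudo iptables -A INPUT -p tcp --dport {port} -j DROP")

def restore_input_traffic_to_port(shell, ports):
    for port in ports:
        # Delete the matching rule to let traffic flow again.
        shell.exec(f"sudo iptables -D INPUT -p tcp --dport {port} -j DROP")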


@@ -1,278 +0,0 @@
import itertools
import logging
import os
import random
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.steps.cli.container import StorageContainer, StorageContainerInfo, create_container
from frostfs_testlib.steps.cli.object import get_object, get_object_nodes, put_object
from frostfs_testlib.steps.node_management import check_node_in_map, check_node_not_in_map
from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode
from frostfs_testlib.storage.controllers import ClusterStateController
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.parallel import parallel, parallel_workers_limit
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils.file_utils import get_file_hash
from pytest import FixtureRequest
from ...helpers.container_creation import create_container_with_ape
from ...helpers.container_request import APE_EVERYONE_ALLOW_ALL, ContainerRequest
logger = logging.getLogger("NeoLogger")
@pytest.mark.failover
@pytest.mark.failover_server
class TestFailoverServer(ClusterTestBase):
@wait_for_success(max_wait_time=120, interval=1)
def wait_node_not_in_map(self, *args, **kwargs):
check_node_not_in_map(*args, **kwargs)
@wait_for_success(max_wait_time=120, interval=1)
def wait_node_in_map(self, *args, **kwargs):
check_node_in_map(*args, **kwargs)
@allure.title("[Test] Create containers")
@pytest.fixture
def containers(
self,
request: FixtureRequest,
default_wallet: WalletInfo,
frostfs_cli: FrostfsCli,
cluster: Cluster,
) -> list[StorageContainer]:
placement_rule = "REP 2 CBF 2 SELECT 2 FROM *"
container_request = ContainerRequest(placement_rule, APE_EVERYONE_ALLOW_ALL)
containers_count = request.param
results = parallel(
[create_container_with_ape for _ in range(containers_count)],
container_request=container_request,
frostfs_cli=frostfs_cli,
wallet=default_wallet,
shell=self.shell,
cluster=cluster,
endpoint=self.cluster.default_rpc_endpoint,
)
containers = [
StorageContainer(StorageContainerInfo(result.result(), default_wallet), self.shell, self.cluster) for result in results
]
return containers
@allure.title("[Test] Create container")
@pytest.fixture()
def container(self, default_wallet: WalletInfo, frostfs_cli: FrostfsCli) -> StorageContainer:
select = len(self.cluster.cluster_nodes)
placement_rule = f"REP {select - 1} CBF 1 SELECT {select} FROM *"
cid = create_container_with_ape(
ContainerRequest(placement_rule, APE_EVERYONE_ALLOW_ALL),
frostfs_cli,
default_wallet,
self.shell,
self.cluster,
self.cluster.default_rpc_endpoint,
)
storage_cont_info = StorageContainerInfo(cid, default_wallet)
return StorageContainer(storage_cont_info, self.shell, self.cluster)
@allure.title("[Class] Create objects")
@pytest.fixture(scope="class")
def storage_objects(
self,
request: FixtureRequest,
containers: list[StorageContainer],
simple_object_size: ObjectSize,
complex_object_size: ObjectSize,
) -> list[StorageObjectInfo]:
object_count = request.param
sizes_samples = [simple_object_size, complex_object_size]
samples_count = len(sizes_samples)
assert object_count >= samples_count, f"Object count is too low, must be >= {samples_count}"
sizes_weights = [2, 1]
sizes = sizes_samples + random.choices(sizes_samples, weights=sizes_weights, k=object_count - samples_count)
total_objects = len(containers) * object_count
with reporter.step(f"Upload {total_objects} in total to containers"):
results = parallel(
[self._generate_files_and_remove_physical_copy for _ in range(total_objects)],
container=itertools.cycle(containers),
size=itertools.cycle(sizes),
)
return [result.result() for result in results]
def _generate_files_and_remove_physical_copy(self, container: StorageContainer, size: ObjectSize) -> StorageObjectInfo:
storage_object = container.generate_object(size.value)
# Deliberately remove the physical copy of the file, since these tests can generate a large volume of test objects
os.remove(storage_object.file_path)
return storage_object
@allure.title("[Test] Create objects and get nodes with object")
@pytest.fixture()
def object_and_nodes(self, simple_object_size: ObjectSize, container: StorageContainer) -> tuple[StorageObjectInfo, list[ClusterNode]]:
object_info = container.generate_object(simple_object_size.value)
object_nodes = get_object_nodes(self.cluster, object_info.cid, object_info.oid, self.cluster.cluster_nodes[0])
return object_info, object_nodes
def _verify_object(self, storage_object: StorageObjectInfo, node: StorageNode):
with reporter.step(f"Verify object {storage_object.oid} from node {node}"):
file_path = get_object(
storage_object.wallet,
storage_object.cid,
storage_object.oid,
endpoint=node.get_rpc_endpoint(),
shell=self.shell,
timeout="60s",
)
assert storage_object.file_hash == get_file_hash(file_path)
@reporter.step("Verify objects")
def verify_objects(self, nodes: list[StorageNode], storage_objects: list[StorageObjectInfo]) -> None:
workers_count = os.environ.get("PARALLEL_CUSTOM_LIMIT", 50)
with parallel_workers_limit(int(workers_count)):
parallel(self._verify_object, storage_objects * len(nodes), node=itertools.cycle(nodes))
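parallel_workers_limit above caps the fan-out of the object reads. If the testlib helper were unavailable, the same bounded verification could be expressed with the standard library (illustrative only):

import itertools
import os
from concurrent.futures import ThreadPoolExecutor

def verify_objects_bounded(verify, nodes, storage_objects):
    workers = int(os.environ.get("PARALLEL_CUSTOM_LIMIT", 50))
    pairs = zip(storage_objects * len(nodes), itertools.cycle(nodes))
    with ThreadPoolExecutor(max_workers=workers) as pool:
        # Consuming the iterator re-raises the first verification error.
        list(pool.map(lambda pair: verify(*pair), pairs))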
@allure.title("Full shutdown node")
@pytest.mark.parametrize("containers, storage_objects", [(4, 5)], indirect=True)
def test_complete_node_shutdown(
self,
storage_objects: list[StorageObjectInfo],
node_under_test: ClusterNode,
cluster_state_controller: ClusterStateController,
):
with reporter.step(f"Remove one node from the list of nodes"):
alive_nodes = list(set(self.cluster.cluster_nodes) - {node_under_test})
storage_nodes = [cluster.storage_node for cluster in alive_nodes]
with reporter.step("Tick 2 epochs and wait for 2 blocks"):
self.tick_epochs(2, storage_nodes[0], wait_block=2)
with reporter.step(f"Stop node"):
cluster_state_controller.stop_node_host(node_under_test, "hard")
with reporter.step("Verify that there are no corrupted objects"):
self.verify_objects(storage_nodes, storage_objects)
with reporter.step(f"Check node still in map"):
self.wait_node_in_map(node_under_test.storage_node, self.shell, alive_node=storage_nodes[0])
count_tick_epoch = int(alive_nodes[0].ir_node.get_netmap_cleaner_threshold()) + 4
with reporter.step(f"Tick {count_tick_epoch} epochs and wait for 2 blocks"):
self.tick_epochs(count_tick_epoch, storage_nodes[0], wait_block=2)
with reporter.step(f"Check node in not map after {count_tick_epoch} epochs"):
self.wait_node_not_in_map(node_under_test.storage_node, self.shell, alive_node=storage_nodes[0])
with reporter.step(f"Verify that there are no corrupted objects after {count_tick_epoch} epochs"):
self.verify_objects(storage_nodes, storage_objects)
@allure.title("Temporarily disable a node")
@pytest.mark.parametrize("containers, storage_objects", [(5, 10)], indirect=True)
def test_temporarily_disable_a_node(
self,
storage_objects: list[StorageObjectInfo],
node_under_test: ClusterNode,
cluster_state_controller: ClusterStateController,
):
with reporter.step(f"Remove one node from the list"):
storage_nodes = list(set(self.cluster.storage_nodes) - {node_under_test.storage_node})
with reporter.step("Tick 2 epochs and wait for 2 blocks"):
self.tick_epochs(2, storage_nodes[0], wait_block=2)
with reporter.step(f"Stop node"):
cluster_state_controller.stop_node_host(node_under_test, "hard")
with reporter.step("Verify that there are no corrupted objects"):
self.verify_objects(storage_nodes, storage_objects)
with reporter.step(f"Check node still in map"):
self.wait_node_in_map(node_under_test.storage_node, self.shell, alive_node=storage_nodes[0])
with reporter.step(f"Start node"):
cluster_state_controller.start_node_host(node_under_test)
with reporter.step("Verify that there are no corrupted objects"):
self.verify_objects(storage_nodes, storage_objects)
@allure.title("Not enough nodes in the container with policy - 'REP 3 CBF 1 SELECT 4 FROM *'")
def test_not_enough_nodes_in_container_rep_3(
self,
object_and_nodes: tuple[StorageObjectInfo, list[ClusterNode]],
default_wallet: WalletInfo,
cluster_state_controller: ClusterStateController,
simple_file: str,
):
object_info, object_nodes = object_and_nodes
endpoint_without_object = list(set(self.cluster.cluster_nodes) - set(object_nodes))[0].storage_node.get_rpc_endpoint()
endpoint_with_object = object_nodes[0].storage_node.get_rpc_endpoint()
with reporter.step("Stop all nodes with object except first one"):
parallel(cluster_state_controller.stop_node_host, object_nodes[1:], mode="hard")
with reporter.step(f"Get object from node without object"):
get_object(default_wallet, object_info.cid, object_info.oid, self.shell, endpoint_without_object)
with reporter.step(f"Get object from node with object"):
get_object(default_wallet, object_info.cid, object_info.oid, self.shell, endpoint_with_object)
with reporter.step(f"[Negative] Put operation to node with object"):
with pytest.raises(RuntimeError):
put_object(default_wallet, simple_file, object_info.cid, self.shell, endpoint_with_object)
@allure.title("Not enough nodes in the container with policy - 'REP 2 CBF 2 SELECT 4 FROM *'")
def test_not_enough_nodes_in_container_rep_2(
self,
default_wallet: WalletInfo,
cluster_state_controller: ClusterStateController,
frostfs_cli: FrostfsCli,
simple_file: str,
):
with reporter.step("Create container with full network map"):
node_count = len(self.cluster.cluster_nodes)
placement_rule = f"REP {node_count - 2} IN X CBF 2 SELECT {node_count} FROM * AS X"
cid = create_container_with_ape(
ContainerRequest(placement_rule, APE_EVERYONE_ALLOW_ALL),
frostfs_cli,
default_wallet,
self.shell,
self.cluster,
self.cluster.default_rpc_endpoint,
)
with reporter.step("Put object"):
oid = put_object(default_wallet, simple_file, cid, self.shell, self.cluster.default_rpc_endpoint)
with reporter.step("Search nodes with object"):
object_nodes = get_object_nodes(self.cluster, cid, oid, self.cluster.cluster_nodes[0])
with reporter.step("Choose node to stop"):
node_under_test = random.choice(object_nodes)
alive_node_with_object = random.choice(list(set(object_nodes) - {node_under_test}))
alive_endpoint_with_object = alive_node_with_object.storage_node.get_rpc_endpoint()
with reporter.step("Stop random node with object"):
cluster_state_controller.stop_node_host(node_under_test, "hard")
with reporter.step("Put object to alive node with object"):
oid_2 = put_object(default_wallet, simple_file, cid, self.shell, alive_endpoint_with_object)
with reporter.step("Get object from alive node with object"):
get_object(default_wallet, cid, oid_2, self.shell, alive_endpoint_with_object)
with reporter.step("Create container on alive node"):
create_container(default_wallet, self.shell, alive_endpoint_with_object, placement_rule)


@@ -1,681 +1,139 @@
import logging
import random
from datetime import datetime
from time import sleep
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.clients.s3 import BucketContainerResolver, S3ClientWrapper, VersioningStatus
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.steps import s3_helper
from frostfs_testlib.steps.cli.container import StorageContainer, StorageContainerInfo, create_container
from frostfs_testlib.steps.cli.object import get_object, put_object_to_random_node
from frostfs_testlib.steps.node_management import (
check_node_in_map,
check_node_not_in_map,
exclude_node_from_network_map,
include_node_to_network_map,
remove_nodes_from_map_morph,
wait_for_node_to_be_ready,
)
from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode
from frostfs_testlib.storage.controllers import ClusterStateController, ShardsWatcher
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils import datetime_utils
from frostfs_testlib.utils.failover_utils import wait_object_replication
from frostfs_testlib.utils.file_keeper import FileKeeper
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
from ...helpers.container_request import REP_2_2_2_PUBLIC, requires_container
from ...resources.common import S3_POLICY_FILE_LOCATION
from failover_utils import wait_all_storage_node_returned, wait_object_replication_on_nodes
from file_helper import generate_file, get_file_hash
from neofs_testlib.hosting import Host, Hosting
from neofs_testlib.shell import CommandOptions
from python_keywords.container import create_container
from python_keywords.neofs_verbs import get_object, put_object
from wellknown_acl import PUBLIC_ACL
logger = logging.getLogger("NeoLogger")
stopped_nodes: list[StorageNode] = []
stopped_hosts = []
@pytest.fixture(scope="function")
@allure.title("Provide File Keeper")
def file_keeper():
keeper = FileKeeper()
yield keeper
keeper.restore_files()
@pytest.fixture(autouse=True)
@allure.step("Return all stopped hosts")
def after_run_return_all_stopped_hosts(hosting: Hosting):
yield
return_stopped_hosts(hosting)
def panic_reboot_host(host: Host) -> None:
shell = host.get_shell()
shell.exec('sudo sh -c "echo 1 > /proc/sys/kernel/sysrq"')
options = CommandOptions(close_stdin=True, timeout=1, check=False)
shell.exec('sudo sh -c "echo b > /proc/sysrq-trigger"', options)
def return_stopped_hosts(hosting: Hosting) -> None:
for host_address in list(stopped_hosts):
with allure.step(f"Start host {host_address}"):
host = hosting.get_host_by_address(host_address)
host.start_host()
stopped_hosts.remove(host_address)
wait_all_storage_node_returned(hosting)
@allure.title("Lose and return storage node's host")
@pytest.mark.parametrize("hard_reboot", [True, False])
@pytest.mark.failover
@pytest.mark.failover_storage
class TestFailoverStorage(ClusterTestBase):
@allure.title("Shutdown and start node (stop_mode={stop_mode})")
@pytest.mark.parametrize("stop_mode", ["hard", "soft"])
@pytest.mark.failover_reboot
@requires_container(REP_2_2_2_PUBLIC)
def test_lose_storage_node_host(
self,
default_wallet,
stop_mode: str,
container: str,
require_multiple_hosts,
simple_object_size: ObjectSize,
cluster: Cluster,
cluster_state_controller: ClusterStateController,
):
wallet = default_wallet
source_file_path = generate_file(simple_object_size.value)
stopped_hosts_nodes = []
def test_lose_storage_node_host(
prepare_wallet_and_deposit,
client_shell,
hosting: Hosting,
hard_reboot: bool,
require_multiple_hosts,
):
wallet = prepare_wallet_and_deposit
placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
source_file_path = generate_file()
cid = create_container(wallet, shell=client_shell, rule=placement_rule, basic_acl=PUBLIC_ACL)
oid = put_object(wallet, source_file_path, cid, shell=client_shell)
node_endpoints = wait_object_replication_on_nodes(wallet, cid, oid, 2, shell=client_shell)
with reporter.step(f"Put object"):
oid = put_object_to_random_node(wallet, source_file_path, container, shell=self.shell, cluster=self.cluster)
for node_endpoint in node_endpoints:
host_address = node_endpoint.split(":")[0]
host = hosting.get_host_by_address(host_address)
stopped_hosts.append(host.config.address)
with reporter.step(f"Wait for replication and get nodes with object"):
nodes_with_object = wait_object_replication(container, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes)
with allure.step(f"Stop host {host_address}"):
host.stop_host("hard" if hard_reboot else "soft")
with reporter.step(f"Stop 2 nodes with object and wait replication one by one"):
for storage_node in random.sample(nodes_with_object, 2):
stopped_hosts_nodes.append(storage_node)
new_nodes = wait_object_replication_on_nodes(
wallet, cid, oid, 2, shell=client_shell, excluded_nodes=[node_endpoint]
)
assert all(old_node not in new_nodes for old_node in node_endpoints)
cluster_node = cluster.node(storage_node)
cluster_state_controller.stop_node_host(cluster_node, stop_mode)
with allure.step("Check object data is not corrupted"):
got_file_path = get_object(wallet, cid, oid, endpoint=new_nodes[0])
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
replicated_nodes = wait_object_replication(
container,
oid,
2,
shell=self.shell,
nodes=list(set(self.cluster.storage_nodes) - {*stopped_hosts_nodes}),
with allure.step(f"Return all hosts"):
return_stopped_hosts(hosting)
with allure.step("Check object data is not corrupted"):
new_nodes = wait_object_replication_on_nodes(wallet, cid, oid, 2, shell=client_shell)
got_file_path = get_object(wallet, cid, oid, shell=client_shell, endpoint=new_nodes[0])
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
@allure.title("Panic storage node's host")
@pytest.mark.parametrize("sequence", [True, False])
@pytest.mark.failover_panic
@pytest.mark.failover
def test_panic_storage_node_host(
prepare_wallet_and_deposit,
client_shell,
hosting: Hosting,
require_multiple_hosts,
sequence: bool,
):
wallet = prepare_wallet_and_deposit
placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
source_file_path = generate_file()
cid = create_container(wallet, shell=client_shell, rule=placement_rule, basic_acl=PUBLIC_ACL)
oid = put_object(wallet, source_file_path, cid, shell=client_shell)
node_endpoints = wait_object_replication_on_nodes(wallet, cid, oid, 2, shell=client_shell)
allure.attach(
"\n".join(node_endpoints),
"Current nodes with object",
allure.attachment_type.TEXT,
)
new_nodes: list[str] = []
for node_endpoint in node_endpoints:
host_address = node_endpoint.split(":")[0]
with allure.step(f"Hard reboot host {node_endpoint} via magic SysRq option"):
host = hosting.get_host_by_address(host_address)
panic_reboot_host(host)
if sequence:
try:
new_nodes = wait_object_replication_on_nodes(
wallet, cid, oid, 2, shell=client_shell, excluded_nodes=[node_endpoint]
)
except AssertionError:
new_nodes = wait_object_replication_on_nodes(
wallet, cid, oid, 2, shell=client_shell
)
allure.attach(
"\n".join(new_nodes),
f"Nodes with object after {node_endpoint} fail",
allure.attachment_type.TEXT,
)
with reporter.step("Check object data is not corrupted"):
got_file_path = get_object(wallet, container, oid, endpoint=replicated_nodes[0].get_rpc_endpoint(), shell=self.shell)
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
with reporter.step("Return all hosts"):
cluster_state_controller.start_stopped_hosts()
with reporter.step("Check object data is not corrupted"):
replicated_nodes = wait_object_replication(container, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes)
got_file_path = get_object(wallet, container, oid, shell=self.shell, endpoint=replicated_nodes[0].get_rpc_endpoint())
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
@pytest.mark.parametrize("s3_policy", [S3_POLICY_FILE_LOCATION], indirect=True)
@allure.title("Do not ignore unhealthy tree endpoints (s3_client={s3_client})")
def test_unhealthy_tree(
self,
s3_client: S3ClientWrapper,
default_wallet: WalletInfo,
simple_object_size: ObjectSize,
cluster_state_controller: ClusterStateController,
bucket_container_resolver: BucketContainerResolver,
):
default_node = self.cluster.cluster_nodes[0]
with reporter.step("Turn S3 GW off on default node"):
cluster_state_controller.stop_service_of_type(default_node, S3Gate)
with reporter.step("Turn off storage on default node"):
cluster_state_controller.stop_service_of_type(default_node, StorageNode)
with reporter.step("Turn on S3 GW on default node"):
cluster_state_controller.start_service_of_type(default_node, S3Gate)
with reporter.step("Turn on storage on default node"):
cluster_state_controller.start_service_of_type(default_node, StorageNode)
with reporter.step("Create bucket with REP 1 SELECT 1 policy"):
bucket = s3_client.create_bucket(
location_constraint="rep-1",
)
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
with reporter.step("Put object into bucket"):
put_object = s3_client.put_object(bucket, file_path)
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
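# With a REP 1 policy the bucket's container lives on a single storage node;
# resolve which node that is so all other nodes can be stopped below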
node_bucket = s3_helper.search_nodes_with_bucket(
cluster=self.cluster,
bucket_name=bucket,
wallet=default_wallet,
shell=self.shell,
endpoint=self.cluster.storage_nodes[0].get_rpc_endpoint(),
bucket_container_resolver=bucket_container_resolver,
)[0]
with reporter.step("Turn off all storage nodes except bucket node"):
for node in [node_to_stop for node_to_stop in self.cluster.cluster_nodes if node_to_stop != node_bucket]:
with reporter.step(f"Stop storage service on node: {node}"):
cluster_state_controller.stop_service_of_type(node, StorageNode)
with reporter.step(f"Change s3 endpoint to bucket node"):
s3_client.set_endpoint(node_bucket.s3_gate.get_endpoint())
with reporter.step("Check that object is available"):
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
with reporter.step("Start storage nodes"):
cluster_state_controller.start_all_stopped_services()
@pytest.mark.failover
@pytest.mark.failover_empty_map
class TestEmptyMap(ClusterTestBase):
"""
A set of tests for makes map empty and verify that we can read objects after that
"""
@reporter.step("Teardown after EmptyMap offline test")
@pytest.fixture()
def empty_map_offline_teardown(self):
yield
with reporter.step("Return all storage nodes to network map"):
for node in stopped_nodes:
include_node_to_network_map(node, node, shell=self.shell, cluster=self.cluster)
stopped_nodes.remove(node)
@pytest.mark.failover_empty_map_offline
@allure.title("Empty network map via offline all storage nodes (s3_client={s3_client})")
def test_offline_all_storage_nodes(
self,
s3_client: S3ClientWrapper,
bucket: str,
simple_object_size: ObjectSize,
empty_map_offline_teardown,
):
"""
The test makes network map empty (set offline status on all storage nodes) then returns all nodes to map and checks that object can read through s3.
Steps:
1. Check that bucket is empty
2: PUT object into bucket
3: Check that object exists in bucket
4: Exclude all storage nodes from network map (set status OFFLINE)
5: Return all storage nodes to network map
6: Check that we can read object from #2
Args:
bucket: bucket which contains tested object
simple_object_size: size of object
"""
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
bucket_objects = [file_name]
with reporter.step("Put object into bucket"):
s3_client.put_object(bucket, file_path)
with reporter.step("Check that object exists in bucket"):
s3_helper.check_objects_in_bucket(s3_client, bucket, bucket_objects)
storage_nodes = self.cluster.storage_nodes
with reporter.step("Exclude all storage nodes from network map"):
for node in storage_nodes:
stopped_nodes.append(node)
exclude_node_from_network_map(node, node, shell=self.shell, cluster=self.cluster)
with reporter.step("Return all storage nodes to network map"):
for node in storage_nodes:
include_node_to_network_map(node, node, shell=self.shell, cluster=self.cluster)
stopped_nodes.remove(node)
with reporter.step("Check that we can read object"):
s3_helper.check_objects_in_bucket(s3_client, bucket, bucket_objects)
@reporter.step("Teardown after EmptyMap stop service test")
@pytest.fixture()
def empty_map_stop_service_teardown(self, cluster_state_controller: ClusterStateController):
yield
with reporter.step("Return all storage nodes to network map"):
cluster_state_controller.start_all_stopped_services()
for node in stopped_nodes:
check_node_in_map(node, shell=self.shell, alive_node=node)
@pytest.mark.failover_empty_map_stop_service
@allure.title("Empty network map via stop all storage services (s3_client={s3_client})")
def test_stop_all_storage_nodes(
self,
s3_client: S3ClientWrapper,
bucket: str,
simple_object_size: ObjectSize,
empty_map_stop_service_teardown,
cluster_state_controller: ClusterStateController,
):
"""
The test makes network map empty (stop storage service on all nodes
then use 'frostfs-adm morph delete-nodes' to delete nodes from map)
then start all services and checks that object can read through s3.
Steps:
1. Check that bucket is empty
2: PUT object into bucket
3: Check that object exists in bucket
4: Exclude all storage nodes from network map (stop storage service
and manual exclude from map)
5: Return all storage nodes to network map
6: Check that we can read object from #2
Args:
bucket: bucket which contains tested object
simple_object_size: size of object
"""
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
bucket_objects = [file_name]
with reporter.step("Put object into bucket"):
s3_client.put_object(bucket, file_path)
with reporter.step("Check that object exists in bucket"):
s3_helper.check_objects_in_bucket(s3_client, bucket, bucket_objects)
with reporter.step("Stop all storage nodes"):
cluster_state_controller.stop_services_of_type(StorageNode)
with reporter.step("Remove all nodes from network map"):
remove_nodes_from_map_morph(shell=self.shell, cluster=self.cluster, remove_nodes=self.cluster.services(StorageNode))
with reporter.step("Return all storage nodes to network map"):
self.return_nodes_after_stop_with_check_empty_map(cluster_state_controller)
with reporter.step("Check that object exists in bucket"):
s3_helper.check_objects_in_bucket(s3_client, bucket, bucket_objects)
@reporter.step("Return all nodes to cluster with check empty map first")
def return_nodes_after_stop_with_check_empty_map(self, cluster_state_controller: ClusterStateController) -> None:
first_node = self.cluster.cluster_nodes[0].service(StorageNode)
with reporter.step("Start first node and check network map"):
cluster_state_controller.start_service_of_type(self.cluster.cluster_nodes[0], StorageNode)
wait_for_node_to_be_ready(first_node)
for check_node in self.cluster.storage_nodes:
check_node_not_in_map(check_node, shell=self.shell, alive_node=first_node)
for node in self.cluster.cluster_nodes[1:]:
storage_node = node.service(StorageNode)
cluster_state_controller.start_service_of_type(node, StorageNode)
wait_for_node_to_be_ready(storage_node)
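# Presumably we must wait one morph block so the node's bootstrap transaction is
# accepted before the epoch tick returns it to the network map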
sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
self.tick_epochs(1)
check_node_in_map(storage_node, shell=self.shell, alive_node=first_node)
@allure.title("Object loss from fstree/blobovnicza (versioning=enabled, s3_client={s3_client})")
def test_s3_fstree_blobovnicza_loss_versioning_on(
self,
s3_client: S3ClientWrapper,
simple_object_size: ObjectSize,
cluster_state_controller: ClusterStateController,
bucket: str,
):
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
object_versions = []
with reporter.step("Put object into one bucket"):
put_object = s3_client.put_object(bucket, file_path)
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
object_versions.append(put_object)
with reporter.step("Stop all storage nodes"):
cluster_state_controller.stop_services_of_type(StorageNode)
with reporter.step("Delete blobovnicza and fstree from all nodes"):
for node in self.cluster.storage_nodes:
node.delete_blobovnicza()
node.delete_fstree()
with reporter.step("Start all storage nodes"):
cluster_state_controller.start_all_stopped_services()
# need to get Delete Marker first
with reporter.step("Delete the object from the bucket"):
delete_object = s3_client.delete_object(bucket, file_name)
object_versions.append(delete_object["VersionId"])
# and now delete all versions of object (including Delete Markers)
with reporter.step("Delete all versions of the object from the bucket"):
for version in object_versions:
delete_object = s3_client.delete_object(bucket, file_name, version_id=version)
with reporter.step("Delete bucket"):
s3_client.delete_bucket(bucket)
@allure.title("Object loss from fstree/blobovnicza (versioning=disabled, s3_client={s3_client})")
def test_s3_fstree_blobovnicza_loss_versioning_off(
self,
s3_client: S3ClientWrapper,
simple_object_size: ObjectSize,
cluster_state_controller: ClusterStateController,
bucket: str,
):
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
with reporter.step("Put object into one bucket"):
s3_client.put_object(bucket, file_path)
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
with reporter.step("Stop all storage nodes"):
cluster_state_controller.stop_services_of_type(StorageNode)
with reporter.step("Delete blobovnicza and fstree from all nodes"):
for node in self.cluster.storage_nodes:
node.delete_blobovnicza()
node.delete_fstree()
with reporter.step("Start all storage nodes"):
cluster_state_controller.start_all_stopped_services()
with reporter.step("Delete the object from the bucket"):
s3_client.delete_object(bucket, file_name)
with reporter.step("Delete bucket"):
s3_client.delete_bucket(bucket)
@pytest.mark.skip(reason="Need to increase cache lifetime")
@pytest.mark.parametrize(
# versioning should NOT be VersioningStatus.SUSPENDED, it needs to be undefined
"versioning_status",
[VersioningStatus.ENABLED, VersioningStatus.UNDEFINED],
)
@allure.title(
"After Pilorama.db loss on all nodes list objects should return nothing in second listing (versioning_status={versioning_status}, s3_client={s3_client})"
)
def test_s3_pilorama_loss(
self,
s3_client: S3ClientWrapper,
simple_object_size: ObjectSize,
versioning_status: VersioningStatus,
cluster_state_controller: ClusterStateController,
bucket: str,
):
s3_helper.set_bucket_versioning(s3_client, bucket, versioning_status)
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
with reporter.step("Put object into one bucket"):
s3_client.put_object(bucket, file_path)
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
with reporter.step("Stop all storage nodes"):
cluster_state_controller.stop_services_of_type(StorageNode)
with reporter.step("Delete pilorama.db from all nodes"):
for node in self.cluster.storage_nodes:
for shard in node.get_shards():
node.delete_file(shard.pilorama)
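# pilorama.db backs the tree service used for S3 listings; once deleted the listing
# data is lost, though the first request below may still be served from a cache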
with reporter.step("Start all storage nodes"):
cluster_state_controller.start_all_stopped_services()
with reporter.step("Check list objects first time"):
objects_list = s3_client.list_objects(bucket)
assert objects_list, f"Expected not empty bucket"
with reporter.step("Check list objects second time"):
objects_list = s3_client.list_objects(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with reporter.step("Delete bucket"):
s3_client.delete_bucket(bucket)
@pytest.mark.failover
@pytest.mark.failover_data_loss
class TestStorageDataLoss(ClusterTestBase):
@allure.title(
"After metabase loss on all nodes operations on objects and buckets should be still available via S3 (s3_client={s3_client})"
)
@pytest.mark.metabase_loss
def test_metabase_loss(
self,
s3_client: S3ClientWrapper,
simple_object_size: ObjectSize,
complex_object_size: ObjectSize,
cluster_state_controller: ClusterStateController,
file_keeper: FileKeeper,
bucket: str,
):
with reporter.step("Put objects into bucket"):
simple_object_path = generate_file(simple_object_size.value)
simple_object_key = s3_helper.object_key_from_file_path(simple_object_path)
complex_object_path = generate_file(complex_object_size.value)
complex_object_key = s3_helper.object_key_from_file_path(complex_object_path)
s3_client.put_object(bucket, simple_object_path)
s3_client.put_object(bucket, complex_object_path)
with reporter.step("Check objects are in bucket"):
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[simple_object_key, complex_object_key])
with reporter.step("Stop storage services on all nodes"):
cluster_state_controller.stop_services_of_type(StorageNode)
with reporter.step("Delete metabase from all nodes"):
for node in cluster_state_controller.cluster.storage_nodes:
node.delete_metabase()
with reporter.step("Enable resync_metabase option for storage services"):
for storage_node in cluster_state_controller.cluster.storage_nodes:
with reporter.step(f"Enable resync_metabase option for {storage_node}"):
config_file_path, config = storage_node.get_shards_config()
if not config["storage"]["shard"]["default"]["resync_metabase"]:
file_keeper.add(storage_node, config_file_path)
config["storage"]["shard"]["default"]["resync_metabase"] = True
storage_node.save_config(config, config_file_path)
with reporter.step("Start storage services on all nodes"):
cluster_state_controller.start_all_stopped_services()
with reporter.step("Wait for tree rebalance"):
# TODO: Use product metric when we have proper ones for this check
sleep(30)
with reporter.step("Delete objects from bucket"):
with reporter.step("Delete simple object from bucket"):
with expect_not_raises():
s3_client.delete_object(bucket, simple_object_key)
with reporter.step("Delete complex object from bucket"):
with expect_not_raises():
s3_client.delete_object(bucket, complex_object_key)
with reporter.step("Delete bucket"):
with expect_not_raises():
s3_client.delete_bucket(bucket)
@allure.title("Write cache loss on one node should not affect shards and should not produce errors in log")
@pytest.mark.write_cache_loss
def test_write_cache_loss_on_one_node(
self,
node_under_test: ClusterNode,
simple_object_size: ObjectSize,
cluster_state_controller: ClusterStateController,
shards_watcher: ShardsWatcher,
default_wallet: WalletInfo,
test_start_time: datetime,
):
exception_messages = []
with reporter.step(f"Create container on node {node_under_test}"):
locode = node_under_test.storage_node.get_un_locode()
placement_rule = f"""REP 1 IN X
CBF 1
SELECT 1 FROM C AS X
FILTER 'UN-LOCODE' EQ '{locode}' AS C"""
cid = create_container(
default_wallet,
self.shell,
node_under_test.storage_node.get_rpc_endpoint(),
rule=placement_rule,
)
container = StorageContainer(
StorageContainerInfo(cid, default_wallet),
self.shell,
cluster_state_controller.cluster,
)
with reporter.step(f"Put couple objects to container on node {node_under_test}"):
storage_objects: list[StorageObjectInfo] = []
for _ in range(5):
storage_object = container.generate_object(
simple_object_size.value,
endpoint=node_under_test.storage_node.get_rpc_endpoint(),
)
storage_objects.append(storage_object)
with reporter.step("Take shards snapshot"):
shards_watcher.take_shards_snapshot()
with reporter.step(f"Stop storage service on node {node_under_test}"):
cluster_state_controller.stop_service_of_type(node_under_test, StorageNode)
with reporter.step(f"Delete write cache from node {node_under_test}"):
node_under_test.storage_node.delete_write_cache()
with reporter.step(f"Start storage service on node {node_under_test}"):
cluster_state_controller.start_all_stopped_services()
with reporter.step("Objects should be available"):
for storage_object in storage_objects:
get_object(
storage_object.wallet,
container.get_id(),
storage_object.oid,
self.shell,
node_under_test.storage_node.get_rpc_endpoint(),
)
with reporter.step("No shards should have new errors"):
shards_watcher.take_shards_snapshot()
shards_with_errors = shards_watcher.get_shards_with_new_errors()
if shards_with_errors:
exception_messages.append(f"Shards have new errors: {shards_with_errors}")
with reporter.step("No shards should have degraded status"):
snapshot = shards_watcher.get_shards_snapshot()
for shard in snapshot:
status = snapshot[shard]["mode"]
if status != "read-write":
exception_messages.append(f"Shard {shard} changed status to {status}")
with reporter.step("No related errors should be in log"):
if node_under_test.host.is_message_in_logs(message_regex=r"\Wno such file or directory\W", since=test_start_time):
exception_messages.append(f"Node {node_under_test} have shard errors in logs")
with reporter.step("Pass test if no errors found"):
assert not exception_messages, "\n".join(exception_messages)
@allure.title("Loss of one node should trigger use of tree and storage service in another node (s3_client={s3_client})")
def test_s3_one_endpoint_loss(
self,
bucket,
s3_client: S3ClientWrapper,
simple_object_size: ObjectSize,
cluster_state_controller: ClusterStateController,
):
# TODO: need to check that s3 gate is connected to localhost (such metric will be supported in 1.3)
with reporter.step("Stop one node and wait for rebalance connection of s3 gate to storage service"):
current_node = self.cluster.cluster_nodes[0]
cluster_state_controller.stop_service_of_type(current_node, StorageNode)
# waiting for rebalance connection of s3 gate to storage service
sleep(60)
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
with reporter.step("Put object into one bucket"):
put_object = s3_client.put_object(bucket, file_path)
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
@pytest.mark.parametrize("s3_policy", [S3_POLICY_FILE_LOCATION], indirect=True)
@allure.title("After Pilorama.db loss on one node object is retrievable (s3_client={s3_client})")
def test_s3_one_pilorama_loss(
self,
s3_client: S3ClientWrapper,
simple_object_size: ObjectSize,
cluster_state_controller: ClusterStateController,
):
bucket = s3_client.create_bucket(
location_constraint="rep3",
grant_read="uri=http://acs.amazonaws.com/groups/global/AllUsers",
)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with reporter.step("Check bucket versioning"):
bucket_versioning = s3_client.get_bucket_versioning_status(bucket)
assert bucket_versioning == "Enabled", "Bucket should have enabled versioning"
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
object_versions = []
with reporter.step("Put object into one bucket"):
put_object = s3_client.put_object(bucket, file_path)
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
object_versions.append(put_object)
node_to_check = self.cluster.storage_nodes[0]
piloramas_list_before_removing = []
with reporter.step("Get list of all pilorama.db on shards"):
for shard in node_to_check.get_shards():
piloramas_list_before_removing.append(shard.pilorama)
with reporter.step("Check that all pilorama.db files exist on node"):
for pilorama in piloramas_list_before_removing:
assert node_to_check.is_file_exist(pilorama), f"File {pilorama} does not exist"
with reporter.step("Stop all storage nodes"):
cluster_state_controller.stop_services_of_type(StorageNode)
with reporter.step("Delete pilorama.db from one node"):
for pilorama in piloramas_list_before_removing:
node_to_check.delete_file(pilorama)
with reporter.step("Start all storage nodes"):
cluster_state_controller.start_all_stopped_services()
with reporter.step("Tick epoch to trigger sync and then wait for 1 minute"):
self.tick_epochs(1)
sleep(120)
with reporter.step("Get list of all pilorama.db after sync"):
for pilorama in piloramas_list_before_removing:
assert node_to_check.is_file_exist(pilorama), f"File {pilorama} does not exist"
with reporter.step("Check bucket versioning"):
bucket_versioning = s3_client.get_bucket_versioning_status(bucket)
assert bucket_versioning == "Enabled", "Bucket should have enabled versioning"
with reporter.step("Check list objects"):
objects_list = s3_client.list_objects(bucket)
assert objects_list, f"Expected not empty bucket"
with reporter.step("Delete the object from the bucket"):
delete_object = s3_client.delete_object(bucket, file_name)
assert "DeleteMarker" in delete_object.keys(), "Delete markers not found"
with reporter.step("Check list objects"):
objects_list = s3_client.list_objects_versions(bucket)
assert objects_list, f"Expected not empty bucket"
object_versions.append(delete_object["VersionId"])
# and now delete all versions of object (including Delete Markers)
with reporter.step("Delete all versions of the object from the bucket"):
for version in object_versions:
delete_object = s3_client.delete_object(bucket, file_name, version_id=version)
with reporter.step("Check list objects"):
objects_list = s3_client.list_objects_versions(bucket)
assert not objects_list, f"Expected empty bucket"
with reporter.step("Delete bucket"):
s3_client.delete_bucket(bucket)

View file

@ -1,426 +0,0 @@
import logging
import random
from time import sleep
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.healthcheck.interfaces import Healthcheck
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import get_object, get_object_nodes, neo_go_query_height, put_object, put_object_to_random_node
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.controllers import ClusterStateController
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, StorageObjectInfo
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.parallel import parallel
from frostfs_testlib.utils.failover_utils import wait_object_replication
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
from ...helpers.container_request import APE_EVERYONE_ALLOW_ALL, PUBLIC_WITH_POLICY, REP_2_2_2_PUBLIC, ContainerRequest, requires_container
logger = logging.getLogger("NeoLogger")
STORAGE_NODE_COMMUNICATION_PORT = "8080"
STORAGE_NODE_COMMUNICATION_PORT_TLS = "8082"
PORTS_TO_BLOCK = [STORAGE_NODE_COMMUNICATION_PORT, STORAGE_NODE_COMMUNICATION_PORT_TLS]
blocked_nodes: list[ClusterNode] = []
OBJECT_ATTRIBUTES = [
None,
{"key1": 1, "key2": "abc", "common_key": "common_value"},
{"key1": 2, "common_key": "common_value"},
]
@pytest.mark.failover
@pytest.mark.failover_network
class TestFailoverNetwork(ClusterTestBase):
@pytest.fixture(autouse=True)
@allure.title("Restore network")
def restore_network(self, healthcheck: Healthcheck, cluster_state_controller: ClusterStateController):
yield
with reporter.step(f"Count blocked nodes {len(blocked_nodes)}"):
not_empty = len(blocked_nodes) != 0
for node in list(blocked_nodes):
with reporter.step(f"Restore network for {node}"):
cluster_state_controller.restore_traffic(node=node)
blocked_nodes.remove(node)
if not_empty:
parallel(healthcheck.storage_healthcheck, self.cluster.cluster_nodes)
@pytest.fixture()
@allure.title("Restore drop traffic to system")
def restore_down_interfaces(self, cluster_state_controller: ClusterStateController):
yield
cluster_state_controller.restore_interfaces()
@pytest.fixture()
def storage_objects(
self,
simple_object_size: ObjectSize,
container: str,
default_wallet: WalletInfo,
) -> list[StorageObjectInfo]:
file_path = generate_file(simple_object_size.value)
file_hash = get_file_hash(file_path)
storage_objects = []
with reporter.step("Put object"):
for attribute in OBJECT_ATTRIBUTES:
oid = put_object_to_random_node(
wallet=default_wallet,
path=file_path,
cid=container,
shell=self.shell,
cluster=self.cluster,
)
storage_object = StorageObjectInfo(cid=container, oid=oid)
storage_object.size = simple_object_size.value
storage_object.wallet = default_wallet
storage_object.file_path = file_path
storage_object.file_hash = file_hash
storage_object.attributes = attribute
storage_objects.append(storage_object)
return storage_objects
@allure.title("Block Storage node traffic")
@requires_container(REP_2_2_2_PUBLIC)
def test_block_storage_node_traffic(
self,
default_wallet: WalletInfo,
container: str,
require_multiple_hosts,
simple_object_size: ObjectSize,
cluster_state_controller: ClusterStateController,
):
"""
Block storage nodes traffic using iptables and wait for replication for objects.
"""
wallet = default_wallet
wakeup_node_timeout = 10  # timeout to let nodes detect that traffic has been blocked
nodes_to_block_count = 2
source_file_path = generate_file(simple_object_size.value)
oid = put_object_to_random_node(wallet, source_file_path, container, self.shell, self.cluster)
nodes = wait_object_replication(container, oid, 2, self.shell, self.cluster.storage_nodes)
logger.info(f"Nodes are {nodes}")
nodes_to_block = nodes
if nodes_to_block_count > len(nodes):
# TODO: the intent of this logic is not clear, need to revisit
nodes_to_block = random.choices(nodes, k=2)
nodes_non_block = list(set(self.cluster.storage_nodes) - set(nodes_to_block))
nodes_non_block_cluster = [
cluster_node for cluster_node in self.cluster.cluster_nodes if cluster_node.storage_node in nodes_non_block
]
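# Isolate every node that does not hold the object from the nodes that do, then
# verify replication cannot reach the isolated node while its traffic is dropped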
with reporter.step("Block traffic and check corrupted object"):
for node in nodes_non_block_cluster:
with reporter.step(f"Block incoming traffic at node {node}"):
blocked_nodes.append(node)
cluster_state_controller.drop_traffic(
node=node, wakeup_timeout=wakeup_node_timeout, name_interface="data", block_nodes=nodes_to_block
)
with reporter.step(f"Check object is not stored on node {node}"):
new_nodes = wait_object_replication(
container,
oid,
2,
shell=self.shell,
nodes=list(set(self.cluster.storage_nodes) - set(nodes_non_block)),
)
assert node.storage_node not in new_nodes
with reporter.step("Check object data is not corrupted"):
got_file_path = get_object(wallet, container, oid, endpoint=new_nodes[0].get_rpc_endpoint(), shell=self.shell)
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
with reporter.step(f"Unblock incoming traffic"):
for node in nodes_non_block_cluster:
with reporter.step(f"Unblock at host {node}"):
cluster_state_controller.restore_traffic(node=node)
block_node = [
cluster_node for cluster_node in self.cluster.cluster_nodes if cluster_node.storage_node == node.storage_node
]
blocked_nodes.remove(*block_node)
sleep(wakeup_node_timeout)
with reporter.step("Check object data is not corrupted"):
new_nodes = wait_object_replication(container, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes)
got_file_path = get_object(wallet, container, oid, shell=self.shell, endpoint=new_nodes[0].get_rpc_endpoint())
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
@pytest.mark.interfaces
@allure.title("Block DATA interface node")
@requires_container(PUBLIC_WITH_POLICY("REP 1 CBF 1", short_name="REP 1 CBF 1"))
def test_block_data_interface(
self,
cluster_state_controller: ClusterStateController,
default_wallet: WalletInfo,
restore_down_interfaces: None,
storage_objects: list[StorageObjectInfo],
):
storage_object = storage_objects[0]
with reporter.step("Search nodes with object"):
nodes_with_object = get_object_nodes(
cluster=self.cluster,
cid=storage_object.cid,
oid=storage_object.oid,
alive_node=self.cluster.cluster_nodes[0],
)
with reporter.step("Get data interface to node"):
config_interfaces = list(nodes_with_object[0].host.config.interfaces.keys())
with reporter.step(f"Get data in {config_interfaces}"):
data_interfaces = [interface for interface in config_interfaces if "data" in interface]
with reporter.step("Block data interfaces for node"):
for interface in data_interfaces:
cluster_state_controller.down_interface(nodes=nodes_with_object, interface=interface)
with reporter.step("Tick epoch and wait 2 block"):
nodes_without_an_object = list(set(self.cluster.cluster_nodes) - set(nodes_with_object))
self.tick_epochs(1, alive_node=nodes_without_an_object[0].storage_node, wait_block=2)
with reporter.step("Get object for target nodes to data interfaces, expect false"):
with pytest.raises(RuntimeError, match="can't create API client: can't init SDK client: context (deadline exceeded|canceled)"):
get_object(
wallet=default_wallet,
cid=storage_object.cid,
oid=storage_object.oid,
shell=self.shell,
endpoint=nodes_with_object[0].storage_node.get_rpc_endpoint(),
)
with reporter.step(f"Get object others nodes, expect true"):
input_file = get_object(
wallet=default_wallet,
cid=storage_object.cid,
oid=storage_object.oid,
shell=self.shell,
endpoint=nodes_without_an_object[0].storage_node.get_rpc_endpoint(),
)
with reporter.step("Restore interface and tick 1 epoch, wait 2 block"):
cluster_state_controller.restore_interfaces()
self.tick_epochs(1, alive_node=nodes_without_an_object[0].storage_node, wait_block=2)
@pytest.mark.interfaces
@allure.title("Block INTERNAL interface node")
@requires_container(PUBLIC_WITH_POLICY("REP 1 CBF 1", short_name="REP 1 CBF 1"))
def test_block_internal_interface(
self,
cluster_state_controller: ClusterStateController,
default_wallet: WalletInfo,
restore_down_interfaces: None,
storage_objects: list[StorageObjectInfo],
simple_object_size: ObjectSize,
):
storage_object = storage_objects[0]
with reporter.step("Search nodes with object"):
nodes_with_object = get_object_nodes(
cluster=self.cluster,
cid=storage_object.cid,
oid=storage_object.oid,
alive_node=self.cluster.cluster_nodes[0],
)
with reporter.step("Get internal interface to node"):
config_interfaces = list(nodes_with_object[0].host.config.interfaces.keys())
with reporter.step(f"Get internal in {config_interfaces}"):
internal_interfaces = [interface for interface in config_interfaces if "internal" in interface]
with reporter.step("Block internal interfaces for node"):
for interface in internal_interfaces:
cluster_state_controller.down_interface(nodes=nodes_with_object, interface=interface)
with reporter.step("Tick epoch and wait 2 block"):
nodes_without_an_object = list(set(self.cluster.cluster_nodes) - set(nodes_with_object))
self.tick_epochs(1, alive_node=nodes_without_an_object[0].storage_node, wait_block=2)
with reporter.step("Get object others node, expect false"):
with pytest.raises(RuntimeError, match="rpc error"):
get_object(
wallet=default_wallet,
cid=storage_object.cid,
oid=storage_object.oid,
shell=self.shell,
endpoint=nodes_without_an_object[0].storage_node.get_rpc_endpoint(),
)
with reporter.step("Put object, others node, expect false"):
with pytest.raises(RuntimeError, match="rpc error"):
put_object(
wallet=default_wallet,
path=storage_object.file_path,
cid=storage_object.cid,
shell=self.shell,
endpoint=nodes_without_an_object[0].storage_node.get_rpc_endpoint(),
)
with reporter.step(f"Get object nodes with object, expect true"):
_ = get_object(
wallet=default_wallet,
cid=storage_object.cid,
oid=storage_object.oid,
shell=self.shell,
endpoint=nodes_with_object[0].storage_node.get_rpc_endpoint(),
)
with reporter.step(f"Put object nodes with object, expect true"):
temp_file_path = generate_file(simple_object_size.value)
_ = put_object(
wallet=default_wallet,
path=temp_file_path,
cid=storage_object.cid,
shell=self.shell,
endpoint=nodes_with_object[0].storage_node.get_rpc_endpoint(),
)
with reporter.step("Restore interface and tick 1 epoch, wait 2 block"):
cluster_state_controller.restore_interfaces()
self.tick_epochs(1, alive_node=nodes_without_an_object[0].storage_node, wait_block=2)
@pytest.mark.interfaces
@pytest.mark.failover_baremetal
@pytest.mark.parametrize(
"block_interface, other_interface",
[(Interfaces.DATA_O, Interfaces.DATA_1), (Interfaces.DATA_1, Interfaces.DATA_O)],
)
@allure.title("Down data interfaces to all nodes(interface={block_interface})")
def test_down_data_interface(
self,
require_multiple_interfaces,
cluster_state_controller: ClusterStateController,
default_wallet: WalletInfo,
simple_object_size: ObjectSize,
restore_down_interfaces: None,
block_interface: Interfaces,
other_interface: Interfaces,
):
endpoint_id_map = {
Interfaces.DATA_O: 0,
Interfaces.DATA_1: 1,
}
endpoint_id = endpoint_id_map[other_interface]
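# All requests below are sent via the endpoint of the interface that stays up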
cluster_nodes = self.cluster.cluster_nodes
with reporter.step(f"Block {block_interface.value} interfaces"):
cluster_state_controller.down_interface(cluster_nodes, block_interface.value)
with reporter.step("Tick 1 epoch and wait 2 block for sync all nodes"):
self.tick_epochs(1, alive_node=cluster_nodes[0].storage_node, wait_block=2)
with reporter.step("Create container"):
cid = create_container(
wallet=default_wallet,
shell=self.shell,
endpoint=cluster_nodes[0].storage_node.get_all_rpc_endpoint()[endpoint_id],
rule="REP 4 CBF 1",
)
with reporter.step("Put object"):
file_path = generate_file(simple_object_size.value)
oid = put_object(
wallet=default_wallet,
path=file_path,
cid=cid,
shell=self.shell,
endpoint=cluster_nodes[0].storage_node.get_all_rpc_endpoint()[endpoint_id],
)
with reporter.step("Get object"):
get_object(
wallet=default_wallet,
cid=cid,
oid=oid,
shell=self.shell,
endpoint=cluster_nodes[0].storage_node.get_all_rpc_endpoint()[endpoint_id],
)
with reporter.step("Restore interfaces all nodes"):
cluster_state_controller.restore_interfaces()
self.tick_epochs(1, alive_node=cluster_nodes[0].storage_node, wait_block=2)
@pytest.mark.interfaces
@pytest.mark.failover_baremetal
@pytest.mark.parametrize(
"container_request",
[ContainerRequest(f"REP %NODE_COUNT% IN X CBF 1 SELECT %NODE_COUNT% FROM * AS X", APE_EVERYONE_ALLOW_ALL)],
indirect=True,
)
@pytest.mark.parametrize("interface", [Interfaces.INTERNAL_0, Interfaces.INTERNAL_1])
@allure.title("Down internal interfaces to all nodes(interface={interface})")
def test_down_internal_interface(
self,
require_multiple_interfaces,
cluster_state_controller: ClusterStateController,
default_wallet: WalletInfo,
simple_object_size: ObjectSize,
restore_down_interfaces: None,
interface: Interfaces,
container: str,
):
cluster_nodes = self.cluster.cluster_nodes
latest_block = {}
with reporter.step("Get block all nodes"):
for cluster_node in cluster_nodes:
latest_block[cluster_node] = neo_go_query_height(
shell=cluster_node.host.get_shell(), endpoint=cluster_node.morph_chain.get_http_endpoint()
)
with reporter.step(f"Block {interface} interfaces"):
cluster_state_controller.down_interface(cluster_nodes, interface.value)
with reporter.step("Tick 1 epoch and wait 2 block for sync all nodes"):
self.tick_epochs(1, alive_node=cluster_nodes[0].storage_node, wait_block=2)
with reporter.step(f"Put object, after down {interface}"):
file_path = generate_file(simple_object_size.value)
oid = put_object(
wallet=default_wallet,
path=file_path,
cid=container,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
with reporter.step("Get object"):
get_object(
wallet=default_wallet,
cid=container,
oid=oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
now_block = {}
with reporter.step("Get actual block"):
for cluster_node in cluster_nodes:
now_block[cluster_node] = neo_go_query_height(
shell=cluster_node.host.get_shell(), endpoint=cluster_node.morph_chain.get_http_endpoint()
)
with reporter.step("Compare block"):
for cluster_node, items in now_block.items():
with reporter.step(
f"Node - {cluster_node.host_ip}, old block - {latest_block[cluster_node]['Latest block']}, "
f"now block - {now_block[cluster_node]['Latest block']}"
):
assert latest_block[cluster_node]["Latest block"] < now_block[cluster_node]["Latest block"]
with reporter.step("Restore interfaces all nodes"):
cluster_state_controller.restore_interfaces()
self.tick_epochs(1, alive_node=self.cluster.cluster_nodes[0].storage_node, wait_block=2)

View file

@ -1,533 +0,0 @@
import logging
import random
from time import sleep
from typing import Callable, Optional
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.cli.netmap_parser import NetmapParser
from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
from frostfs_testlib.steps.cli.container import create_container, search_nodes_with_container
from frostfs_testlib.steps.cli.object import (
delete_object,
get_object,
get_object_from_random_node,
head_object,
put_object,
put_object_to_random_node,
search_object,
)
from frostfs_testlib.steps.node_management import (
check_node_in_map,
delete_node_data,
drop_object,
exclude_node_from_network_map,
get_locode_from_random_node,
include_node_to_network_map,
node_shard_list,
node_shard_set_mode,
storage_node_set_status,
wait_for_node_to_be_ready,
)
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.cluster import ClusterNode, StorageNode
from frostfs_testlib.storage.controllers import ClusterStateController
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeStatus
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils import string_utils
from frostfs_testlib.utils.failover_utils import wait_object_replication
from frostfs_testlib.utils.file_utils import generate_file
from ...helpers.container_creation import create_container_with_ape
from ...helpers.container_request import APE_EVERYONE_ALLOW_ALL, REP_1_1_1_PUBLIC, ContainerRequest, requires_container
from ...helpers.utility import wait_for_gc_pass_on_storage_nodes
logger = logging.getLogger("NeoLogger")
check_nodes: list[StorageNode] = []
@pytest.mark.node_mgmt
@pytest.mark.failover
@pytest.mark.order(10)
class TestNodeManagement(ClusterTestBase):
@pytest.fixture
@allure.title("Pick the node with data")
def node_with_data(self, container: str, default_wallet: WalletInfo, simple_object_size: ObjectSize) -> StorageNode:
file_path = generate_file(simple_object_size.value)
oid = put_object_to_random_node(default_wallet, file_path, container, self.shell, self.cluster)
nodes = get_nodes_with_object(container, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(nodes) == 1
node = nodes[0]
yield node
shards = node_shard_list(node)
assert shards
for shard in shards:
node_shard_set_mode(node, shard, "read-write")
node_shard_list(node)
@reporter.step("Tick epoch with retries")
def tick_epoch_with_retries(self, attempts: int = 3, timeout: int = 3, wait_block: int = None):
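# Assumption: an epoch tick may fail transiently (e.g. right after a node restart), so retry with a short pause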
for attempt in range(attempts):
try:
self.tick_epoch(wait_block=wait_block)
except RuntimeError:
sleep(timeout)
if attempt >= attempts - 1:
raise
continue
return
@pytest.fixture
def return_nodes_after_test_run(self):
yield
self.return_nodes()
@reporter.step("Return node to cluster")
def return_nodes(self, alive_node: Optional[StorageNode] = None) -> None:
for node in list(check_nodes):
with reporter.step(f"Start node {node}"):
node.start_service()
with reporter.step(f"Waiting status ready for node {node}"):
wait_for_node_to_be_ready(node)
# We need to wait for the node to establish notifications from morph-chain,
# otherwise it will hang when we try to set its status
self.wait_for_blocks()
with reporter.step(f"Move node {node} to online state"):
storage_node_set_status(node, status="online", retries=2)
check_nodes.remove(node)
self.wait_for_blocks()
self.tick_epoch_with_retries(3, wait_block=2)
check_node_in_map(node, shell=self.shell, alive_node=alive_node)
@allure.title("Add one node to cluster")
def test_add_nodes(
self,
default_wallet: WalletInfo,
simple_object_size: ObjectSize,
frostfs_cli: FrostfsCli,
return_nodes_after_test_run,
):
"""
This test removes one node from the cluster and then adds it back. The test uses basic control operations on storage nodes (healthcheck, netmap-snapshot, set-status).
"""
wallet = default_wallet
placement_rule_3 = "REP 3 IN X CBF 1 SELECT 3 FROM * AS X"
placement_rule_4 = "REP 4 IN X CBF 1 SELECT 4 FROM * AS X"
source_file_path = generate_file(simple_object_size.value)
storage_nodes = self.cluster.storage_nodes
random_node = random.choice(storage_nodes[1:])
alive_node = random.choice([storage_node for storage_node in storage_nodes if storage_node.id != random_node.id])
check_node_in_map(random_node, shell=self.shell, alive_node=alive_node)
# Add node to recovery list before messing with it
check_nodes.append(random_node)
exclude_node_from_network_map(random_node, alive_node, shell=self.shell, cluster=self.cluster)
delete_node_data(random_node)
cid = create_container_with_ape(
ContainerRequest(placement_rule_3, APE_EVERYONE_ALLOW_ALL),
frostfs_cli,
default_wallet,
self.shell,
self.cluster,
alive_node.get_rpc_endpoint(),
)
oid = put_object(wallet, source_file_path, cid, self.shell, alive_node.get_rpc_endpoint())
wait_object_replication(cid, oid, 3, shell=self.shell, nodes=storage_nodes)
self.return_nodes(alive_node)
with reporter.step("Check data could be replicated to new node"):
random_node = random.choice(list(set(storage_nodes) - {random_node, alive_node}))
# Add node to recovery list before messing with it
check_nodes.append(random_node)
exclude_node_from_network_map(random_node, alive_node, shell=self.shell, cluster=self.cluster)
wait_object_replication(
cid,
oid,
3,
shell=self.shell,
nodes=list(set(storage_nodes) - {random_node}),
)
include_node_to_network_map(random_node, alive_node, shell=self.shell, cluster=self.cluster)
wait_object_replication(cid, oid, 3, shell=self.shell, nodes=storage_nodes)
with reporter.step("Check container could be created with new node"):
cid = create_container_with_ape(
ContainerRequest(placement_rule_4, APE_EVERYONE_ALLOW_ALL),
frostfs_cli,
default_wallet,
self.shell,
self.cluster,
alive_node.get_rpc_endpoint(),
)
oid = put_object(
wallet,
source_file_path,
cid,
shell=self.shell,
endpoint=alive_node.get_rpc_endpoint(),
)
wait_object_replication(cid, oid, 4, shell=self.shell, nodes=storage_nodes)
@allure.title("Drop object using control command")
def test_drop_object(self, default_wallet, complex_object_size: ObjectSize, simple_object_size: ObjectSize):
"""
Test checks object could be dropped using `frostfs-cli control drop-objects` command.
"""
wallet = default_wallet
endpoint = self.cluster.default_rpc_endpoint
file_path_simple = generate_file(simple_object_size.value)
file_path_complex = generate_file(complex_object_size.value)
locode = get_locode_from_random_node(self.cluster)
rule = f"REP 1 IN SE CBF 1 SELECT 1 FROM LOC AS SE FILTER 'UN-LOCODE' EQ '{locode}' AS LOC"
cid = create_container(wallet, rule=rule, shell=self.shell, endpoint=endpoint)
oid_simple = put_object_to_random_node(wallet, file_path_simple, cid, shell=self.shell, cluster=self.cluster)
oid_complex = put_object_to_random_node(wallet, file_path_complex, cid, shell=self.shell, cluster=self.cluster)
for oid in (oid_simple, oid_complex):
get_object_from_random_node(wallet, cid, oid, shell=self.shell, cluster=self.cluster)
head_object(wallet, cid, oid, shell=self.shell, endpoint=endpoint)
nodes_with_object = get_nodes_with_object(cid, oid_simple, shell=self.shell, nodes=self.cluster.storage_nodes)
random_node = random.choice(nodes_with_object)
for oid in (oid_simple, oid_complex):
with reporter.step(f"Drop object {oid}"):
get_object_from_random_node(wallet, cid, oid, shell=self.shell, cluster=self.cluster)
head_object(wallet, cid, oid, shell=self.shell, endpoint=endpoint)
drop_object(random_node, cid, oid)
self.wait_for_obj_dropped(wallet, cid, oid, endpoint, get_object)
self.wait_for_obj_dropped(wallet, cid, oid, endpoint, head_object)
@pytest.mark.skip(reason="Need to clarify scenario")
@allure.title("Control Operations with storage nodes")
@requires_container(REP_1_1_1_PUBLIC)
def test_shards(
self,
default_wallet: WalletInfo,
container: str,
node_with_data: StorageNode,
simple_object_size: ObjectSize,
):
wallet = default_wallet
file_path = generate_file(simple_object_size.value)
original_oid = put_object_to_random_node(wallet, file_path, container, self.shell, self.cluster)
# for mode in ('read-only', 'degraded'):
for mode in ("degraded",):
shards = node_shard_list(node_with_data)
assert shards
for shard in shards:
node_shard_set_mode(node_with_data, shard, mode)
shards = node_shard_list(node_with_data)
assert shards
# TODO: Add match for error
with pytest.raises(RuntimeError):
put_object_to_random_node(wallet, file_path, container, self.shell, self.cluster)
# TODO: Add match for error
with pytest.raises(RuntimeError):
delete_object(wallet, container, original_oid, self.shell, self.cluster.default_rpc_endpoint)
get_object_from_random_node(wallet, container, original_oid, self.shell, self.cluster)
for shard in shards:
node_shard_set_mode(node_with_data, shard, "read-write")
shards = node_shard_list(node_with_data)
assert shards
oid = put_object_to_random_node(wallet, file_path, container, self.shell, self.cluster)
delete_object(wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
@allure.title("Put object with stopped node")
def test_stop_node(
self, default_wallet: WalletInfo, frostfs_cli: FrostfsCli, return_nodes_after_test_run, simple_object_size: ObjectSize
):
wallet = default_wallet
placement_rule = "REP 3 IN X SELECT 4 FROM * AS X"
source_file_path = generate_file(simple_object_size.value)
storage_nodes = self.cluster.storage_nodes
random_node = random.choice(storage_nodes[1:])
alive_node = random.choice([storage_node for storage_node in storage_nodes if storage_node.id != random_node.id])
with reporter.step("Create container from random node endpoint"):
cid = create_container_with_ape(
ContainerRequest(placement_rule, APE_EVERYONE_ALLOW_ALL),
frostfs_cli,
default_wallet,
self.shell,
self.cluster,
random_node.get_rpc_endpoint(),
)
with reporter.step("Stop the random node"):
check_nodes.append(random_node)
random_node.stop_service()
with reporter.step("Try to put an object and expect success"):
put_object(
wallet,
source_file_path,
cid,
shell=self.shell,
endpoint=alive_node.get_rpc_endpoint(),
)
self.return_nodes(alive_node)
@reporter.step("Wait for object to be dropped")
def wait_for_obj_dropped(self, wallet: str, cid: str, oid: str, endpoint: str, checker: Callable) -> None:
for _ in range(3):
try:
checker(wallet, cid, oid, shell=self.shell, endpoint=endpoint)
wait_for_gc_pass_on_storage_nodes()
except Exception as err:
if string_utils.is_str_match_pattern(err, OBJECT_NOT_FOUND):
return
raise AssertionError(f'Expected "{OBJECT_NOT_FOUND}" error, got\n{err}')
raise AssertionError(f"Object {oid} was not dropped from node")
@pytest.mark.maintenance
@pytest.mark.failover
@pytest.mark.order(9)
class TestMaintenanceMode(ClusterTestBase):
@pytest.fixture()
@allure.title("Init Frostfs CLI remote")
def frostfs_cli_remote(self, node_under_test: ClusterNode) -> FrostfsCli:
host = node_under_test.host
service_config = host.get_service_config(node_under_test.storage_node.name)
wallet_path = service_config.attributes["wallet_path"]
wallet_password = service_config.attributes["wallet_password"]
shell = host.get_shell()
wallet_config_path = f"/tmp/{node_under_test.storage_node.name}-config.yaml"
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
cli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config_path)
return cli
@pytest.fixture()
def restore_node_status(self, cluster_state_controller: ClusterStateController, default_wallet: WalletInfo):
nodes_to_restore = []
yield nodes_to_restore
for node_to_restore in nodes_to_restore:
cluster_state_controller.set_node_status(node_to_restore, default_wallet, NodeStatus.ONLINE)
def check_node_status(self, expected_status: NodeStatus, node_under_test: ClusterNode, frostfs_cli: FrostfsCli, rpc_endpoint: str):
netmap = frostfs_cli.netmap.snapshot(rpc_endpoint).stdout
all_snapshots = NetmapParser.snapshot_all_nodes(netmap)
node_snapshot = [snapshot for snapshot in all_snapshots if node_under_test.get_interface(Interfaces.MGMT) == snapshot.node]
if expected_status == NodeStatus.OFFLINE and not node_snapshot:
assert (
node_under_test.get_interface(Interfaces.MGMT) not in netmap
), f"{node_under_test} status should be {expected_status}. See netmap:\n{netmap}"
return
assert node_snapshot, f"{node_under_test} status should be {expected_status}, but was not in netmap. See netmap:\n{netmap}"
node_snapshot = node_snapshot[0]
assert (
expected_status == node_snapshot.node_status
), f"{node_under_test} status should be {expected_status}, but was {node_snapshot.node_status}. See netmap:\n{netmap}"
@allure.title("Test of basic node operations in maintenance mode")
def test_maintenance_mode(
self,
default_wallet: WalletInfo,
simple_object_size: ObjectSize,
cluster_state_controller: ClusterStateController,
restore_node_status: list[ClusterNode],
):
with reporter.step("Create container and put object"):
cid = create_container(
wallet=default_wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule="REP 1 CBF 1",
)
nodes_with_container = search_nodes_with_container(
wallet=default_wallet,
cid=cid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
cluster=self.cluster,
)
node_under_test = nodes_with_container[0]
endpoint = node_under_test.storage_node.get_rpc_endpoint()
file_path = generate_file(simple_object_size.value)
oid = put_object(
wallet=default_wallet,
path=file_path,
cid=cid,
shell=self.shell,
endpoint=endpoint,
)
with reporter.step("Set node status to 'maintenance'"):
restore_node_status.append(node_under_test)
cluster_state_controller.set_node_status(node_under_test, default_wallet, NodeStatus.MAINTENANCE)
node_under_maintenance_error = "node is under maintenance"
with reporter.step("Run basic operations with node in maintenance"):
with pytest.raises(RuntimeError, match=node_under_maintenance_error):
get_object(default_wallet, cid, oid, self.shell, endpoint)
with pytest.raises(RuntimeError, match=node_under_maintenance_error):
search_object(default_wallet, cid, self.shell, endpoint)
with pytest.raises(RuntimeError, match=node_under_maintenance_error):
delete_object(default_wallet, cid, oid, self.shell, endpoint)
with pytest.raises(RuntimeError, match=node_under_maintenance_error):
put_object(default_wallet, file_path, cid, self.shell, endpoint)
with reporter.step("Run basic operations with node not in maintenance"):
other_nodes = list(set(self.cluster.cluster_nodes) - set(nodes_with_container))
endpoint = other_nodes[0].storage_node.get_rpc_endpoint()
with pytest.raises(RuntimeError, match=OBJECT_NOT_FOUND):
get_object(default_wallet, cid, oid, self.shell, endpoint)
search_object(default_wallet, cid, self.shell, endpoint)
with pytest.raises(RuntimeError, match=OBJECT_NOT_FOUND):
delete_object(default_wallet, cid, oid, self.shell, endpoint)
with pytest.raises(RuntimeError, match=node_under_maintenance_error):
put_object(default_wallet, file_path, cid, self.shell, endpoint)
@allure.title("MAINTENANCE and OFFLINE mode transitions")
def test_mode_transitions(
self,
cluster_state_controller: ClusterStateController,
node_under_test: ClusterNode,
default_wallet: WalletInfo,
frostfs_cli: FrostfsCli,
restore_node_status: list[ClusterNode],
):
restore_node_status.append(node_under_test)
alive_nodes = list(set(self.cluster.cluster_nodes) - {node_under_test})
alive_storage_node = alive_nodes[0].storage_node
alive_rpc_endpoint = alive_storage_node.get_rpc_endpoint()
with reporter.step("Set node status to 'offline'"):
cluster_state_controller.set_node_status(node_under_test, default_wallet, NodeStatus.OFFLINE)
with reporter.step("Check node status is 'offline' after update the network map"):
self.check_node_status(NodeStatus.OFFLINE, node_under_test, frostfs_cli, alive_rpc_endpoint)
with reporter.step("Restart storage service"):
cluster_state_controller.stop_storage_service(node_under_test)
cluster_state_controller.start_storage_service(node_under_test)
with reporter.step("Tick 2 epochs"):
self.tick_epochs(2, alive_storage_node, 2)
with reporter.step("Check node status is 'online' after storage service restart"):
self.check_node_status(NodeStatus.ONLINE, node_under_test, frostfs_cli, alive_rpc_endpoint)
with reporter.step("Set node status to 'maintenance'"):
cluster_state_controller.set_node_status(node_under_test, default_wallet, NodeStatus.MAINTENANCE)
with reporter.step("Restart storage service"):
cluster_state_controller.stop_storage_service(node_under_test)
cluster_state_controller.start_storage_service(node_under_test)
with reporter.step("Tick 2 epochs"):
self.tick_epochs(2, alive_storage_node, 2)
with reporter.step("Check node staus is 'maintenance' after storage service restart"):
self.check_node_status(NodeStatus.MAINTENANCE, node_under_test, frostfs_cli, alive_rpc_endpoint)
with reporter.step("Set node status to 'offline'"):
cluster_state_controller.set_node_status(node_under_test, default_wallet, NodeStatus.OFFLINE)
with reporter.step("Stop storage service"):
cluster_state_controller.stop_storage_service(node_under_test)
with reporter.step("Tick 2 epochs"):
self.tick_epochs(2, alive_storage_node, 2)
with reporter.step("Start storage service"):
cluster_state_controller.start_storage_service(node_under_test)
with reporter.step("Tick 2 epochs"):
self.tick_epochs(2, alive_storage_node, 2)
with reporter.step("Check node status is 'online' after storage service start"):
self.check_node_status(NodeStatus.ONLINE, node_under_test, frostfs_cli, alive_rpc_endpoint)
with reporter.step("Set node status to 'maintenance'"):
cluster_state_controller.set_node_status(node_under_test, default_wallet, NodeStatus.MAINTENANCE)
with reporter.step("Stop storage service"):
cluster_state_controller.stop_storage_service(node_under_test)
with reporter.step("Tick 2 epochs"):
self.tick_epochs(2, alive_storage_node, 2)
with reporter.step("Start storage service"):
cluster_state_controller.start_storage_service(node_under_test)
with reporter.step("Check node status is 'maintenance'"):
self.check_node_status(NodeStatus.MAINTENANCE, node_under_test, frostfs_cli, alive_rpc_endpoint)
with reporter.step("Tick 2 epochs"):
self.tick_epochs(2, alive_storage_node, 2)
with reporter.step("Check node status is 'maintenance'"):
self.check_node_status(NodeStatus.MAINTENANCE, node_under_test, frostfs_cli, alive_rpc_endpoint)
@allure.title("A node cannot go into maintenance if maintenance is prohibited globally in the network")
def test_maintenance_globally_forbidden(
self,
cluster_state_controller: ClusterStateController,
node_under_test: ClusterNode,
frostfs_cli_remote: FrostfsCli,
default_wallet: WalletInfo,
restore_node_status: list[ClusterNode],
):
restore_node_status.append(node_under_test)
control_endpoint = node_under_test.service(StorageNode).get_control_endpoint()
with reporter.step("Set MaintenanceModeAllowed = false"):
cluster_state_controller.set_maintenance_mode_allowed("false", node_under_test)
with reporter.step("Set node status to 'maintenance'"):
with pytest.raises(RuntimeError, match="maintenance mode is not allowed by the network"):
frostfs_cli_remote.control.set_status(endpoint=control_endpoint, status="maintenance")
with reporter.step("Set MaintenanceModeAllowed = true"):
cluster_state_controller.set_maintenance_mode_allowed("true", node_under_test)
with reporter.step("Set node status to 'maintenance'"):
cluster_state_controller.set_node_status(node_under_test, default_wallet, NodeStatus.MAINTENANCE)

View file

@ -1,246 +0,0 @@
import math
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.steps.cli.container import delete_container, search_nodes_with_container, wait_for_container_deletion
from frostfs_testlib.steps.cli.object import delete_object, head_object, put_object_to_random_node
from frostfs_testlib.steps.metrics import calc_metrics_count_from_stdout, check_metrics_counter, get_metrics_value
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.parallel import parallel
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils.file_utils import TestFile, generate_file
from ...helpers.container_request import PUBLIC_WITH_POLICY, REP_2_1_4_PUBLIC, ContainerRequest, requires_container
from ...helpers.utility import are_numbers_similar
@pytest.mark.order(-10)
@pytest.mark.nightly
@pytest.mark.metrics
class TestContainerMetrics(ClusterTestBase):
@reporter.step("Put object to container: {cid}")
def put_object_parallel(self, file_path: str, wallet: WalletInfo, cid: str):
oid = put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster)
return oid
@reporter.step("Get metrics value from node")
def get_metrics_search_by_greps_parallel(self, node: ClusterNode, **greps):
try:
content_stdout = node.metrics.storage.get_metrics_search_by_greps(greps)
return calc_metrics_count_from_stdout(content_stdout)
except Exception:
return None
@wait_for_success(max_wait_time=300, interval=30)
def check_metrics_value_by_approx(self, cluster_nodes: list[ClusterNode], metric_name: str, cid: str, expected_value: int, copies: int):
futures = parallel(get_metrics_value, cluster_nodes, command=metric_name, cid=cid)
metric_values = [future.result() for future in futures if future.result()]
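# Each replica reports the container size independently, so the summed value is divided by the number of copies to get the logical size.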
actual_value = sum(metric_values) // copies
assert are_numbers_similar(actual_value, expected_value, tolerance_percentage=2), "Container size bytes metric value is not correct"
@allure.title("Container metrics (obj_size={object_size}, policy={container_request})")
@pytest.mark.parametrize(
"container_request, copies",
[
(PUBLIC_WITH_POLICY("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", short_name="REP"), 2),
(PUBLIC_WITH_POLICY("EC 1.1 CBF 1", short_name="EC"), 1),
],
indirect=["container_request"],
)
def test_container_metrics(
self,
object_size: ObjectSize,
max_object_size: int,
default_wallet: WalletInfo,
cluster: Cluster,
copies: int,
container: str,
test_file: TestFile,
container_request: ContainerRequest,
):
object_chunks = 1
link_object = 0
if object_size.value > max_object_size:
object_chunks = math.ceil(object_size.value / max_object_size)
link_object = len(search_nodes_with_container(default_wallet, container, self.shell, cluster.default_rpc_endpoint, cluster))
with reporter.step("Put object to random node"):
oid = put_object_to_random_node(default_wallet, test_file.path, container, self.shell, cluster)
with reporter.step("Get object nodes"):
object_storage_nodes = get_nodes_with_object(container, oid, self.shell, cluster.storage_nodes)
object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]
with reporter.step("Check metric appears in node where the object is located"):
count_metrics = (object_chunks * copies) + link_object
if container_request.short_name == "EC":
count_metrics = (object_chunks * 2) + link_object
check_metrics_counter(object_nodes, counter_exp=count_metrics, command="container_objects_total", cid=container, type="phy")
check_metrics_counter(object_nodes, counter_exp=count_metrics, command="container_objects_total", cid=container, type="logic")
check_metrics_counter(object_nodes, counter_exp=copies, command="container_objects_total", cid=container, type="user")
with reporter.step("Delete file, wait until gc remove object"):
delete_object(default_wallet, container, oid, self.shell, cluster.default_rpc_endpoint)
with reporter.step(f"Check container metrics 'the counter should equal {len(object_nodes)}' in object nodes"):
check_metrics_counter(object_nodes, counter_exp=len(object_nodes), command="container_objects_total", cid=container, type="phy")
check_metrics_counter(
object_nodes, counter_exp=len(object_nodes), command="container_objects_total", cid=container, type="logic"
)
check_metrics_counter(object_nodes, counter_exp=0, command="container_objects_total", cid=container, type="user")
with reporter.step("Check metrics(Phy, Logic, User) in each nodes"):
# Phy and Logic metrics are doubled: with 'CBF 2 SELECT 2' the policy places data on cbf * select = 4 nodes, i.e. copies * 2
expect_metrics = copies * 2
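# e.g. REP (copies=2) -> 4 expected phy/logic objects; EC (copies=1) -> 2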
check_metrics_counter(
cluster.cluster_nodes, counter_exp=expect_metrics, command="container_objects_total", cid=container, type="phy"
)
check_metrics_counter(
cluster.cluster_nodes, counter_exp=expect_metrics, command="container_objects_total", cid=container, type="logic"
)
check_metrics_counter(cluster.cluster_nodes, counter_exp=0, command="container_objects_total", cid=container, type="user")
@allure.title("Container size metrics (obj_size={object_size}, policy={container_request})")
@requires_container(
[PUBLIC_WITH_POLICY("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", short_name="REP"), PUBLIC_WITH_POLICY("EC 1.1 CBF 1", short_name="EC")]
)
def test_container_size_metrics(
self,
object_size: ObjectSize,
default_wallet: WalletInfo,
test_file: TestFile,
container: str,
):
with reporter.step("Put object to random node"):
oid = put_object_to_random_node(default_wallet, test_file.path, container, self.shell, self.cluster)
with reporter.step("Get object nodes"):
object_storage_nodes = get_nodes_with_object(container, oid, self.shell, self.cluster.storage_nodes)
object_nodes = [
cluster_node for cluster_node in self.cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes
]
with reporter.step("Check metric appears in all node where the object is located"):
self.check_metrics_value_by_approx(
object_nodes,
metric_name="frostfs_node_engine_container_size_bytes",
cid=container,
expected_value=object_size.value,
copies=2,  # for policy REP 2 the actual metric value is divided by 2
)
with reporter.step("Delete file, wait until gc remove object"):
id_tombstone = delete_object(default_wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
tombstone = head_object(default_wallet, container, id_tombstone, self.shell, self.cluster.default_rpc_endpoint)
with reporter.step(f"Check container size metrics"):
act_metric = get_metrics_value(object_nodes[0], command="frostfs_node_engine_container_size_bytes", cid=container)
assert act_metric == int(tombstone["header"]["payloadLength"])
@allure.title("Container size metrics put {objects_count} objects (obj_size={object_size})")
@pytest.mark.parametrize("objects_count", [5, 10, 20])
@requires_container(REP_2_1_4_PUBLIC)
def test_container_size_metrics_more_objects(
self, object_size: ObjectSize, default_wallet: WalletInfo, objects_count: int, container: str
):
with reporter.step(f"Put {objects_count} objects"):
files_path = [generate_file(object_size.value) for _ in range(objects_count)]
futures = parallel(self.put_object_parallel, files_path, wallet=default_wallet, cid=container)
oids = [future.result() for future in futures]
with reporter.step("Check metric appears in all nodes"):
self.check_metrics_value_by_approx(
self.cluster.cluster_nodes,
metric_name="frostfs_node_engine_container_size_bytes",
cid=container,
expected_value=object_size.value * objects_count,
copies=2,  # for policy REP 2 the actual metric value is divided by 2
)
with reporter.step("Delete file, wait until gc remove object"):
tombstones_size = 0
for oid in oids:
tombstone_id = delete_object(default_wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
tombstone = head_object(default_wallet, container, tombstone_id, self.shell, self.cluster.default_rpc_endpoint)
tombstones_size += int(tombstone["header"]["payloadLength"])
with reporter.step(f"Check container size metrics, 'should be positive in all nodes'"):
with reporter.step("Search container nodes"):
container_nodes = search_nodes_with_container(
wallet=default_wallet,
cid=container,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
cluster=self.cluster,
)
with reporter.step(f"Get metrics value from container nodes"):
futures = parallel(get_metrics_value, container_nodes, command="frostfs_node_engine_container_size_bytes", cid=container)
metrics_value_nodes = [future.result() for future in futures]
for act_metric in metrics_value_nodes:
assert act_metric >= 0, "Metrics value is negative"
with reporter.step(f"Check container size metrics for tombstone"):
assert sum(metrics_value_nodes) // len(container_nodes) == tombstones_size, "Tombstone size of objects is not correct"
@allure.title("Container metrics (policy={container_request})")
@pytest.mark.parametrize(
"container_request, copies",
[
(PUBLIC_WITH_POLICY("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", short_name="REP"), 2),
(PUBLIC_WITH_POLICY("EC 1.1 CBF 1", short_name="EC"), 1),
],
indirect=["container_request"],
)
def test_container_metrics_delete_complex_objects(
self,
complex_object_size: ObjectSize,
default_wallet: WalletInfo,
cluster: Cluster,
copies: int,
container: str,
container_request: ContainerRequest,
):
objects_count = 2
metric_name = "frostfs_node_engine_container_objects_total"
with reporter.step(f"Put {objects_count} objects"):
files_path = [generate_file(complex_object_size.value) for _ in range(objects_count)]
futures = parallel(self.put_object_parallel, files_path, wallet=default_wallet, cid=container)
oids = [future.result() for future in futures]
with reporter.step(f"Check metrics value in each nodes, should be {objects_count} for 'user'"):
check_metrics_counter(
cluster.cluster_nodes, counter_exp=objects_count * copies, command=metric_name, cid=container, type="user"
)
with reporter.step("Delete objects and container"):
for oid in oids:
delete_object(default_wallet, container, oid, self.shell, cluster.default_rpc_endpoint)
delete_container(default_wallet, container, self.shell, cluster.default_rpc_endpoint)
with reporter.step("Tick epoch and check container was deleted"):
self.tick_epoch()
wait_for_container_deletion(default_wallet, container, shell=self.shell, endpoint=cluster.default_rpc_endpoint)
with reporter.step(f"Check metric {metric_name} in each nodes, should not be show any result"):
futures = parallel(self.get_metrics_search_by_greps_parallel, cluster.cluster_nodes, command=metric_name, cid=container)
metrics_results = [future.result() for future in futures if future.result() is not None]
assert len(metrics_results) == 0, f"Metrics value is not empty in Prometheus, actual value in nodes: {metrics_results}"
with reporter.step("Check metric 'frostfs_node_engine_container_size_bytes' in each nodes, should not be show any result"):
futures = parallel(
self.get_metrics_search_by_greps_parallel,
cluster.cluster_nodes,
command="frostfs_node_engine_container_size_bytes",
cid=container,
)
metrics_results = [future.result() for future in futures if future.result() is not None]
assert len(metrics_results) == 0, f"Metrics value is not empty, actual value in nodes: {metrics_results}"


@@ -1,40 +0,0 @@
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.steps.metrics import get_metrics_value
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.testing import parallel
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
@pytest.mark.order(-12)
@pytest.mark.nightly
@pytest.mark.metrics
class TestEpochMetrics(ClusterTestBase):
@reporter.step("Get metrics value from node: {node}")
def get_metrics_search_by_greps_parallel(self, node: ClusterNode, **greps):
try:
return get_metrics_value(node, parse_from_command=True, **greps)
except Exception:
return None
@allure.title("Check changes in metric frostfs_node_ir_epoch value")
def test_check_increase_epoch_metric(self, cluster: Cluster):
metric_name = "frostfs_node_ir_epoch"
with reporter.step("Get current value of metric: {metric_name} from each nodes"):
futures = parallel(self.get_metrics_search_by_greps_parallel, cluster.cluster_nodes, command=metric_name)
metrics_results = [future.result() for future in futures if future.result() is not None]
with reporter.step("Check that the metric values are the same in all nodes"):
assert len(set(metrics_results)) == 1, f"Metric {metric_name} values aren't the same on all nodes"
assert len(metrics_results) == len(cluster.cluster_nodes), "Metrics are not available in some nodes"
with reporter.step("Tick epoch"):
self.tick_epoch(wait_block=2)
with reporter.step("Check that metric value increase"):
futures = parallel(self.get_metrics_search_by_greps_parallel, cluster.cluster_nodes, command=metric_name)
new_metrics_results = [future.result() for future in futures if future.result() is not None]
assert len(set(new_metrics_results)) == 1, f"Metric {metric_name} values aren't the same on all nodes"
assert new_metrics_results[0] > metrics_results[0], "Metric value didn't increase"


@@ -1,114 +0,0 @@
import re
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.steps.cli.object import delete_object, put_object_to_random_node
from frostfs_testlib.steps.metrics import check_metrics_counter, get_metrics_value
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils.file_utils import generate_file
from ...helpers.container_request import PUBLIC_WITH_POLICY, requires_container
@pytest.mark.order(-13)
@pytest.mark.nightly
@pytest.mark.metrics
class TestGarbageCollectorMetrics(ClusterTestBase):
@wait_for_success(interval=10)
def check_metrics_in_node(self, cluster_node: ClusterNode, counter_exp: int, **metrics_greps: str):
counter_act = 0
try:
metric_result = cluster_node.metrics.storage.get_metrics_search_by_greps(**metrics_greps)
counter_act += self.calc_metrics_count_from_stdout(metric_result.stdout)
except RuntimeError:
...
assert counter_act == counter_exp, f"Expected: {counter_exp}, Actual: {counter_act} in node: {cluster_node}"
@staticmethod
def calc_metrics_count_from_stdout(metric_result_stdout: str):
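# Prometheus exposition lines look like 'metric_name{label="value"} 123'; the regex grabs the number after the closing brace and the counts are summed across all matching series.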
result = re.findall(r"}\s(\d+)", metric_result_stdout)
return sum(map(int, result))
@allure.title("Garbage collector expire_at object")
@requires_container(PUBLIC_WITH_POLICY("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", short_name="REP 2"))
def test_garbage_collector_metrics_expire_at_object(
self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster, container: str
):
file_path = generate_file(simple_object_size.value)
metrics_step = 1
with reporter.step("Get current garbage collector metrics for each nodes"):
metrics_counter = {}
for node in cluster.cluster_nodes:
metrics_counter[node] = get_metrics_value(node, command="frostfs_node_garbage_collector_marked_for_removal_objects_total")
with reporter.step("Put object to random node with expire_at"):
current_epoch = self.get_epoch()
oid = put_object_to_random_node(
default_wallet,
file_path,
container,
self.shell,
cluster,
expire_at=current_epoch + 1,
)
with reporter.step("Get object nodes"):
object_storage_nodes = get_nodes_with_object(container, oid, self.shell, cluster.storage_nodes)
object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]
with reporter.step("Tick Epoch"):
self.tick_epochs(epochs_to_tick=2, wait_block=2)
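# The object expires at current_epoch + 1, so after two epoch ticks it is expired and eligible for GC.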
with reporter.step(f"Check garbage collector metrics 'the counter should increase by {metrics_step}' in object nodes"):
for node in object_nodes:
metrics_counter[node] += metrics_step
for node, counter in metrics_counter.items():
check_metrics_counter(
[node],
counter_exp=counter,
command="frostfs_node_garbage_collector_marked_for_removal_objects_total",
)
@allure.title("Garbage collector delete object")
@requires_container(PUBLIC_WITH_POLICY("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", short_name="REP 2"))
def test_garbage_collector_metrics_deleted_objects(
self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster, container: str
):
file_path = generate_file(simple_object_size.value)
metrics_step = 1
with reporter.step("Get current garbage collector metrics for each nodes"):
metrics_counter = {}
for node in cluster.cluster_nodes:
metrics_counter[node] = get_metrics_value(node, command="frostfs_node_garbage_collector_deleted_objects_total")
with reporter.step("Put object to random node"):
oid = put_object_to_random_node(
default_wallet,
file_path,
container,
self.shell,
cluster,
)
with reporter.step("Get object nodes"):
object_storage_nodes = get_nodes_with_object(container, oid, self.shell, cluster.storage_nodes)
object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]
with reporter.step("Delete file, wait until gc remove object"):
delete_object(default_wallet, container, oid, self.shell, cluster.default_rpc_endpoint)
with reporter.step(f"Check garbage collector metrics 'the counter should increase by {metrics_step}'"):
for node in object_nodes:
exp_metrics_counter = metrics_counter[node] + metrics_step
check_metrics_counter(
[node], counter_exp=exp_metrics_counter, command="frostfs_node_garbage_collector_deleted_objects_total"
)


@@ -1,219 +0,0 @@
import random
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.healthcheck.interfaces import Healthcheck
from frostfs_testlib.steps.cli.container import create_container, get_container, list_containers
from frostfs_testlib.steps.cli.object import get_object, head_object, put_object, search_object
from frostfs_testlib.steps.cli.tree import get_tree_list
from frostfs_testlib.steps.metrics import check_metrics_counter, get_metrics_value
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
@pytest.mark.order(-11)
@pytest.mark.nightly
@pytest.mark.metrics
class TestGRPCMetrics(ClusterTestBase):
@pytest.fixture
def disable_policer(self, cluster_state_controller: ClusterStateController):
config_manager = cluster_state_controller.manager(ConfigStateManager)
config_manager.set_on_all_nodes(StorageNode, {"policer:unsafe_disable": "true"})
yield
cluster_state_controller.manager(ConfigStateManager).revert_all()
@allure.title("GRPC metrics container operations")
def test_grpc_metrics_container_operations(self, default_wallet: WalletInfo, cluster: Cluster):
operations_count = 10
placement_policy = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
with reporter.step("Select random node"):
node = random.choice(cluster.cluster_nodes)
with reporter.step("Get current gRPC metrics for method 'Put'"):
metrics_counter_put = get_metrics_value(node, command="grpc_server_handled_total", service="ContainerService", method="Put")
with reporter.step(f"Create container with policy {placement_policy}"):
cid = create_container(default_wallet, self.shell, node.storage_node.get_rpc_endpoint(), placement_policy)
with reporter.step(f"Check gRPC metrics method 'Put', 'the counter should increase by 1'"):
metrics_counter_put += 1
check_metrics_counter(
[node],
counter_exp=metrics_counter_put,
command="grpc_server_handled_total",
service="ContainerService",
method="Put",
)
with reporter.step("Get current gRPC metrics for method 'Get'"):
metrics_counter_get = get_metrics_value(node, command="grpc_server_handled_total", service="ContainerService", method="Get")
with reporter.step(f"Get container"):
for _ in range(operations_count):
get_container(default_wallet, cid, self.shell, node.storage_node.get_rpc_endpoint())
with reporter.step(f"Check gRPC metrics method=Get, 'the counter should increase by 1'"):
metrics_counter_get += operations_count
check_metrics_counter(
[node],
operator=">=",
counter_exp=metrics_counter_get,
command="grpc_server_handled_total",
service="ContainerService",
method="Get",
)
with reporter.step("Get current gRPC metrics for method 'List'"):
metrics_counter_list = get_metrics_value(node, command="grpc_server_handled_total", service="ContainerService", method="List")
with reporter.step(f"Get container list"):
for _ in range(operations_count):
list_containers(default_wallet, self.shell, node.storage_node.get_rpc_endpoint())
with reporter.step(f"Check gRPC metrics method=List, 'the counter should increase by 1'"):
metrics_counter_list += operations_count
check_metrics_counter(
[node],
operator=">=",
counter_exp=metrics_counter_list,
command="grpc_server_handled_total",
service="ContainerService",
method="List",
)
@allure.title("GRPC metrics object operations")
def test_grpc_metrics_object_operations(
self, simple_object_size: ObjectSize, default_wallet: WalletInfo, cluster: Cluster, container: str, disable_policer
):
operations_count = 10
file_path = generate_file(simple_object_size.value)
with reporter.step("Select random node"):
node = random.choice(cluster.cluster_nodes)
with reporter.step("Get current gRPC metrics for method 'Put'"):
metrics_counter_put = get_metrics_value(node, command="grpc_server_handled_total", service="ObjectService", method="Put")
with reporter.step("Put object to selected node"):
for _ in range(operations_count):
oid = put_object(default_wallet, file_path, container, self.shell, node.storage_node.get_rpc_endpoint())
with reporter.step(f"Check gRPC metrics method 'Put', 'the counter should increase by {operations_count}'"):
metrics_counter_put += operations_count
check_metrics_counter(
[node],
operator=">=",
counter_exp=metrics_counter_put,
command="grpc_server_handled_total",
service="ObjectService",
method="Put",
)
with reporter.step("Get current gRPC metrics for method 'Get'"):
metrics_counter_get = get_metrics_value(node, command="grpc_server_handled_total", service="ObjectService", method="Get")
with reporter.step(f"Get object"):
for _ in range(operations_count):
get_object(default_wallet, container, oid, self.shell, node.storage_node.get_rpc_endpoint())
with reporter.step(f"Check gRPC metrics method=Get, 'the counter should increase by {operations_count}'"):
metrics_counter_get += operations_count
check_metrics_counter(
[node],
operator=">=",
counter_exp=metrics_counter_get,
command="grpc_server_handled_total",
service="ObjectService",
method="Get",
)
with reporter.step("Get current gRPC metrics for method 'Search'"):
metrics_counter_search = get_metrics_value(node, command="grpc_server_handled_total", service="ObjectService", method="Search")
with reporter.step(f"Search object"):
for _ in range(operations_count):
search_object(default_wallet, container, self.shell, node.storage_node.get_rpc_endpoint())
with reporter.step(f"Check gRPC metrics method=Search, 'the counter should increase by {operations_count}'"):
metrics_counter_search += operations_count
check_metrics_counter(
[node],
operator=">=",
counter_exp=metrics_counter_search,
command="grpc_server_handled_total",
service="ObjectService",
method="Search",
)
with reporter.step("Get current gRPC metrics for method 'Head'"):
metrics_counter_head = get_metrics_value(node, command="grpc_server_handled_total", service="ObjectService", method="Head")
with reporter.step(f"Head object"):
for _ in range(operations_count):
head_object(default_wallet, container, oid, self.shell, node.storage_node.get_rpc_endpoint())
with reporter.step(f"Check gRPC metrics method=Head, 'the counter should increase by {operations_count}'"):
metrics_counter_head += operations_count
check_metrics_counter(
[node],
operator=">=",
counter_exp=metrics_counter_head,
command="grpc_server_handled_total",
service="ObjectService",
method="Head",
)
@allure.title("GRPC metrics Tree healthcheck")
def test_grpc_metrics_tree_service(self, cluster: Cluster, healthcheck: Healthcheck):
with reporter.step("Select random node"):
node = random.choice(cluster.cluster_nodes)
with reporter.step("Get current gRPC metrics for Healthcheck"):
metrics_counter = get_metrics_value(node, command="grpc_server_handled_total", service="TreeService", method="Healthcheck")
with reporter.step("Query Tree healthcheck status"):
healthcheck.tree_healthcheck(node)
with reporter.step(f"Check gRPC metrics for Healthcheck, 'the counter should increase'"):
check_metrics_counter(
[node],
">",
metrics_counter,
command="grpc_server_handled_total",
service="TreeService",
method="Healthcheck",
)
@allure.title("GRPC metrics Tree list")
def test_grpc_metrics_tree_list(self, default_wallet: WalletInfo, cluster: Cluster):
placement_policy = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
with reporter.step("Select random node"):
node = random.choice(cluster.cluster_nodes)
with reporter.step(f"Create container with policy {placement_policy}"):
cid = create_container(default_wallet, self.shell, node.storage_node.get_rpc_endpoint(), placement_policy)
with reporter.step("Get current gRPC metrics for Tree List"):
metrics_counter = get_metrics_value(node, command="grpc_server_handled_total", service="TreeService", method="TreeList")
with reporter.step("Query Tree List"):
get_tree_list(default_wallet, cid, self.shell, node.storage_node.get_rpc_endpoint())
with reporter.step(f"Check gRPC metrics for Tree List, 'the counter should increase by 1'"):
metrics_counter += 1
check_metrics_counter(
[node],
counter_exp=metrics_counter,
command="grpc_server_handled_total",
service="TreeService",
method="TreeList",
)


@@ -1,72 +0,0 @@
import random
import re
from datetime import datetime, timezone
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.steps.metrics import get_metrics_value
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import wait_for_success
@pytest.mark.order(-14)
@pytest.mark.nightly
@pytest.mark.metrics
class TestLogsMetrics(ClusterTestBase):
@pytest.fixture
def revert_all(self, cluster_state_controller: ClusterStateController):
yield
cluster_state_controller.manager(ConfigStateManager).revert_all()
def restart_storage_service(self, cluster_state_controller: ClusterStateController) -> datetime:
config_manager = cluster_state_controller.manager(ConfigStateManager)
config_manager.csc.stop_services_of_type(StorageNode)
restart_time = datetime.now(timezone.utc)
config_manager.csc.start_services_of_type(StorageNode)
return restart_time
@wait_for_success(max_wait_time=300, interval=30)
def check_metrics_in_node(self, cluster_node: ClusterNode, restart_time: datetime, log_priority: str = None, **metrics_greps):
current_time = datetime.now(timezone.utc)
counter_metrics = get_metrics_value(cluster_node, **metrics_greps)
counter_logs = self.get_count_logs_by_level(cluster_node, metrics_greps.get("level"), restart_time, current_time, log_priority)
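# Logs and metrics are collected at slightly different moments, hence the 5% relative tolerance below.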
assert counter_logs == pytest.approx(
counter_metrics, rel=0.05
), f"counter_logs: {counter_logs}, counter_metrics: {counter_metrics} in node: {cluster_node}"
@staticmethod
def get_count_logs_by_level(cluster_node: ClusterNode, log_level: str, after_time: datetime, until_time: datetime, log_priority: str):
count_logs = 0
try:
logs = cluster_node.host.get_filtered_logs(
log_level, unit="frostfs-storage", since=after_time, until=until_time, priority=log_priority
)
result = re.findall(rf":\s+{log_level}\s+", logs)
count_logs += len(result)
except RuntimeError:
...
return count_logs
@allure.title("Metrics for the log counter")
def test_log_counter_metrics(self, cluster_state_controller: ClusterStateController, revert_all):
restart_time = self.restart_storage_service(cluster_state_controller)
with reporter.step("Select random node"):
node = random.choice(self.cluster.cluster_nodes)
with reporter.step(f"Check metrics count logs with level 'info'"):
self.check_metrics_in_node(
node,
restart_time,
log_priority="6..6",
command="frostfs_node_logger_entry_count",
level="info",
dropped="false",
)
with reporter.step(f"Check metrics count logs with level 'error'"):
self.check_metrics_in_node(node, restart_time, command="frostfs_node_logger_entry_count", level="error", dropped="false")


@@ -1,350 +0,0 @@
import random
import re
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.clients.s3.interfaces import BucketContainerResolver, S3ClientWrapper
from frostfs_testlib.steps import s3_helper
from frostfs_testlib.steps.cli.container import delete_container, search_nodes_with_container
from frostfs_testlib.steps.cli.object import delete_object, lock_object, put_object, put_object_to_random_node
from frostfs_testlib.steps.metrics import check_metrics_counter, get_metrics_value
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import TestFile, generate_file, split_file
from ...helpers.container_request import PUBLIC_WITH_POLICY, ContainerRequest, requires_container
@pytest.mark.order(-16)
@pytest.mark.nightly
@pytest.mark.metrics
class TestObjectMetrics(ClusterTestBase):
@allure.title("Object metrics of removed container (obj_size={object_size})")
@requires_container(PUBLIC_WITH_POLICY("REP 2 IN X CBF 2 SELECT 2 FROM * AS X"))
def test_object_metrics_removed_container(self, default_wallet: WalletInfo, cluster: Cluster, container: str, test_file: TestFile):
with reporter.step("Put object to random node"):
oid = put_object_to_random_node(default_wallet, test_file.path, container, self.shell, cluster)
with reporter.step("Check metric appears in node where the object is located"):
object_storage_nodes = get_nodes_with_object(container, oid, self.shell, cluster.storage_nodes)
object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]
check_metrics_counter(
object_nodes,
counter_exp=2,
command="frostfs_node_engine_container_objects_total",
cid=container,
type="user",
)
with reporter.step("Delete container"):
delete_container(default_wallet, container, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
with reporter.step("Tick Epoch"):
self.tick_epochs(epochs_to_tick=2, wait_block=2)
with reporter.step("Check metrics of removed containers doesn't appear in the storage node"):
check_metrics_counter(
object_nodes, counter_exp=0, command="frostfs_node_engine_container_objects_total", cid=container, type="user"
)
check_metrics_counter(object_nodes, counter_exp=0, command="frostfs_node_engine_container_size_byte", cid=container)
with reporter.step("Check removed containers shouldn't appear in the storage node"):
for node in object_nodes:
try:
all_metrics = node.metrics.storage.get_metrics_search_by_greps(command="frostfs_node_engine_container_size_byte")
except Exception:
continue
assert container not in all_metrics.stdout, "metrics of removed containers shouldn't appear in the storage node"
@allure.title("Object metrics, locked object (obj_size={object_size}, policy={container_request})")
@requires_container(
[
PUBLIC_WITH_POLICY("REP 1 IN X CBF 1 SELECT 1 FROM * AS X", short_name="REP 1"),
PUBLIC_WITH_POLICY("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", short_name="REP 2"),
]
)
def test_object_metrics_blocked_object(
self, default_wallet: WalletInfo, cluster: Cluster, container: str, container_request: ContainerRequest, test_file: TestFile
):
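# The expected per-operation metric delta equals the replication factor parsed from the policy, e.g. 'REP 2 ...' -> 2.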
metric_step = int(re.search(r"REP\s(\d+)", container_request.policy).group(1))
with reporter.step("Search container nodes"):
container_nodes = search_nodes_with_container(
wallet=default_wallet,
cid=container,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
cluster=cluster,
)
with reporter.step("Get current metrics for metric_type=user"):
objects_metric_counter = 0
for node in container_nodes:
objects_metric_counter += get_metrics_value(node, command="frostfs_node_engine_objects_total", type="user")
with reporter.step("Put object to container node"):
oid = put_object(default_wallet, test_file.path, container, self.shell, container_nodes[0].storage_node.get_rpc_endpoint())
with reporter.step(f"Check metric user 'the counter should increase by {metric_step}'"):
objects_metric_counter += metric_step
check_metrics_counter(
container_nodes,
operator=">=",
counter_exp=objects_metric_counter,
command="frostfs_node_engine_objects_total",
type="user",
)
check_metrics_counter(
container_nodes,
counter_exp=metric_step,
command="frostfs_node_engine_container_objects_total",
cid=container,
type="user",
)
with reporter.step("Delete object"):
delete_object(default_wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
with reporter.step(f"Check metric user 'the counter should decrease by {metric_step}'"):
objects_metric_counter -= metric_step
check_metrics_counter(
container_nodes,
operator=">=",
counter_exp=objects_metric_counter,
command="frostfs_node_engine_objects_total",
type="user",
)
check_metrics_counter(
container_nodes,
counter_exp=0,
command="frostfs_node_engine_container_objects_total",
cid=container,
type="user",
)
with reporter.step("Put object and lock it to next epoch"):
oid = put_object(default_wallet, test_file.path, container, self.shell, container_nodes[0].storage_node.get_rpc_endpoint())
current_epoch = self.get_epoch()
lock_object(
default_wallet,
container,
oid,
self.shell,
container_nodes[0].storage_node.get_rpc_endpoint(),
expire_at=current_epoch + 1,
)
with reporter.step(f"Check metric user 'the counter should increase by {metric_step}'"):
objects_metric_counter += metric_step
check_metrics_counter(
container_nodes,
operator=">=",
counter_exp=objects_metric_counter,
command="frostfs_node_engine_objects_total",
type="user",
)
check_metrics_counter(
container_nodes,
counter_exp=metric_step,
command="frostfs_node_engine_container_objects_total",
cid=container,
type="user",
)
with reporter.step(f"Wait until remove locking 'the counter doesn't change'"):
self.tick_epochs(epochs_to_tick=2)
check_metrics_counter(
container_nodes,
operator="<=",
counter_exp=objects_metric_counter,
command="frostfs_node_engine_objects_total",
type="user",
)
with reporter.step("Delete object"):
delete_object(default_wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
with reporter.step(f"Check metric user 'the counter should decrease by {metric_step}'"):
objects_metric_counter -= metric_step
check_metrics_counter(
container_nodes,
operator=">=",
counter_exp=objects_metric_counter,
command="frostfs_node_engine_objects_total",
type="user",
)
check_metrics_counter(
container_nodes,
counter_exp=0,
command="frostfs_node_engine_container_objects_total",
cid=container,
type="user",
)
with reporter.step("Put object with expire_at"):
current_epoch = self.get_epoch()
oid = put_object(
default_wallet,
test_file.path,
container,
self.shell,
container_nodes[0].storage_node.get_rpc_endpoint(),
expire_at=current_epoch + 1,
)
with reporter.step(f"Check metric user 'the counter should increase by {metric_step}'"):
objects_metric_counter += metric_step
check_metrics_counter(
container_nodes,
operator=">=",
counter_exp=objects_metric_counter,
command="frostfs_node_engine_objects_total",
type="user",
)
check_metrics_counter(
container_nodes,
counter_exp=metric_step,
command="frostfs_node_engine_container_objects_total",
cid=container,
type="user",
)
with reporter.step("Tick Epoch"):
self.tick_epochs(epochs_to_tick=2)
with reporter.step(f"Check metric user 'the counter should decrease by {metric_step}'"):
objects_metric_counter -= metric_step
check_metrics_counter(
container_nodes,
operator=">=",
counter_exp=objects_metric_counter,
command="frostfs_node_engine_objects_total",
type="user",
)
check_metrics_counter(
container_nodes,
counter_exp=0,
command="frostfs_node_engine_container_objects_total",
cid=container,
type="user",
)
@allure.title("Object metrics, stop the node (obj_size={object_size})")
@requires_container(PUBLIC_WITH_POLICY("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", short_name="REP 2"))
def test_object_metrics_stop_node(
self,
default_wallet: WalletInfo,
cluster_state_controller: ClusterStateController,
container: str,
test_file: TestFile,
):
copies = 2
with reporter.step(f"Check object metrics in container 'should be zero'"):
check_metrics_counter(
self.cluster.cluster_nodes,
counter_exp=0,
command="frostfs_node_engine_container_objects_total",
type="user",
cid=container,
)
with reporter.step("Get current metrics for each nodes"):
objects_metric_counter: dict[ClusterNode, int] = {}
for node in self.cluster.cluster_nodes:
objects_metric_counter[node] = get_metrics_value(node, command="frostfs_node_engine_objects_total", type="user")
with reporter.step("Put object"):
oid = put_object(default_wallet, test_file.path, container, self.shell, self.cluster.default_rpc_endpoint)
with reporter.step("Get object nodes"):
object_storage_nodes = get_nodes_with_object(container, oid, self.shell, self.cluster.storage_nodes)
object_nodes = [
cluster_node for cluster_node in self.cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes
]
with reporter.step(f"Check metrics in object nodes 'the counter should increase by {copies}'"):
counter_exp = sum(objects_metric_counter[node] for node in object_nodes) + copies
check_metrics_counter(object_nodes, counter_exp=counter_exp, command="frostfs_node_engine_objects_total", type="user")
check_metrics_counter(
object_nodes,
counter_exp=copies,
command="frostfs_node_engine_container_objects_total",
type="user",
cid=container,
)
with reporter.step(f"Select node to stop"):
node_to_stop = random.choice(object_nodes)
alive_nodes = set(object_nodes).difference({node_to_stop})
with reporter.step(f"Stop the node, wait until the object is replicated to another node"):
cluster_state_controller.stop_node_host(node_to_stop, "hard")
objects_metric_counter[node_to_stop] += 1
with reporter.step(f"Check metric in alive nodes 'the counter should increase'"):
counter_exp = sum(objects_metric_counter[node] for node in alive_nodes)
check_metrics_counter(alive_nodes, ">=", counter_exp, command="frostfs_node_engine_objects_total", type="user")
with reporter.step("Start node"):
cluster_state_controller.start_node_host(node_to_stop)
with reporter.step(f"Check metric in restarted node, 'the counter doesn't change'"):
check_metrics_counter(
object_nodes,
counter_exp=copies,
command="frostfs_node_engine_container_objects_total",
type="user",
cid=container,
)
@allure.title("Multipart object metrics, PUT over S3, check part counts (s3_client={s3_client}, parts_count={parts_count})")
@pytest.mark.parametrize("parts_count", [4, 5, 10])
def test_multipart_object_metrics_check_parts_count(
self, s3_client: S3ClientWrapper, bucket_container_resolver: BucketContainerResolver, parts_count: int
):
parts = []
object_size_10_mb = 10 * 1024 * 1024
original_size = object_size_10_mb * parts_count
with reporter.step("Create public container"):
bucket = s3_client.create_bucket()
with reporter.step("Generate original object and split it into parts"):
original_file = generate_file(original_size)
file_parts = split_file(original_file, parts_count)
object_key = s3_helper.object_key_from_file_path(original_file)
with reporter.step("Create multipart and upload parts"):
upload_id = s3_client.create_multipart_upload(bucket, object_key)
for part_id, file_path in enumerate(file_parts, start=1):
etag = s3_client.upload_part(bucket, object_key, upload_id, part_id, file_path)
parts.append((part_id, etag))
with reporter.step("Check all parts are visible in bucket"):
got_parts = s3_client.list_parts(bucket, object_key, upload_id)
assert len(got_parts) == len(file_parts), f"Expected {parts_count} parts, got:\n{got_parts}"
with reporter.step("Complete multipart upload"):
s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)
with reporter.step(f"Get related container_id for bucket"):
cid = bucket_container_resolver.resolve(self.cluster.cluster_nodes[0], bucket)
with reporter.step(f"Check metric count object should be equal count parts of multipart object"):
# plus 1, because creating an additional object when calling CompleteMultipart
# multiply by 2 because default location constraint is REP 2
expected_user_metric = (parts_count + 1) * 2
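# e.g. parts_count=4 -> (4 + 1) * 2 = 10 expected 'user' objects across the cluster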
check_metrics_counter(
self.cluster.cluster_nodes,
counter_exp=expected_user_metric,
command="frostfs_node_engine_container_objects_total",
type="user",
cid=cid,
)


@@ -1,195 +0,0 @@
import random
import re
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
from frostfs_testlib.steps.cli.object import get_object, put_object
from frostfs_testlib.steps.metrics import check_metrics_counter
from frostfs_testlib.steps.node_management import node_shard_list, node_shard_set_mode
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.controllers import ShardsWatcher
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing import parallel, wait_for_success
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
from ...helpers.container_request import PUBLIC_WITH_POLICY, requires_container
@pytest.mark.order(-15)
@pytest.mark.nightly
@pytest.mark.metrics
class TestShardMetrics(ClusterTestBase):
@pytest.fixture()
@allure.title("Get two shards for set mode")
def two_shards_and_node(self, cluster: Cluster) -> tuple[str, str, ClusterNode]:
node = random.choice(cluster.cluster_nodes)
shards = node_shard_list(node.storage_node)
two_shards = random.sample(shards, k=2)
yield two_shards[0], two_shards[1], node
for shard in two_shards:
node_shard_set_mode(node.storage_node, shard, "read-write")
node_shard_list(node.storage_node)
@pytest.fixture()
@allure.title("Revert all shards mode")
def revert_all_shards_mode(self):
yield
parallel(self.set_shard_rw_mode, self.cluster.cluster_nodes)
def set_shard_rw_mode(self, node: ClusterNode):
watcher = ShardsWatcher(node)
shards = watcher.get_shards()
for shard in shards:
watcher.set_shard_mode(shard["shard_id"], mode="read-write")
watcher.await_for_all_shards_status(status="read-write")
@staticmethod
def get_error_count_from_logs(cluster_node: ClusterNode, object_path: str, object_name: str):
error_count = 0
try:
logs = cluster_node.host.get_filtered_logs("error count", unit="frostfs-storage")
# search error logs for current object
for error_line in logs.split("\n"):
if object_path in error_line and object_name in error_line:
result = re.findall(r'"error\scount":\s(\d+)', error_line)
error_count += sum(map(int, result))
except RuntimeError:
...
return error_count
@staticmethod
@wait_for_success(180, 30)
def get_object_path_and_name_file(oid: str, cid: str, node: ClusterNode) -> tuple[str, str]:
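# Object files are sharded on disk by the first four characters of the OID: OID 'abcd...' is looked up under <data_path>/data/<dir>/data/a/b/c/d/.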
oid_path = f"{oid[0]}/{oid[1]}/{oid[2]}/{oid[3]}"
object_path = None
with reporter.step("Search object file"):
node_shell = node.storage_node.host.get_shell()
data_path = node.storage_node.get_data_directory()
all_datas = node_shell.exec(f"ls -la {data_path}/data | awk '{{ print $9 }}'").stdout.strip()
for data_dir in all_datas.replace(".", "").strip().split("\n"):
check_dir = node_shell.exec(f" [ -d {data_path}/data/{data_dir}/data/{oid_path} ] && echo 1 || echo 0").stdout
if "1" in check_dir:
object_path = f"{data_path}/data/{data_dir}/data/{oid_path}"
object_name = f"{oid[4:]}.{cid}"
break
assert object_path is not None, f"{oid} object not found in directory - {data_path}/data"
return object_path, object_name
@allure.title("Metric for shard mode")
def test_shard_metrics_set_mode(self, two_shards_and_node: tuple[str, str, ClusterNode]):
metrics_counter = 1
metric_name_blobstore = "frostfs_node_blobstore_mode"
shard1, shard2, node = two_shards_and_node
with reporter.step("Shard1 set to mode 'read-only'"):
node_shard_set_mode(node.storage_node, shard1, "read-only")
with reporter.step(f"Check shard metrics, 'the mode will change to 'READ_ONLY'"):
check_metrics_counter(
[node],
counter_exp=metrics_counter,
command="frostfs_node_engine_mode_info",
mode="READ_ONLY",
shard_id=shard1,
)
with reporter.step(f"Check {metric_name_blobstore} metrics, 'the mode will change to 'READ_ONLY'"):
check_metrics_counter(
[node],
counter_exp=metrics_counter,
command=metric_name_blobstore,
mode="READ_ONLY",
shard_id=shard1,
)
with reporter.step("Shard2 set to mode 'degraded-read-only'"):
node_shard_set_mode(node.storage_node, shard2, "degraded-read-only")
with reporter.step(f"Check shard metrics, 'the mode will change to 'DEGRADED_READ_ONLY'"):
check_metrics_counter(
[node],
counter_exp=metrics_counter,
command="frostfs_node_engine_mode_info",
mode="DEGRADED_READ_ONLY",
shard_id=shard2,
)
with reporter.step(f"Check {metric_name_blobstore} metrics, 'the mode will save 'READ_ONLY'"):
check_metrics_counter(
[node],
counter_exp=metrics_counter,
command=metric_name_blobstore,
mode="READ_ONLY",
shard_id=shard2,
)
with reporter.step("Both shards set to mode 'read-write'"):
for shard in [shard1, shard2]:
node_shard_set_mode(node.storage_node, shard, "read-write")
with reporter.step(f"Check shard metrics, 'the mode will change to 'READ_WRITE'"):
for shard in [shard1, shard2]:
check_metrics_counter(
[node],
counter_exp=metrics_counter,
command="frostfs_node_engine_mode_info",
mode="READ_WRITE",
shard_id=shard,
)
with reporter.step(f"Check {metric_name_blobstore} metrics, 'the mode will change to 'READ_WRITE'"):
for shard in [shard1, shard2]:
check_metrics_counter(
[node],
counter_exp=metrics_counter,
command=metric_name_blobstore,
mode="READ_WRITE",
shard_id=shard,
)
@allure.title("Metric for error count on shard")
@requires_container(PUBLIC_WITH_POLICY("REP 1 CBF 1", short_name="REP 1"))
def test_shard_metrics_error_count(
self, max_object_size: int, default_wallet: WalletInfo, cluster: Cluster, container: str, revert_all_shards_mode
):
file_path = generate_file(round(max_object_size * 0.8))
with reporter.step("Put object"):
oid = put_object(default_wallet, file_path, container, self.shell, cluster.default_rpc_endpoint)
with reporter.step("Get object nodes"):
object_storage_nodes = get_nodes_with_object(container, oid, self.shell, cluster.storage_nodes)
object_nodes = [cluster_node for cluster_node in cluster.cluster_nodes if cluster_node.storage_node in object_storage_nodes]
node = random.choice(object_nodes)
with reporter.step("Search object in system."):
object_path, object_name = self.get_object_path_and_name_file(oid, container, node)
with reporter.step("Block read file"):
node.host.get_shell().exec(f"chmod a-r {object_path}/{object_name}")
with reporter.step("Get object, expect error"):
with pytest.raises(RuntimeError, match=OBJECT_NOT_FOUND):
get_object(
wallet=default_wallet,
cid=container,
oid=oid,
shell=self.shell,
endpoint=node.storage_node.get_rpc_endpoint(),
)
with reporter.step(f"Get shard error count from logs"):
counter = self.get_error_count_from_logs(node, object_path, object_name)
with reporter.step(f"Check shard error metrics"):
check_metrics_counter([node], counter_exp=counter, command="frostfs_node_engine_errors_total")


@@ -0,0 +1,528 @@
import logging
from random import choice
from time import sleep
from typing import Optional
import allure
import pytest
from common import (
COMPLEX_OBJ_SIZE,
MORPH_BLOCK_TIME,
NEOFS_CONTRACT_CACHE_TIMEOUT,
NEOFS_NETMAP_DICT,
STORAGE_RPC_ENDPOINT_1,
STORAGE_WALLET_PASS,
)
from data_formatters import get_wallet_public_key
from epoch import tick_epoch
from file_helper import generate_file
from grpc_responses import OBJECT_NOT_FOUND, error_matches_status
from neofs_testlib.hosting import Hosting
from neofs_testlib.shell import Shell
from python_keywords.container import create_container, get_container
from python_keywords.failover_utils import wait_object_replication_on_nodes
from python_keywords.neofs_verbs import delete_object, get_object, head_object, put_object
from python_keywords.node_management import (
check_node_in_map,
delete_node_data,
drop_object,
exclude_node_from_network_map,
get_locode,
get_netmap_snapshot,
include_node_to_network_map,
node_healthcheck,
node_set_status,
node_shard_list,
node_shard_set_mode,
start_nodes,
stop_nodes,
)
from storage_policy import get_nodes_with_object, get_simple_object_copies
from utility import parse_time, placement_policy_from_container, wait_for_gc_pass_on_storage_nodes
from wellknown_acl import PUBLIC_ACL
logger = logging.getLogger("NeoLogger")
check_nodes = []
@pytest.fixture
@allure.title("Create container and pick the node with data")
def create_container_and_pick_node(prepare_wallet_and_deposit, client_shell, hosting: Hosting):
wallet = prepare_wallet_and_deposit
file_path = generate_file()
placement_rule = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
cid = create_container(wallet, shell=client_shell, rule=placement_rule, basic_acl=PUBLIC_ACL)
oid = put_object(wallet, file_path, cid, shell=client_shell)
nodes = get_nodes_with_object(wallet, cid, oid, shell=client_shell)
assert len(nodes) == 1
node = nodes[0]
node_name = choice(
[node_name for node_name, params in NEOFS_NETMAP_DICT.items() if params.get("rpc") == node]
)
yield cid, node_name
shards = node_shard_list(hosting, node_name)
assert shards
for shard in shards:
node_shard_set_mode(hosting, node_name, shard, "read-write")
node_shard_list(hosting, node_name)
@pytest.fixture
def after_run_start_all_nodes(hosting: Hosting):
yield
try:
start_nodes(hosting, list(NEOFS_NETMAP_DICT.keys()))
except Exception as err:
logger.error(f"Node start fails with error:\n{err}")
@pytest.fixture
def return_nodes_after_test_run(client_shell: Shell, hosting: Hosting):
yield
return_nodes(client_shell, hosting)
@allure.step("Return node to cluster")
def return_nodes(shell: Shell, hosting: Hosting, alive_node: Optional[str] = None) -> None:
for node in list(check_nodes):
with allure.step(f"Start node {node}"):
host = hosting.get_host_by_service(node)
host.start_service(node)
with allure.step(f"Waiting status ready for node {node}"):
wait_for_node_to_be_ready(hosting, node)
# We need to wait for the node to establish notifications from morph-chain,
# otherwise it will hang when we try to set the status.
sleep(parse_time(MORPH_BLOCK_TIME))
with allure.step(f"Move node {node} to online state"):
node_set_status(hosting, node, status="online", retries=2)
check_nodes.remove(node)
sleep(parse_time(MORPH_BLOCK_TIME))
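# tick_epoch may fail right after the node returns, so retry a few times before checking the netmap.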
for __attempt in range(3):
try:
tick_epoch()
break
except RuntimeError:
sleep(3)
check_node_in_map(node, shell=shell, alive_node=alive_node)
@allure.title("Add one node to cluster")
@pytest.mark.add_nodes
@pytest.mark.node_mgmt
def test_add_nodes(
prepare_tmp_dir,
client_shell,
prepare_wallet_and_deposit,
return_nodes_after_test_run,
hosting: Hosting,
):
wallet = prepare_wallet_and_deposit
placement_rule_3 = "REP 3 IN X CBF 1 SELECT 3 FROM * AS X"
placement_rule_4 = "REP 4 IN X CBF 1 SELECT 4 FROM * AS X"
source_file_path = generate_file()
additional_node = choice(
[
node
for node, node_config in NEOFS_NETMAP_DICT.items()
if node_config.get("rpc") != STORAGE_RPC_ENDPOINT_1
]
)
alive_node = choice([node for node in NEOFS_NETMAP_DICT if node != additional_node])
check_node_in_map(additional_node, shell=client_shell, alive_node=alive_node)
# Add node to recovery list before messing with it
check_nodes.append(additional_node)
exclude_node_from_network_map(hosting, additional_node, alive_node, shell=client_shell)
delete_node_data(hosting, additional_node)
cid = create_container(wallet, rule=placement_rule_3, basic_acl=PUBLIC_ACL, shell=client_shell)
oid = put_object(
wallet,
source_file_path,
cid,
endpoint=NEOFS_NETMAP_DICT[alive_node].get("rpc"),
shell=client_shell,
)
wait_object_replication_on_nodes(wallet, cid, oid, 3, shell=client_shell)
return_nodes(shell=client_shell, hosting=hosting, alive_node=alive_node)
with allure.step("Check data could be replicated to new node"):
random_node = choice(
[node for node in NEOFS_NETMAP_DICT if node not in (additional_node, alive_node)]
)
exclude_node_from_network_map(hosting, random_node, alive_node, shell=client_shell)
wait_object_replication_on_nodes(
wallet, cid, oid, 3, excluded_nodes=[random_node], shell=client_shell
)
include_node_to_network_map(hosting, random_node, alive_node, shell=client_shell)
wait_object_replication_on_nodes(wallet, cid, oid, 3, shell=client_shell)
with allure.step("Check container could be created with new node"):
cid = create_container(
wallet, rule=placement_rule_4, basic_acl=PUBLIC_ACL, shell=client_shell
)
oid = put_object(
wallet,
source_file_path,
cid,
endpoint=NEOFS_NETMAP_DICT[alive_node].get("rpc"),
shell=client_shell,
)
wait_object_replication_on_nodes(wallet, cid, oid, 4, shell=client_shell)
@allure.title("Control Operations with storage nodes")
@pytest.mark.node_mgmt
def test_nodes_management(prepare_tmp_dir, client_shell, hosting: Hosting):
"""
This test checks base control operations with storage nodes (healthcheck, netmap-snapshot, set-status).
"""
random_node = choice(list(NEOFS_NETMAP_DICT))
alive_node = choice([node for node in NEOFS_NETMAP_DICT if node != random_node])
# Calculate public key that identifies node in netmap
random_node_wallet_path = NEOFS_NETMAP_DICT[random_node]["wallet_path"]
random_node_netmap_key = get_wallet_public_key(random_node_wallet_path, STORAGE_WALLET_PASS)
with allure.step("Check node {random_node} is in netmap"):
snapshot = get_netmap_snapshot(node_name=alive_node, shell=client_shell)
assert random_node_netmap_key in snapshot, f"Expected node {random_node} in netmap"
with allure.step("Run health check for all storage nodes"):
for node_name in NEOFS_NETMAP_DICT.keys():
health_check = node_healthcheck(hosting, node_name)
assert health_check.health_status == "READY" and health_check.network_status == "ONLINE"
with allure.step(f"Move node {random_node} to offline state"):
node_set_status(hosting, random_node, status="offline")
sleep(parse_time(MORPH_BLOCK_TIME))
tick_epoch()
with allure.step(f"Check node {random_node} went to offline"):
health_check = node_healthcheck(hosting, random_node)
assert health_check.health_status == "READY" and health_check.network_status == "OFFLINE"
snapshot = get_netmap_snapshot(node_name=alive_node, shell=client_shell)
assert random_node_netmap_key not in snapshot, f"Expected node {random_node} not in netmap"
with allure.step(f"Check node {random_node} went to online"):
node_set_status(hosting, random_node, status="online")
sleep(parse_time(MORPH_BLOCK_TIME))
tick_epoch()
with allure.step(f"Check node {random_node} went to online"):
health_check = node_healthcheck(hosting, random_node)
assert health_check.health_status == "READY" and health_check.network_status == "ONLINE"
snapshot = get_netmap_snapshot(node_name=alive_node, shell=client_shell)
assert random_node_netmap_key in snapshot, f"Expected node {random_node} in netmap"
@pytest.mark.parametrize(
"placement_rule,expected_copies",
[
("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", 2),
("REP 2 IN X CBF 1 SELECT 2 FROM * AS X", 2),
("REP 3 IN X CBF 1 SELECT 3 FROM * AS X", 3),
("REP 1 IN X CBF 1 SELECT 1 FROM * AS X", 1),
("REP 1 IN X CBF 2 SELECT 1 FROM * AS X", 1),
("REP 4 IN X CBF 1 SELECT 4 FROM * AS X", 4),
("REP 2 IN X CBF 1 SELECT 4 FROM * AS X", 2),
],
)
@pytest.mark.node_mgmt
@allure.title("Test object copies based on placement policy")
def test_placement_policy(
prepare_wallet_and_deposit, placement_rule, expected_copies, client_shell: Shell
):
"""
This test checks object's copies based on container's placement policy.
"""
wallet = prepare_wallet_and_deposit
file_path = generate_file()
validate_object_copies(wallet, placement_rule, file_path, expected_copies, shell=client_shell)
@pytest.mark.parametrize(
"placement_rule,expected_copies,nodes",
[
("REP 4 IN X CBF 1 SELECT 4 FROM * AS X", 4, ["s01", "s02", "s03", "s04"]),
(
"REP 1 IN LOC_PLACE CBF 1 SELECT 1 FROM LOC_SW AS LOC_PLACE FILTER Country EQ Sweden AS LOC_SW",
1,
["s03"],
),
("REP 1 CBF 1 SELECT 1 FROM LOC_SPB FILTER 'UN-LOCODE' EQ 'RU LED' AS LOC_SPB", 1, ["s02"]),
(
"REP 1 IN LOC_SPB_PLACE REP 1 IN LOC_MSK_PLACE CBF 1 SELECT 1 FROM LOC_SPB AS LOC_SPB_PLACE "
"SELECT 1 FROM LOC_MSK AS LOC_MSK_PLACE "
"FILTER 'UN-LOCODE' EQ 'RU LED' AS LOC_SPB FILTER 'UN-LOCODE' EQ 'RU MOW' AS LOC_MSK",
2,
["s01", "s02"],
),
(
"REP 4 CBF 1 SELECT 4 FROM LOC_EU FILTER Continent EQ Europe AS LOC_EU",
4,
["s01", "s02", "s03", "s04"],
),
(
"REP 1 CBF 1 SELECT 1 FROM LOC_SPB "
"FILTER 'UN-LOCODE' NE 'RU MOW' AND 'UN-LOCODE' NE 'SE STO' AND 'UN-LOCODE' NE 'FI HEL' AS LOC_SPB",
1,
["s02"],
),
(
"REP 2 CBF 1 SELECT 2 FROM LOC_RU FILTER SubDivCode NE 'AB' AND SubDivCode NE '18' AS LOC_RU",
2,
["s01", "s02"],
),
(
"REP 2 CBF 1 SELECT 2 FROM LOC_RU FILTER Country EQ 'Russia' AS LOC_RU",
2,
["s01", "s02"],
),
(
"REP 2 CBF 1 SELECT 2 FROM LOC_EU FILTER Country NE 'Russia' AS LOC_EU",
2,
["s03", "s04"],
),
],
)
@pytest.mark.node_mgmt
@allure.title("Test object copies and storage nodes based on placement policy")
def test_placement_policy_with_nodes(
prepare_wallet_and_deposit, placement_rule, expected_copies, nodes, client_shell: Shell
):
"""
Based on the container's placement policy, check that storage nodes are picked correctly
and that the object has the expected number of copies.
"""
wallet = prepare_wallet_and_deposit
file_path = generate_file()
cid, oid, found_nodes = validate_object_copies(
wallet, placement_rule, file_path, expected_copies, shell=client_shell
)
expected_nodes = [NEOFS_NETMAP_DICT[node_name].get("rpc") for node_name in nodes]
assert set(found_nodes) == set(
expected_nodes
), f"Expected nodes {expected_nodes}, got {found_nodes}"
@pytest.mark.parametrize(
"placement_rule,expected_copies",
[
("REP 2 IN X CBF 2 SELECT 6 FROM * AS X", 2),
],
)
@pytest.mark.node_mgmt
@allure.title("Negative cases for placement policy")
def test_placement_policy_negative(
prepare_wallet_and_deposit, placement_rule, expected_copies, client_shell: Shell
):
"""
Negative test for placement policy: the rule SELECTs 6 nodes, but the devenv netmap holds only 4, so placement must fail with "not enough nodes to SELECT from".
"""
wallet = prepare_wallet_and_deposit
file_path = generate_file()
with pytest.raises(RuntimeError, match=".*not enough nodes to SELECT from.*"):
validate_object_copies(
wallet, placement_rule, file_path, expected_copies, shell=client_shell
)
@pytest.mark.skip(reason="We cover this scenario in failover tests")
@pytest.mark.node_mgmt
@allure.title("NeoFS object replication on node failover")
def test_replication(prepare_wallet_and_deposit, after_run_start_all_nodes, hosting: Hosting):
"""
Test checks object replication when a storage node fails over and comes back online.
"""
wallet = prepare_wallet_and_deposit
file_path = generate_file()
expected_nodes_count = 2
cid = create_container(wallet, basic_acl=PUBLIC_ACL)
oid = put_object(wallet, file_path, cid)
nodes = get_nodes_with_object(wallet, cid, oid)
assert (
len(nodes) == expected_nodes_count
), f"Expected {expected_nodes_count} copies, got {len(nodes)}"
node_names = [name for name, config in NEOFS_NETMAP_DICT.items() if config.get("rpc") in nodes]
stopped_nodes = stop_nodes(hosting, 1, node_names)
wait_for_expected_object_copies(wallet, cid, oid)
start_nodes(hosting, stopped_nodes)
tick_epoch()
for node_name in node_names:
wait_for_node_go_online(hosting, node_name)
wait_for_expected_object_copies(wallet, cid, oid)
@pytest.mark.node_mgmt
@allure.title("NeoFS object could be dropped using control command")
def test_drop_object(prepare_wallet_and_deposit, hosting: Hosting):
"""
Test checks object could be dropped using `neofs-cli control drop-objects` command.
"""
wallet = prepare_wallet_and_deposit
file_path_simple, file_path_complex = generate_file(), generate_file(COMPLEX_OBJ_SIZE)
locode = get_locode()
rule = f"REP 1 CBF 1 SELECT 1 FROM * FILTER 'UN-LOCODE' EQ '{locode}' AS LOC"
cid = create_container(wallet, rule=rule)
oid_simple = put_object(wallet, file_path_simple, cid)
oid_complex = put_object(wallet, file_path_complex, cid)
for oid in (oid_simple, oid_complex):
get_object(wallet, cid, oid)
head_object(wallet, cid, oid)
nodes = get_nodes_with_object(wallet, cid, oid_simple)
node_name = choice(
[name for name, config in NEOFS_NETMAP_DICT.items() if config.get("rpc") in nodes]
)
for oid in (oid_simple, oid_complex):
with allure.step(f"Drop object {oid}"):
get_object(wallet, cid, oid)
head_object(wallet, cid, oid)
drop_object(hosting, node_name, cid, oid)
wait_for_obj_dropped(wallet, cid, oid, get_object)
wait_for_obj_dropped(wallet, cid, oid, head_object)
@pytest.mark.node_mgmt
@pytest.mark.skip(reason="Need to clarify scenario")
@allure.title("Control Operations with storage nodes")
def test_shards(prepare_wallet_and_deposit, create_container_and_pick_node, hosting: Hosting):
wallet = prepare_wallet_and_deposit
file_path = generate_file()
cid, node_name = create_container_and_pick_node
original_oid = put_object(wallet, file_path, cid)
# for mode in ('read-only', 'degraded'):
for mode in ("degraded",):
shards = node_shard_list(hosting, node_name)
assert shards
for shard in shards:
node_shard_set_mode(hosting, node_name, shard, mode)
shards = node_shard_list(hosting, node_name)
assert shards
with pytest.raises(RuntimeError):
put_object(wallet, file_path, cid)
with pytest.raises(RuntimeError):
delete_object(wallet, cid, original_oid)
# head_object(wallet, cid, original_oid)
get_object(wallet, cid, original_oid)
for shard in shards:
node_shard_set_mode(hosting, node_name, shard, "read-write")
shards = node_shard_list(hosting, node_name)
assert shards
oid = put_object(wallet, file_path, cid)
delete_object(wallet, cid, oid)
@allure.step("Validate object has {expected_copies} copies")
def validate_object_copies(
wallet: str, placement_rule: str, file_path: str, expected_copies: int, shell: Shell
):
cid = create_container(wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=shell)
got_policy = placement_policy_from_container(
get_container(wallet, cid, json_mode=False, shell=shell)
)
assert got_policy == placement_rule.replace(
"'", ""
), f"Expected placement policy\n{placement_rule}\nbut got\n{got_policy}"
oid = put_object(wallet, file_path, cid, shell=shell)
nodes = get_nodes_with_object(wallet, cid, oid, shell=shell)
assert len(nodes) == expected_copies, f"Expected {expected_copies} copies, got {len(nodes)}"
return cid, oid, nodes
@allure.step("Wait for node {node_name} goes online")
def wait_for_node_go_online(hosting: Hosting, node_name: str) -> None:
timeout, attempts = 5, 20
for _ in range(attempts):
try:
health_check = node_healthcheck(hosting, node_name)
assert health_check.health_status == "READY" and health_check.network_status == "ONLINE"
return
except Exception as err:
logger.warning(f"Node {node_name} is not online:\n{err}")
sleep(timeout)
raise AssertionError(
f"Node {node_name} hasn't gone to the READY and ONLINE state after {timeout * attempts} second"
)
@allure.step("Wait for node {node_name} is ready")
def wait_for_node_to_be_ready(hosting: Hosting, node_name: str) -> None:
timeout, attempts = 30, 6
for _ in range(attempts):
try:
health_check = node_healthcheck(hosting, node_name)
if health_check.health_status == "READY":
return
except Exception as err:
logger.warning(f"Node {node_name} is not ready:\n{err}")
sleep(timeout)
raise AssertionError(
f"Node {node_name} hasn't gone to the READY state after {timeout * attempts} seconds"
)
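# Editor's sketch (illustrative): the two waiters above differ only in predicate and
# timing, so a generic poller like this could back both; it assumes nothing beyond the
# names already imported in this module.
def wait_for(predicate, attempts: int, timeout: int, failure_message: str) -> None:
    for _ in range(attempts):
        try:
            if predicate():
                return
        except Exception as err:
            logger.warning(f"Condition is not met yet:\n{err}")
        sleep(timeout)
    raise AssertionError(failure_message)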
@allure.step("Wait for {expected_copies} object copies in the wallet")
def wait_for_expected_object_copies(
wallet: str, cid: str, oid: str, expected_copies: int = 2
) -> None:
for _ in range(2):
copies = get_simple_object_copies(wallet, cid, oid)
if copies == expected_copies:
break
tick_epoch()
sleep(parse_time(NEOFS_CONTRACT_CACHE_TIMEOUT))
else:
raise AssertionError(f"There are no {expected_copies} copies during time")
@allure.step("Wait for object to be dropped")
def wait_for_obj_dropped(wallet: str, cid: str, oid: str, checker) -> None:
for _ in range(3):
try:
checker(wallet, cid, oid)
wait_for_gc_pass_on_storage_nodes()
except Exception as err:
if error_matches_status(err, OBJECT_NOT_FOUND):
return
raise AssertionError(f'Expected "{OBJECT_NOT_FOUND}" error, got\n{err}')
raise AssertionError(f"Object {oid} was not dropped from node")

725 pytest_tests/testsuites/object/test_object_api.py (Executable file → Normal file)

@@ -1,590 +1,217 @@
import logging
import random
import sys
from time import sleep
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.resources.error_patterns import (
INVALID_LENGTH_SPECIFIER,
INVALID_OFFSET_SPECIFIER,
INVALID_RANGE_OVERFLOW,
INVALID_RANGE_ZERO_LENGTH,
OBJECT_ALREADY_REMOVED,
OUT_OF_RANGE,
)
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import DEFAULT_EC_PLACEMENT_RULE, DEFAULT_PLACEMENT_RULE, search_nodes_with_container
from frostfs_testlib.steps.cli.object import (
get_object_from_random_node,
from common import COMPLEX_OBJ_SIZE, SIMPLE_OBJ_SIZE
from container import create_container
from epoch import get_epoch, tick_epoch
from file_helper import generate_file, get_file_content, get_file_hash
from grpc_responses import OBJECT_ALREADY_REMOVED, OBJECT_NOT_FOUND, error_matches_status
from neofs_testlib.shell import Shell
from python_keywords.neofs_verbs import (
delete_object,
get_object,
get_range,
get_range_hash,
head_object,
put_object,
put_object_to_random_node,
search_object,
)
from frostfs_testlib.steps.complex_object_actions import get_complex_object_split_ranges
from frostfs_testlib.steps.storage_object import delete_object, delete_objects
from frostfs_testlib.steps.storage_policy import get_complex_object_copies, get_simple_object_copies
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.policy import PlacementPolicy
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import TestFile, get_file_content, get_file_hash
from ...helpers.container_creation import create_container_with_ape
from ...helpers.container_request import APE_EVERYONE_ALLOW_ALL, ContainerRequest
from python_keywords.storage_policy import get_complex_object_copies, get_simple_object_copies
from tombstone import verify_head_tombstone
from utility import wait_for_gc_pass_on_storage_nodes
logger = logging.getLogger("NeoLogger")
CLEANUP_TIMEOUT = 10
COMMON_ATTRIBUTE = {"common_key": "common_value"}
COMMON_CONTAINER_RULE = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
# Will upload object for each attribute set
OBJECT_ATTRIBUTES = [
None,
{"key1": 1, "key2": "abc", "common_key": "common_value"},
{"key1": 2, "common_key": "common_value"},
]
# Config for Range tests
RANGES_COUNT = 4 # by quarters
RANGE_MIN_LEN = 10
RANGE_MAX_LEN = 500
# Used for static ranges found with issues
STATIC_RANGES = {}
def generate_ranges(storage_object: StorageObjectInfo, max_object_size: int, shell: Shell, cluster: Cluster) -> list[tuple[int, int]]:
file_range_step = storage_object.size / RANGES_COUNT
file_ranges = []
file_ranges_to_test = []
for i in range(0, RANGES_COUNT):
file_ranges.append((int(file_range_step * i), int(file_range_step)))
# For simple object we can read all file ranges without too much time for testing
if storage_object.size < max_object_size:
file_ranges_to_test.extend(file_ranges)
# For complex object we need to fetch multiple child objects from different nodes.
else:
assert (
storage_object.size >= RANGE_MAX_LEN + max_object_size
), f"Complex object size should be at least {max_object_size + RANGE_MAX_LEN}. Current: {storage_object.size}"
file_ranges_to_test.append((RANGE_MAX_LEN, max_object_size - RANGE_MAX_LEN))
file_ranges_to_test.extend(get_complex_object_split_ranges(storage_object, shell, cluster))
# Special cases to read some bytes from start and some bytes from end of object
file_ranges_to_test.append((0, RANGE_MIN_LEN))
file_ranges_to_test.append((storage_object.size - RANGE_MIN_LEN, RANGE_MIN_LEN))
for offset, length in file_ranges:
range_length = random.randint(RANGE_MIN_LEN, RANGE_MAX_LEN)
range_start = random.randint(offset, offset + length - 1)
file_ranges_to_test.append((range_start, min(range_length, storage_object.size - range_start)))
file_ranges_to_test.extend(STATIC_RANGES.get(storage_object.size, []))
return file_ranges_to_test
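# Worked example (editor's note): for a simple 1000-byte object with RANGES_COUNT = 4,
# file_range_step is 250, so the deterministic quarters (0, 250), (250, 250), (500, 250)
# and (750, 250) are all tested directly; on top of that the function always adds the two
# edge ranges (0, RANGE_MIN_LEN) and (990, RANGE_MIN_LEN) plus one random cut per quarter
# with a length between RANGE_MIN_LEN and RANGE_MAX_LEN, clamped to the object size.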
@pytest.fixture(scope="module")
def common_container(
frostfs_cli: FrostfsCli,
default_wallet: WalletInfo,
client_shell: Shell,
cluster: Cluster,
rpc_endpoint: str,
) -> str:
container_request = ContainerRequest(COMMON_CONTAINER_RULE, APE_EVERYONE_ALLOW_ALL)
return create_container_with_ape(container_request, frostfs_cli, default_wallet, client_shell, cluster, rpc_endpoint)
@pytest.fixture(scope="module")
def container_nodes(default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster, common_container: str) -> list[ClusterNode]:
return search_nodes_with_container(default_wallet, common_container, client_shell, cluster.default_rpc_endpoint, cluster)
@pytest.fixture(scope="module")
def non_container_nodes(cluster: Cluster, container_nodes: list[ClusterNode]) -> list[ClusterNode]:
return list(set(cluster.cluster_nodes) - set(container_nodes))
@pytest.fixture(
# Module scope to upload/delete each file set only once
scope="module"
)
def storage_objects(
default_wallet: WalletInfo,
client_shell: Shell,
cluster: Cluster,
object_size: ObjectSize,
test_file_module: TestFile,
frostfs_cli: FrostfsCli,
placement_policy: PlacementPolicy,
rpc_endpoint: str,
) -> list[StorageObjectInfo]:
cid = create_container_with_ape(
ContainerRequest(placement_policy.value, APE_EVERYONE_ALLOW_ALL), frostfs_cli, default_wallet, client_shell, cluster, rpc_endpoint
)
with reporter.step("Generate file"):
file_hash = get_file_hash(test_file_module.path)
storage_objects = []
with reporter.step("Put objects"):
# We need to upload objects multiple times with different attributes
for attributes in OBJECT_ATTRIBUTES:
storage_object_id = put_object_to_random_node(
default_wallet,
test_file_module.path,
cid,
client_shell,
cluster,
attributes=attributes,
)
storage_object = StorageObjectInfo(cid, storage_object_id)
storage_object.size = object_size.value
storage_object.wallet = default_wallet
storage_object.file_path = test_file_module.path
storage_object.file_hash = file_hash
storage_object.attributes = attributes
storage_objects.append(storage_object)
yield storage_objects
# Teardown after all tests done with current param
delete_objects(storage_objects, client_shell, cluster)
@pytest.fixture()
def expected_object_copies(placement_policy: PlacementPolicy) -> int:
if placement_policy.name == "rep":
return 2
return 4
@pytest.mark.nightly
@allure.title("Test native object API")
@pytest.mark.sanity
@pytest.mark.grpc_api
class TestObjectApi(ClusterTestBase):
@allure.title("Storage policy by native API (obj_size={object_size}, policy={placement_policy})")
def test_object_storage_policies(
self,
storage_objects: list[StorageObjectInfo],
simple_object_size: ObjectSize,
expected_object_copies: int,
):
"""
Validate object storage policy
"""
@pytest.mark.parametrize(
"object_size", [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE], ids=["simple object", "complex object"]
)
def test_object_api(prepare_wallet_and_deposit, client_shell, request, object_size):
"""
Test common gRPC API for object (put/get/head/get_range_hash/get_range/search/delete).
"""
allure.dynamic.title(f"Test native object API for {request.node.callspec.id}")
with reporter.step("Validate storage policy for objects"):
for storage_object in storage_objects:
if storage_object.size == simple_object_size.value:
copies = get_simple_object_copies(
storage_object.wallet,
storage_object.cid,
storage_object.oid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
)
else:
copies = get_complex_object_copies(
storage_object.wallet,
storage_object.cid,
storage_object.oid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
)
assert copies == expected_object_copies, f"Expected {expected_object_copies} copies"
wallet = prepare_wallet_and_deposit
cid = create_container(wallet, shell=client_shell)
wallet_cid = {"wallet": wallet, "cid": cid}
file_usr_header = {"key1": 1, "key2": "abc", "common_key": "common_value"}
file_usr_header_oth = {"key1": 2, "common_key": "common_value"}
common_header = {"common_key": "common_value"}
range_len = 10
range_cut = f"0:{range_len}"
file_path = generate_file(object_size)
file_hash = get_file_hash(file_path)
@allure.title("Get object by native API (obj_size={object_size}, policy={placement_policy})")
def test_get_object_api(self, storage_objects: list[StorageObjectInfo]):
"""
Validate get object native API
"""
with allure.step("Check container is empty"):
search_object(**wallet_cid, expected_objects_list=[], shell=client_shell)
with reporter.step("Get objects and compare hashes"):
for storage_object in storage_objects:
file_path = get_object_from_random_node(
storage_object.wallet,
storage_object.cid,
storage_object.oid,
self.shell,
cluster=self.cluster,
oids = []
with allure.step("Put objects"):
oids.append(put_object(wallet=wallet, path=file_path, cid=cid, shell=client_shell))
oids.append(
put_object(
wallet=wallet,
path=file_path,
cid=cid,
shell=client_shell,
attributes=file_usr_header,
)
)
oids.append(
put_object(
wallet=wallet,
path=file_path,
cid=cid,
shell=client_shell,
attributes=file_usr_header_oth,
)
)
with allure.step("Validate storage policy for objects"):
for oid_to_check in oids:
if object_size == SIMPLE_OBJ_SIZE:
copies = get_simple_object_copies(
wallet=wallet, cid=cid, oid=oid_to_check, shell=client_shell
)
file_hash = get_file_hash(file_path)
assert storage_object.file_hash == file_hash
else:
copies = get_complex_object_copies(
wallet=wallet, cid=cid, oid=oid_to_check, shell=client_shell
)
assert copies == 2, "Expected 2 copies"
@allure.title("Head object by native API (obj_size={object_size}, policy={placement_policy})")
def test_head_object_api(self, storage_objects: list[StorageObjectInfo]):
"""
Validate head object native API
"""
with allure.step("Get objects and compare hashes"):
for oid_to_check in oids:
got_file_path = get_object(wallet=wallet, cid=cid, oid=oid_to_check, shell=client_shell)
got_file_hash = get_file_hash(got_file_path)
assert file_hash == got_file_hash
storage_object_1 = storage_objects[0]
storage_object_2 = storage_objects[1]
with allure.step("Get range/range hash"):
range_hash = get_range_hash(
**wallet_cid, oid=oids[0], shell=client_shell, range_cut=range_cut
)
assert (
get_file_hash(file_path, range_len) == range_hash
), f"Expected range hash to match {range_cut} slice of file payload"
with reporter.step("Head object and validate"):
head_object(
storage_object_1.wallet,
storage_object_1.cid,
storage_object_1.oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
head_info = head_object(
storage_object_2.wallet,
storage_object_2.cid,
storage_object_2.oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
self.check_header_is_presented(head_info, storage_object_2.attributes)
range_hash = get_range_hash(
**wallet_cid, oid=oids[1], shell=client_shell, range_cut=range_cut
)
assert (
get_file_hash(file_path, range_len) == range_hash
), f"Expected range hash to match {range_cut} slice of file payload"
@allure.title("Head deleted object with --raw arg (obj_size={object_size}, policy={container_request})")
@pytest.mark.parametrize(
"container_request",
[
ContainerRequest(DEFAULT_PLACEMENT_RULE, APE_EVERYONE_ALLOW_ALL, "rep"),
ContainerRequest(DEFAULT_EC_PLACEMENT_RULE, APE_EVERYONE_ALLOW_ALL, "ec"),
],
indirect=True,
ids=["rep", "ec"],
_, range_content = get_range(
**wallet_cid, oid=oids[1], shell=client_shell, range_cut=range_cut
)
assert (
get_file_content(file_path, content_len=range_len, mode="rb") == range_content
), f"Expected range content to match {range_cut} slice of file payload"
with allure.step("Search objects"):
search_object(**wallet_cid, shell=client_shell, expected_objects_list=oids)
search_object(
**wallet_cid,
shell=client_shell,
filters=file_usr_header,
expected_objects_list=oids[1:2],
)
search_object(
**wallet_cid,
shell=client_shell,
filters=file_usr_header_oth,
expected_objects_list=oids[2:3],
)
search_object(
**wallet_cid, shell=client_shell, filters=common_header, expected_objects_list=oids[1:3]
)
with allure.step("Head object and validate"):
head_object(**wallet_cid, oid=oids[0], shell=client_shell)
head_info = head_object(**wallet_cid, oid=oids[1], shell=client_shell)
check_header_is_presented(head_info, file_usr_header)
with allure.step("Delete objects"):
tombstone_s = delete_object(**wallet_cid, oid=oids[0], shell=client_shell)
tombstone_h = delete_object(**wallet_cid, oid=oids[1], shell=client_shell)
verify_head_tombstone(
wallet_path=wallet, cid=cid, oid_ts=tombstone_s, oid=oids[0], shell=client_shell
)
verify_head_tombstone(
wallet_path=wallet, cid=cid, oid_ts=tombstone_h, oid=oids[1], shell=client_shell
)
def test_object_head_raw(self, default_wallet: str, container: str, test_file: TestFile):
with reporter.step("Upload object"):
oid = put_object_to_random_node(default_wallet, test_file.path, container, self.shell, self.cluster)
with reporter.step("Delete object"):
delete_object(default_wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint)
tick_epoch()
sleep(CLEANUP_TIMEOUT)
with reporter.step("Call object head --raw and expect error"):
with pytest.raises(Exception, match=OBJECT_ALREADY_REMOVED):
head_object(default_wallet, container, oid, self.shell, self.cluster.default_rpc_endpoint, is_raw=True)
with allure.step("Get objects and check errors"):
get_object_and_check_error(
**wallet_cid, oid=oids[0], error_pattern=OBJECT_ALREADY_REMOVED, shell=client_shell
)
get_object_and_check_error(
**wallet_cid, oid=oids[1], error_pattern=OBJECT_ALREADY_REMOVED, shell=client_shell
)
@allure.title("Search objects by native API (obj_size={object_size}, policy={placement_policy})")
def test_search_object_api(self, storage_objects: list[StorageObjectInfo]):
"""
Validate object search by native API
"""
oids = [storage_object.oid for storage_object in storage_objects]
wallet = storage_objects[0].wallet
cid = storage_objects[0].cid
@allure.title("Test object life time")
@pytest.mark.sanity
@pytest.mark.grpc_api
@pytest.mark.parametrize(
"object_size", [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE], ids=["simple object", "complex object"]
)
def test_object_api_lifetime(prepare_wallet_and_deposit, client_shell, request, object_size):
"""
Test that the object is deleted after its expiration epoch.
"""
wallet = prepare_wallet_and_deposit
cid = create_container(wallet, shell=client_shell)
test_table = [
(OBJECT_ATTRIBUTES[1], oids[1:2]),
(OBJECT_ATTRIBUTES[2], oids[2:3]),
(COMMON_ATTRIBUTE, oids[1:3]),
]
allure.dynamic.title(f"Test object life time for {request.node.callspec.id}")
with reporter.step("Search objects"):
# Search with no attributes
result = search_object(
wallet,
cid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
expected_objects_list=oids,
root=True,
)
assert sorted(oids) == sorted(result)
file_path = generate_file(object_size)
file_hash = get_file_hash(file_path)
epoch = get_epoch()
# search by test table
for filter, expected_oids in test_table:
result = search_object(
wallet,
cid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
filters=filter,
expected_objects_list=expected_oids,
root=True,
)
assert sorted(expected_oids) == sorted(result)
oid = put_object(wallet, file_path, cid, shell=client_shell, expire_at=epoch + 1)
got_file = get_object(wallet, cid, oid, shell=client_shell)
assert get_file_hash(got_file) == file_hash
@allure.title("Search objects with removed items (obj_size={object_size})")
@pytest.mark.exclude_sanity
def test_object_search_should_return_tombstone_items(
self,
default_wallet: WalletInfo,
container: str,
object_size: ObjectSize,
rpc_endpoint: str,
test_file: TestFile,
):
"""
Validate object search with removed items
"""
with allure.step("Tick two epochs"):
for _ in range(2):
tick_epoch()
with reporter.step("Put object to container"):
storage_object = StorageObjectInfo(
cid=container,
oid=put_object_to_random_node(default_wallet, test_file.path, container, self.shell, self.cluster),
size=object_size.value,
wallet=default_wallet,
file_path=test_file.path,
file_hash=get_file_hash(test_file.path),
)
# Wait for GC, because object with expiration is counted as alive until GC removes it
wait_for_gc_pass_on_storage_nodes()
with reporter.step("Search object"):
# Root Search object should return root object oid
result = search_object(default_wallet, container, self.shell, rpc_endpoint, root=True)
objects_before_deletion = len(result)
assert storage_object.oid in result
with allure.step("Check object deleted because it expires-on epoch"):
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
get_object(wallet, cid, oid, shell=client_shell)
with reporter.step("Delete object"):
delete_objects([storage_object], self.shell, self.cluster)
with reporter.step("Search deleted object with --root"):
# Root Search object should return nothing
result = search_object(default_wallet, container, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, root=True)
assert len(result) == 0
def get_object_and_check_error(
wallet: str, cid: str, oid: str, error_pattern: str, shell: Shell
) -> None:
try:
get_object(wallet=wallet, cid=cid, oid=oid, shell=shell)
raise AssertionError(f"Expected object {oid} removed, but it is not")
except Exception as err:
logger.info(f"Error is {err}")
assert error_matches_status(err, error_pattern), f"Expected {err} to match {error_pattern}"
with reporter.step("Search deleted object with --phy should return only tombstones"):
# Physical Search object should return only tombstones
result = search_object(default_wallet, container, self.shell, rpc_endpoint, phy=True)
assert storage_object.tombstone in result, "Search result should contain tombstone of removed object"
assert storage_object.oid not in result, "Search result should not contain ObjectId of removed object"
for tombstone_oid in result:
head_info = head_object(default_wallet, container, tombstone_oid, self.shell, rpc_endpoint)
object_type = head_info["header"]["objectType"]
assert object_type == "TOMBSTONE", f"Object wasn't deleted properly. Found object {tombstone_oid} with type {object_type}"
@allure.title("Get range hash by native API (obj_size={object_size}, policy={placement_policy})")
@pytest.mark.grpc_api
def test_object_get_range_hash(self, storage_objects: list[StorageObjectInfo], max_object_size):
"""
Validate get_range_hash for object by native gRPC API
"""
wallet = storage_objects[0].wallet
cid = storage_objects[0].cid
oids = [storage_object.oid for storage_object in storage_objects[:2]]
file_path = storage_objects[0].file_path
file_ranges_to_test = generate_ranges(storage_objects[0], max_object_size, self.shell, self.cluster)
logging.info(f"Ranges used in test {file_ranges_to_test}")
for range_start, range_len in file_ranges_to_test:
range_cut = f"{range_start}:{range_len}"
with reporter.step(f"Get range hash ({range_cut})"):
for oid in oids:
range_hash = get_range_hash(
wallet,
cid,
oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
range_cut=range_cut,
)
assert (
get_file_hash(file_path, range_len, range_start) == range_hash
), f"Expected range hash to match {range_cut} slice of file payload"
@allure.title("Get range by native API (obj_size={object_size}, policy={placement_policy})")
@pytest.mark.grpc_api
def test_object_get_range(self, storage_objects: list[StorageObjectInfo], max_object_size):
"""
Validate get_range for object by native gRPC API
"""
wallet = storage_objects[0].wallet
cid = storage_objects[0].cid
oids = [storage_object.oid for storage_object in storage_objects[:2]]
file_path = storage_objects[0].file_path
file_ranges_to_test = generate_ranges(storage_objects[0], max_object_size, self.shell, self.cluster)
logging.info(f"Ranges used in test {file_ranges_to_test}")
for range_start, range_len in file_ranges_to_test:
range_cut = f"{range_start}:{range_len}"
with reporter.step(f"Get range ({range_cut})"):
for oid in oids:
_, range_content = get_range(
wallet,
cid,
oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
range_cut=range_cut,
)
assert (
get_file_content(file_path, content_len=range_len, mode="rb", offset=range_start) == range_content
), f"Expected range content to match {range_cut} slice of file payload"
@allure.title("[NEGATIVE] Get invalid range by native API (obj_size={object_size}, policy={placement_policy})")
@pytest.mark.grpc_api
def test_object_get_range_negatives(
self,
storage_objects: list[StorageObjectInfo],
):
"""
Validate get_range negative for object by native gRPC API
"""
wallet = storage_objects[0].wallet
cid = storage_objects[0].cid
oids = [storage_object.oid for storage_object in storage_objects[:2]]
file_size = storage_objects[0].size
assert RANGE_MIN_LEN < file_size, f"Incorrect test setup. File size ({file_size}) is less than RANGE_MIN_LEN ({RANGE_MIN_LEN})"
file_ranges_to_test: list[tuple[int, int, str]] = [
# Offset is bigger than the file size, the length is small.
(file_size + 1, RANGE_MIN_LEN, OUT_OF_RANGE),
# Offset is ok, but offset+length is too big.
(file_size - RANGE_MIN_LEN, RANGE_MIN_LEN * 2, OUT_OF_RANGE),
# Offset is ok, and length is very big: sys.maxsize * 2 + 1 == 2**64 - 1 (MaxUint64) on 64-bit builds, so offset+length wraps and still looks "valid".
(RANGE_MIN_LEN, sys.maxsize * 2 + 1, INVALID_RANGE_OVERFLOW),
# Length is zero
(10, 0, INVALID_RANGE_ZERO_LENGTH),
# Negative values
(-1, 1, INVALID_OFFSET_SPECIFIER),
(10, -5, INVALID_LENGTH_SPECIFIER),
]
for range_start, range_len, expected_error in file_ranges_to_test:
range_cut = f"{range_start}:{range_len}"
expected_error = expected_error.format(range=range_cut) if "{range}" in expected_error else expected_error
with reporter.step(f"Get range ({range_cut})"):
for oid in oids:
with pytest.raises(Exception, match=expected_error):
get_range(
wallet,
cid,
oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
range_cut=range_cut,
)
@allure.title("[NEGATIVE] Get invalid range hash by native API (obj_size={object_size}, policy={placement_policy})")
def test_object_get_range_hash_negatives(
self,
storage_objects: list[StorageObjectInfo],
):
"""
Validate get_range_hash negative for object by native gRPC API
"""
wallet = storage_objects[0].wallet
cid = storage_objects[0].cid
oids = [storage_object.oid for storage_object in storage_objects[:2]]
file_size = storage_objects[0].size
assert RANGE_MIN_LEN < file_size, f"Incorrect test setup. File size ({file_size}) is less than RANGE_MIN_LEN ({RANGE_MIN_LEN})"
file_ranges_to_test: list[tuple[int, int, str]] = [
# Offset is bigger than the file size, the length is small.
(file_size + 1, RANGE_MIN_LEN, OUT_OF_RANGE),
# Offset is ok, but offset+length is too big.
(file_size - RANGE_MIN_LEN, RANGE_MIN_LEN * 2, OUT_OF_RANGE),
# Offset is ok, and length is very big: sys.maxsize * 2 + 1 == 2**64 - 1 (MaxUint64) on 64-bit builds, so offset+length wraps and still looks "valid".
(RANGE_MIN_LEN, sys.maxsize * 2 + 1, INVALID_RANGE_OVERFLOW),
# Length is zero
(10, 0, INVALID_RANGE_ZERO_LENGTH),
# Negative values
(-1, 1, INVALID_OFFSET_SPECIFIER),
(10, -5, INVALID_LENGTH_SPECIFIER),
]
for range_start, range_len, expected_error in file_ranges_to_test:
range_cut = f"{range_start}:{range_len}"
expected_error = expected_error.format(range=range_cut) if "{range}" in expected_error else expected_error
with reporter.step(f"Get range hash ({range_cut})"):
for oid in oids:
with pytest.raises(Exception, match=expected_error):
get_range_hash(
wallet,
cid,
oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
range_cut=range_cut,
)
@allure.title("Get range from container and non-container nodes (object_size={object_size})")
def test_get_range_from_different_node(
self,
default_wallet: str,
common_container: str,
container_nodes: list[ClusterNode],
non_container_nodes: list[ClusterNode],
file_path: str,
):
with reporter.step("Put object to container"):
container_node = random.choice(container_nodes)
oid = put_object(default_wallet, file_path, common_container, self.shell, container_node.storage_node.get_rpc_endpoint())
with reporter.step("Get range from container node endpoint"):
get_range(
default_wallet,
common_container,
oid,
"0:10",
self.shell,
container_node.storage_node.get_rpc_endpoint(),
)
with reporter.step("Get range from non-container node endpoint"):
non_container_node = random.choice(non_container_nodes)
get_range(
default_wallet,
common_container,
oid,
"0:10",
self.shell,
non_container_node.storage_node.get_rpc_endpoint(),
)
@allure.title("Get range hash from container and non-container nodes (object_size={object_size})")
def test_get_range_hash_from_different_node(
self,
default_wallet: str,
common_container: str,
container_nodes: list[ClusterNode],
non_container_nodes: list[ClusterNode],
file_path: str,
):
with reporter.step("Put object to container"):
container_node = random.choice(container_nodes)
oid = put_object(default_wallet, file_path, common_container, self.shell, container_node.storage_node.get_rpc_endpoint())
with reporter.step("Get range hash from container node endpoint"):
get_range_hash(
default_wallet,
common_container,
oid,
"0:10",
self.shell,
container_node.storage_node.get_rpc_endpoint(),
)
with reporter.step("Get range hash from non-container node endpoint"):
non_container_node = random.choice(non_container_nodes)
get_range_hash(
default_wallet,
common_container,
oid,
"0:10",
self.shell,
non_container_node.storage_node.get_rpc_endpoint(),
)
def check_header_is_presented(self, head_info: dict, object_header: dict) -> None:
for key_to_check, val_to_check in object_header.items():
assert key_to_check in head_info["header"]["attributes"], f"Key {key_to_check} not found in header attributes"
assert head_info["header"]["attributes"].get(key_to_check) == str(val_to_check), f"Attribute {key_to_check} does not match expected value {val_to_check}"
def check_header_is_presented(head_info: dict, object_header: dict):
for key_to_check, val_to_check in object_header.items():
assert (
key_to_check in head_info["header"]["attributes"]
), f"Key {key_to_check} not found in header attributes"
assert head_info["header"]["attributes"].get(key_to_check) == str(
val_to_check
), f"Attribute {key_to_check} does not match expected value {val_to_check}"


@@ -1,170 +0,0 @@
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import (
REP_2_FOR_3_NODES_PLACEMENT_RULE,
SINGLE_PLACEMENT_RULE,
StorageContainer,
StorageContainerInfo,
)
from frostfs_testlib.steps.cli.object import delete_object, get_object
from frostfs_testlib.steps.storage_object import StorageObjectInfo
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses import ape
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.storage.grpc_operations.client_wrappers import CliClientWrapper
from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises
from pytest import FixtureRequest
from ...helpers.bearer_token import create_bearer_token
from ...helpers.container_access import assert_full_access_to_container
from ...helpers.container_creation import create_container_with_ape
from ...helpers.container_request import ContainerRequest
@pytest.fixture(scope="session")
@allure.title("Create user container for bearer token usage")
def user_container(
frostfs_cli: FrostfsCli, default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster, rpc_endpoint: str, request: FixtureRequest
) -> StorageContainer:
policy = request.param if "param" in request.__dict__ else SINGLE_PLACEMENT_RULE
container_request = ContainerRequest(policy)
container_id = create_container_with_ape(container_request, frostfs_cli, default_wallet, client_shell, cluster, rpc_endpoint)
# Deliberately using s3gate wallet here to test bearer token
s3_gate_wallet = WalletInfo.from_node(cluster.s3_gates[0])
return StorageContainer(StorageContainerInfo(container_id, s3_gate_wallet), client_shell, cluster)
@pytest.fixture(scope="session")
@allure.title("Create bearer token with allowed put for container")
def bearer_token(frostfs_cli: FrostfsCli, temp_directory: str, user_container: StorageContainer, cluster: Cluster) -> str:
rule = ape.Rule(ape.Verb.ALLOW, ape.ObjectOperations.WILDCARD_ALL)
return create_bearer_token(frostfs_cli, temp_directory, user_container.get_id(), rule, cluster.default_rpc_endpoint)
@pytest.fixture(scope="session")
def grpc_client_with_other_wallet(client_shell: Shell, other_wallet: WalletInfo) -> GrpcClientWrapper:
return CliClientWrapper(FrostfsCli(client_shell, FROSTFS_CLI_EXEC, other_wallet.config_path))
@pytest.fixture()
def storage_objects(
user_container: StorageContainer,
bearer_token: str,
object_size: ObjectSize,
cluster: Cluster,
) -> list[StorageObjectInfo]:
storage_objects: list[StorageObjectInfo] = []
for node in cluster.storage_nodes:
storage_objects.append(
user_container.generate_object(
object_size.value,
bearer_token=bearer_token,
endpoint=node.get_rpc_endpoint(),
)
)
return storage_objects
@pytest.mark.nightly
@pytest.mark.bearer
@pytest.mark.ape
@pytest.mark.grpc_api
class TestObjectApiWithBearerToken(ClusterTestBase):
@allure.title("Object can be deleted from any node using s3gate wallet with bearer token (obj_size={object_size})")
@pytest.mark.parametrize(
"user_container",
[SINGLE_PLACEMENT_RULE],
indirect=True,
)
def test_delete_object_with_s3_wallet_bearer(
self,
storage_objects: list[StorageObjectInfo],
bearer_token: str,
):
s3_gate_wallet = WalletInfo.from_node(self.cluster.s3_gates[0])
with reporter.step("Delete each object from first storage node"):
for storage_object in storage_objects:
with expect_not_raises():
delete_object(
s3_gate_wallet,
storage_object.cid,
storage_object.oid,
self.shell,
endpoint=self.cluster.default_rpc_endpoint,
bearer=bearer_token,
)
@allure.title("Object can be fetched from any node using s3gate wallet with bearer token (obj_size={object_size})")
@pytest.mark.parametrize(
"user_container",
[REP_2_FOR_3_NODES_PLACEMENT_RULE],
indirect=True,
)
def test_get_object_with_s3_wallet_bearer_from_all_nodes(
self,
user_container: StorageContainer,
object_size: ObjectSize,
bearer_token: str,
):
s3_gate_wallet = WalletInfo.from_node(self.cluster.s3_gates[0])
with reporter.step("Put object to container"):
storage_object = user_container.generate_object(
object_size.value,
bearer_token=bearer_token,
endpoint=self.cluster.default_rpc_endpoint,
)
with reporter.step("Get object from each storage node"):
for node in self.cluster.storage_nodes:
with expect_not_raises():
get_object(
s3_gate_wallet,
storage_object.cid,
storage_object.oid,
self.shell,
node.get_rpc_endpoint(),
bearer_token,
)
# TODO: Without PATCH operation,
# since it requires specific permissions that do not apply when testing all operations at once
@allure.title("Wildcard APE rule contains all permissions (obj_size={object_size})")
def test_ape_wildcard_contains_all_rules(
self,
other_wallet: WalletInfo,
storage_objects: list[StorageObjectInfo],
bearer_token: str,
):
obj = storage_objects.pop()
with reporter.step("Assert all operations available with object"):
assert_full_access_to_container(other_wallet, obj.cid, obj.oid, obj.file_path, self.shell, self.cluster, bearer_token)
@allure.title("Wildcard APE rule contains PATCH permission (obj_size={object_size})")
def test_ape_wildcard_contains_patch_rule(
self,
grpc_client_with_other_wallet: GrpcClientWrapper,
storage_objects: list[StorageObjectInfo],
bearer_token: str,
):
obj = storage_objects.pop()
with reporter.step("Verify patch is available"):
patched_oid = grpc_client_with_other_wallet.object.patch(
obj.cid,
obj.oid,
self.cluster.default_rpc_endpoint,
ranges=["99:88"],
payloads=[obj.file_path],
new_attrs="test-attribute=100",
bearer=bearer_token,
timeout="200s",
)
assert patched_oid != obj.oid, "OID of patched object must be different from original one"

File diff suppressed because it is too large


@@ -1,61 +0,0 @@
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.resources.common import STORAGE_GC_TIME
from frostfs_testlib.resources.error_patterns import OBJECT_ALREADY_REMOVED, OBJECT_NOT_FOUND
from frostfs_testlib.steps.cli.container import DEFAULT_EC_PLACEMENT_RULE, DEFAULT_PLACEMENT_RULE
from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils import datetime_utils
from frostfs_testlib.utils.file_utils import TestFile
from ...helpers.container_request import APE_EVERYONE_ALLOW_ALL, ContainerRequest
@wait_for_success(datetime_utils.parse_time(STORAGE_GC_TIME) * 5, datetime_utils.parse_time(STORAGE_GC_TIME))
def wait_for_object_status_change_to(status: str, grpc_client: GrpcClientWrapper, cid: str, oid: str, endpoint: str) -> None:
with pytest.raises(Exception, match=status):
grpc_client.object.head(cid, oid, endpoint)
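# Editor's note: assuming frostfs_testlib's wait_for_success(max_wait, interval) simply
# retries the decorated call until it stops raising or max_wait elapses, the helper above
# is roughly equivalent to this manual loop (illustrative sketch only):
#
#     deadline = time.monotonic() + datetime_utils.parse_time(STORAGE_GC_TIME) * 5
#     while True:
#         try:
#             with pytest.raises(Exception, match=status):
#                 grpc_client.object.head(cid, oid, endpoint)
#             return
#         except Exception:
#             if time.monotonic() > deadline:
#                 raise
#             time.sleep(datetime_utils.parse_time(STORAGE_GC_TIME))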
@pytest.mark.nightly
@pytest.mark.sanity
@pytest.mark.grpc_api
class TestObjectApiLifetime(ClusterTestBase):
@allure.title("Object is removed when lifetime expired (obj_size={object_size}, policy={container_request.short_name})")
@pytest.mark.parametrize(
"container_request",
[
ContainerRequest(DEFAULT_PLACEMENT_RULE, APE_EVERYONE_ALLOW_ALL, "REP 2"),
ContainerRequest(DEFAULT_EC_PLACEMENT_RULE, APE_EVERYONE_ALLOW_ALL, "EC 3.1"),
],
)
def test_object_api_lifetime(self, grpc_client: GrpcClientWrapper, container: str, test_file: TestFile):
"""
Test that the object is deleted after its expiration epoch.
"""
with reporter.step("Get current epoch"):
current_epoch = self.get_epoch()
last_active_epoch = current_epoch + 1
with reporter.step("Put object to random node"):
oid = grpc_client.object.put_to_random_node(test_file, container, self.cluster, expire_at=last_active_epoch)
with reporter.step("Ensure that expiration of object has expected value"):
object_info: dict = grpc_client.object.head(container, oid, self.cluster.default_rpc_endpoint)
expiration_epoch = int(object_info["header"]["attributes"]["__SYSTEM__EXPIRATION_EPOCH"])
assert expiration_epoch == last_active_epoch, f"Expiration time set for object is not expected: {expiration_epoch}"
with reporter.step("Tick two epoch for object expiration"):
self.tick_epochs(2)
with reporter.step("Wait until GC marks object as 'already removed' or 'not found'"):
wait_for_object_status_change_to(
f"{OBJECT_ALREADY_REMOVED}|{OBJECT_NOT_FOUND}", grpc_client, container, oid, self.cluster.default_rpc_endpoint
)
with reporter.step("Try to get object from random node and make sure it is really deleted"):
with pytest.raises(Exception, match=f"{OBJECT_ALREADY_REMOVED}|{OBJECT_NOT_FOUND}"):
grpc_client.object.get_from_random_node(container, oid, self.cluster)


@@ -1,662 +0,0 @@
import logging
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.resources.common import STORAGE_GC_TIME
from frostfs_testlib.resources.error_patterns import (
LIFETIME_REQUIRED,
LOCK_NON_REGULAR_OBJECT,
LOCK_OBJECT_EXPIRATION,
LOCK_OBJECT_REMOVAL,
OBJECT_IS_LOCKED,
OBJECT_NOT_FOUND,
)
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import StorageContainer, StorageContainerInfo
from frostfs_testlib.steps.cli.object import delete_object, head_object, lock_object
from frostfs_testlib.steps.complex_object_actions import get_link_object, get_storage_object_chunks
from frostfs_testlib.steps.epoch import ensure_fresh_epoch
from frostfs_testlib.steps.node_management import drop_object
from frostfs_testlib.steps.storage_object import delete_objects
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.storage_object_info import LockObjectInfo, StorageObjectInfo
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises, wait_for_success
from frostfs_testlib.utils import datetime_utils
from ...helpers.container_creation import create_container_with_ape
from ...helpers.container_request import EVERYONE_ALLOW_ALL
from ...helpers.utility import wait_for_gc_pass_on_storage_nodes
logger = logging.getLogger("NeoLogger")
FIXTURE_LOCK_LIFETIME = 5
FIXTURE_OBJECT_LIFETIME = 10
@pytest.fixture(scope="module")
def user_container(
frostfs_cli: FrostfsCli, default_wallet: WalletInfo, client_shell: Shell, cluster: Cluster, rpc_endpoint: str
) -> StorageContainer:
cid = create_container_with_ape(EVERYONE_ALLOW_ALL, frostfs_cli, default_wallet, client_shell, cluster, rpc_endpoint)
return StorageContainer(StorageContainerInfo(cid, default_wallet), client_shell, cluster)
@pytest.fixture(scope="module")
def locked_storage_object(user_container: StorageContainer, client_shell: Shell, cluster: Cluster, object_size: ObjectSize):
"""
Intention of this fixture is to provide a storage object which is NOT expected to be deleted during the test act phase
"""
with reporter.step("Creating locked object"):
current_epoch = ensure_fresh_epoch(client_shell, cluster)
expiration_epoch = current_epoch + FIXTURE_LOCK_LIFETIME
storage_object = user_container.generate_object(object_size.value, expire_at=current_epoch + FIXTURE_OBJECT_LIFETIME)
lock_object_id = lock_object(
storage_object.wallet,
storage_object.cid,
storage_object.oid,
client_shell,
cluster.default_rpc_endpoint,
lifetime=FIXTURE_LOCK_LIFETIME,
)
storage_object.locks = [LockObjectInfo(storage_object.cid, lock_object_id, FIXTURE_LOCK_LIFETIME, expiration_epoch)]
return storage_object
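# Editor's note (worked timeline for the constants above): if the object is created at
# epoch E, it expires at E + FIXTURE_OBJECT_LIFETIME = E + 10, while its lock expires at
# E + FIXTURE_LOCK_LIFETIME = E + 5. Tests that tick fewer than 5 epochs can therefore
# still rely on the lock being active and the object being protected from deletion.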
@wait_for_success(datetime_utils.parse_time(STORAGE_GC_TIME))
def check_object_not_found(wallet: WalletInfo, cid: str, oid: str, shell: Shell, rpc_endpoint: str):
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
head_object(wallet, cid, oid, shell, rpc_endpoint)
def verify_object_available(wallet: WalletInfo, cid: str, oid: str, shell: Shell, rpc_endpoint: str):
with expect_not_raises():
head_object(wallet, cid, oid, shell, rpc_endpoint)
@pytest.mark.nightly
@pytest.mark.grpc_api
@pytest.mark.grpc_object_lock
class TestObjectLockWithGrpc(ClusterTestBase):
@pytest.fixture()
def new_locked_storage_object(self, user_container: StorageContainer, object_size: ObjectSize) -> StorageObjectInfo:
"""
Intention of this fixture is to provide a new storage object for tests which may delete or corrupt the object or its complementary objects.
So we need a new one each time we ask for it
"""
with reporter.step("Creating locked object"):
current_epoch = self.get_epoch()
storage_object = user_container.generate_object(object_size.value, expire_at=current_epoch + FIXTURE_OBJECT_LIFETIME)
lock_object(
storage_object.wallet,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
lifetime=FIXTURE_LOCK_LIFETIME,
)
return storage_object
@allure.title("Locked object is protected from deletion (obj_size={object_size})")
def test_locked_object_cannot_be_deleted(
self,
locked_storage_object: StorageObjectInfo,
):
"""
Locked object should be protected from deletion
"""
with pytest.raises(Exception, match=OBJECT_IS_LOCKED):
delete_object(
locked_storage_object.wallet,
locked_storage_object.cid,
locked_storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
)
@allure.title("Lock object itself is protected from deletion")
# We operate with only lock object here so no complex object needed in this test
@pytest.mark.parametrize("object_size", ["simple"], indirect=True)
def test_lock_object_itself_cannot_be_deleted(
self,
locked_storage_object: StorageObjectInfo,
):
"""
Lock object itself should be protected from deletion
"""
lock_object = locked_storage_object.locks[0]
wallet_path = locked_storage_object.wallet
with pytest.raises(Exception, match=LOCK_OBJECT_REMOVAL):
delete_object(
wallet_path,
lock_object.cid,
lock_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
)
@allure.title("Lock object itself cannot be locked")
# We operate with only lock object here so no complex object needed in this test
@pytest.mark.parametrize("object_size", ["simple"], indirect=True)
def test_lock_object_cannot_be_locked(
self,
locked_storage_object: StorageObjectInfo,
):
"""
Lock object itself cannot be locked
"""
lock_object_info = locked_storage_object.locks[0]
wallet_path = locked_storage_object.wallet
with pytest.raises(Exception, match=LOCK_NON_REGULAR_OBJECT):
lock_object(
wallet_path,
lock_object_info.cid,
lock_object_info.oid,
self.shell,
self.cluster.default_rpc_endpoint,
1,
)
@allure.title("Lock must contain valid lifetime or expire_at field: (lifetime={wrong_lifetime}, expire-at={wrong_expire_at})")
# We operate with only lock object here so no complex object needed in this test
@pytest.mark.parametrize("object_size", ["simple"], indirect=True)
@pytest.mark.parametrize(
"wrong_lifetime,wrong_expire_at,expected_error",
[
(None, None, LIFETIME_REQUIRED),
(0, 0, LIFETIME_REQUIRED),
(0, None, LIFETIME_REQUIRED),
(None, 0, LIFETIME_REQUIRED),
(-1, None, 'invalid argument "-1" for "--lifetime" flag'),
(None, -1, 'invalid argument "-1" for "-e, --expire-at" flag'),
],
)
def test_cannot_lock_object_without_lifetime(
self,
locked_storage_object: StorageObjectInfo,
wrong_lifetime: int,
wrong_expire_at: int,
expected_error: str,
):
"""
Cannot lock object without lifetime and expire_at fields
"""
lock_object_info = locked_storage_object.locks[0]
wallet_path = locked_storage_object.wallet
with pytest.raises(Exception, match=expected_error):
lock_object(
wallet_path,
lock_object_info.cid,
lock_object_info.oid,
self.shell,
self.cluster.default_rpc_endpoint,
lifetime=wrong_lifetime,
expire_at=wrong_expire_at,
)
@pytest.mark.sanity
@allure.title("Expired object is deleted when locks are expired (obj_size={object_size})")
def test_expired_object_should_be_deleted_after_locks_are_expired(
self,
user_container: StorageContainer,
object_size: ObjectSize,
):
"""
Expired object should be deleted after locks are expired
"""
current_epoch = self.ensure_fresh_epoch()
storage_object = user_container.generate_object(object_size.value, expire_at=current_epoch + 1)
with reporter.step("Lock object for couple epochs"):
lock_object(
storage_object.wallet,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
lifetime=2,
)
lock_object(
storage_object.wallet,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
expire_at=current_epoch + 2,
)
with reporter.step("Check object is not deleted at expiration time"):
self.tick_epochs(2)
# Must wait to ensure object is not deleted
wait_for_gc_pass_on_storage_nodes()
with expect_not_raises():
head_object(
storage_object.wallet,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
)
with reporter.step("Wait for object to be deleted after third epoch"):
self.tick_epoch()
check_object_not_found(
storage_object.wallet,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
)
@allure.title("Lock multiple objects at once (obj_size={object_size})")
def test_should_be_possible_to_lock_multiple_objects_at_once(
self,
user_container: StorageContainer,
object_size: ObjectSize,
):
"""
Should be possible to lock multiple objects at once
"""
current_epoch = ensure_fresh_epoch(self.shell, self.cluster)
storage_objects: list[StorageObjectInfo] = []
with reporter.step("Generate three objects"):
for _ in range(3):
storage_objects.append(user_container.generate_object(object_size.value, expire_at=current_epoch + 5))
lock_object(
storage_objects[0].wallet,
storage_objects[0].cid,
",".join([storage_object.oid for storage_object in storage_objects]),
self.shell,
self.cluster.default_rpc_endpoint,
expire_at=current_epoch + 1,
)
for storage_object in storage_objects:
with reporter.step(f"Try to delete object {storage_object.oid}"):
with pytest.raises(Exception, match=OBJECT_IS_LOCKED):
delete_object(
storage_object.wallet,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
)
with reporter.step("Tick two epochs"):
self.tick_epoch()
self.tick_epoch()
with expect_not_raises():
delete_objects(storage_objects, self.shell, self.cluster)
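# Editor's note: the lock_object call above passes a comma-joined OID list as the oid
# argument, which appears to create a single LOCK object covering all three generated
# objects at once; that shared lock is exactly what this test relies on.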
@allure.title("Outdated lock cannot be applied (obj_size={object_size})")
def test_already_outdated_lock_should_not_be_applied(
self,
user_container: StorageContainer,
object_size: ObjectSize,
):
"""
Already outdated lock should not be applied
"""
current_epoch = self.ensure_fresh_epoch()
storage_object = user_container.generate_object(object_size.value, expire_at=current_epoch + 1)
expiration_epoch = current_epoch - 1
with pytest.raises(
Exception,
match=LOCK_OBJECT_EXPIRATION.format(expiration_epoch=expiration_epoch, current_epoch=current_epoch),
):
lock_object(
storage_object.wallet,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
expire_at=expiration_epoch,
)
@pytest.mark.sanity
@allure.title("Delete object when lock is expired by lifetime (obj_size={object_size})")
@expect_not_raises()
def test_after_lock_expiration_with_lifetime_user_should_be_able_to_delete_object(
self,
user_container: StorageContainer,
object_size: ObjectSize,
):
"""
After lock expiration with lifetime user should be able to delete object
"""
current_epoch = self.ensure_fresh_epoch()
storage_object = user_container.generate_object(object_size.value, expire_at=current_epoch + 5)
lock_object(
storage_object.wallet,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
lifetime=1,
)
self.tick_epochs(2)
with expect_not_raises():
delete_object(
storage_object.wallet,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
)
@allure.title("Delete object when lock is expired by expire_at (obj_size={object_size})")
@expect_not_raises()
def test_after_lock_expiration_with_expire_at_user_should_be_able_to_delete_object(
self,
user_container: StorageContainer,
object_size: ObjectSize,
):
"""
After lock expiration with expire_at user should be able to delete object
"""
current_epoch = self.ensure_fresh_epoch()
storage_object = user_container.generate_object(object_size.value, expire_at=current_epoch + 5)
lock_object(
storage_object.wallet,
storage_object.cid,
storage_object.oid,
self.shell,
endpoint=self.cluster.default_rpc_endpoint,
expire_at=current_epoch + 1,
)
self.tick_epochs(2)
with expect_not_raises():
delete_object(
storage_object.wallet,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
)
@allure.title("Complex object chunks are protected from deletion")
@pytest.mark.parametrize(
# Only complex objects are required for this test
"object_size",
["complex"],
indirect=True,
)
def test_complex_object_chunks_should_also_be_protected_from_deletion(
self,
locked_storage_object: StorageObjectInfo,
):
"""
Complex object chunks should also be protected from deletion
"""
chunk_object_ids = get_storage_object_chunks(locked_storage_object, self.shell, self.cluster)
for chunk_object_id in chunk_object_ids:
with reporter.step(f"Try to delete chunk object {chunk_object_id}"):
with pytest.raises(Exception, match=OBJECT_IS_LOCKED):
delete_object(
locked_storage_object.wallet,
locked_storage_object.cid,
chunk_object_id,
self.shell,
self.cluster.default_rpc_endpoint,
)
@allure.title("Drop link object of locked complex object")
@pytest.mark.grpc_control
@pytest.mark.parametrize(
"object_size",
# Only complex object is required
["complex"],
indirect=True,
)
def test_link_object_of_locked_complex_object_can_be_dropped(self, new_locked_storage_object: StorageObjectInfo):
link_object_id = get_link_object(
new_locked_storage_object.wallet,
new_locked_storage_object.cid,
new_locked_storage_object.oid,
self.shell,
self.cluster.storage_nodes,
)
with reporter.step(f"Drop link object with id {link_object_id} from nodes"):
nodes_with_object = get_nodes_with_object(
new_locked_storage_object.cid,
link_object_id,
shell=self.shell,
nodes=self.cluster.storage_nodes,
)
for node in nodes_with_object:
with expect_not_raises():
drop_object(node, new_locked_storage_object.cid, link_object_id)
@allure.title("Drop chunks of locked complex object")
@pytest.mark.grpc_control
@pytest.mark.parametrize(
"object_size",
# Only complex object is required
["complex"],
indirect=True,
)
def test_chunks_of_locked_complex_object_can_be_dropped(self, new_locked_storage_object: StorageObjectInfo):
chunk_objects = get_storage_object_chunks(new_locked_storage_object, self.shell, self.cluster)
for chunk_object_id in chunk_objects:
with reporter.step(f"Drop chunk object with id {chunk_object_id} from nodes"):
nodes_with_object = get_nodes_with_object(
new_locked_storage_object.cid,
chunk_object_id,
shell=self.shell,
nodes=self.cluster.storage_nodes,
)
for node in nodes_with_object:
with expect_not_raises():
drop_object(node, new_locked_storage_object.cid, chunk_object_id)
@allure.title("Drop locked object (obj_size={object_size})")
@pytest.mark.grpc_control
def test_locked_object_can_be_dropped(self, new_locked_storage_object: StorageObjectInfo):
nodes_with_object = get_nodes_with_object(
new_locked_storage_object.cid,
new_locked_storage_object.oid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
)
for node in nodes_with_object:
with expect_not_raises():
drop_object(node, new_locked_storage_object.cid, new_locked_storage_object.oid)
@allure.title("Link object of complex object is protected from deletion")
@pytest.mark.parametrize(
# Only complex objects are required for this test
"object_size",
["complex"],
indirect=True,
)
def test_link_object_of_complex_object_should_also_be_protected_from_deletion(
self,
locked_storage_object: StorageObjectInfo,
):
"""
Link object of complex object should also be protected from deletion
"""
link_object_id = get_link_object(
locked_storage_object.wallet,
locked_storage_object.cid,
locked_storage_object.oid,
self.shell,
self.cluster.storage_nodes,
is_direct=False,
)
with reporter.step(f"Try to delete link object {link_object_id}"):
with pytest.raises(Exception, match=OBJECT_IS_LOCKED):
delete_object(
locked_storage_object.wallet,
locked_storage_object.cid,
link_object_id,
self.shell,
self.cluster.default_rpc_endpoint,
)
@allure.title("Expired object is removed after all locks are expired (obj_size={object_size})")
def test_expired_object_should_be_removed_after_relocks_expire_at(
self,
user_container: StorageContainer,
object_size: ObjectSize,
):
current_epoch = self.ensure_fresh_epoch()
storage_object = user_container.generate_object(object_size.value, expire_at=current_epoch + 1)
with reporter.step("Apply first lock to object for 3 epochs"):
lock_object_id_0 = lock_object(
storage_object.wallet,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
expire_at=current_epoch + 3,
)
self.tick_epochs(2)
with reporter.step("Check first lock is still available"):
verify_object_available(
storage_object.wallet,
storage_object.cid,
lock_object_id_0,
self.shell,
self.cluster.default_rpc_endpoint,
)
with reporter.step("Apply second lock to object for 3 more epochs"):
lock_object_id_1 = lock_object(
storage_object.wallet,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
expire_at=current_epoch + 5,
)
self.tick_epochs(2)
with reporter.step("Verify first lock is expired and removed"):
check_object_not_found(
storage_object.wallet,
storage_object.cid,
lock_object_id_0,
self.shell,
self.cluster.default_rpc_endpoint,
)
with reporter.step("Verify second lock is still available"):
verify_object_available(
storage_object.wallet,
storage_object.cid,
lock_object_id_1,
self.shell,
self.cluster.default_rpc_endpoint,
)
with reporter.step("Apply third lock to object for 3 more epochs"):
lock_object(
storage_object.wallet,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
expire_at=current_epoch + 7,
)
with reporter.step("Verify object is deleted after all locks are expired"):
self.tick_epochs(4)
check_object_not_found(
storage_object.wallet,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
)
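# The relock pattern above keeps an already-expired object alive while any lock on
# it is still unexpired. A sketch of the resulting removal epoch (illustrative only,
# assuming collection happens on the first epoch past both the object's own
# expiration and every lock expiration):
def _earliest_removal_epoch(object_expire_at: int, lock_expirations: list[int]) -> int:
    return max([object_expire_at, *lock_expirations]) + 1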
@pytest.mark.sanity
@allure.title("Two expired objects with one lock are deleted after lock expiration (obj_size={object_size})")
def test_two_objects_expiration_with_one_lock(
self,
user_container: StorageContainer,
object_size: ObjectSize,
):
current_epoch = self.ensure_fresh_epoch()
storage_objects: list[StorageObjectInfo] = []
with reporter.step("Generate two objects"):
for epoch_i in range(2):
storage_objects.append(user_container.generate_object(object_size.value, expire_at=current_epoch + epoch_i + 3))
self.tick_epoch()
with reporter.step("Lock objects for 4 epochs"):
lock_object(
storage_objects[0].wallet,
storage_objects[0].cid,
",".join([storage_object.oid for storage_object in storage_objects]),
self.shell,
self.cluster.default_rpc_endpoint,
expire_at=current_epoch + 4,
)
with reporter.step("Verify objects are available during next three epochs"):
for epoch_i in range(3):
self.tick_epoch()
with reporter.step(f"Check objects at epoch {current_epoch + epoch_i + 2}"):
for storage_object in storage_objects:
verify_object_available(
storage_object.wallet,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
)
with reporter.step("Verify objects are deleted after lock was expired"):
self.tick_epoch()
for storage_object in storage_objects:
check_object_not_found(
storage_object.wallet,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
)

View file

@ -1,62 +0,0 @@
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.resources.common import EXPIRATION_EPOCH_ATTRIBUTE
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode
from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import TestFile
@pytest.mark.nightly
@pytest.mark.grpc_api
class TestObjectTombstone(ClusterTestBase):
@pytest.fixture()
@allure.title("Change tombstone lifetime")
def tombstone_lifetime(self, cluster_state_controller: ClusterStateController, request: pytest.FixtureRequest):
config_manager = cluster_state_controller.manager(ConfigStateManager)
config_manager.set_on_all_nodes(StorageNode, {"object:delete:tombstone_lifetime": request.param}, True)
yield f"Tombstone lifetime was changed to {request.param}"
config_manager.revert_all(True)
@pytest.mark.parametrize("object_size, tombstone_lifetime", [("simple", 2)], indirect=True)
@allure.title("Tombstone object should be removed after expiration")
def test_tombstone_lifetime(
self,
new_epoch: int,
container: str,
grpc_client: GrpcClientWrapper,
test_file: TestFile,
rpc_endpoint: str,
tombstone_lifetime: str,
):
allure.dynamic.description(tombstone_lifetime)
with reporter.step("Put object"):
oid = grpc_client.object.put(test_file.path, container, rpc_endpoint)
with reporter.step("Remove object"):
tombstone_oid = grpc_client.object.delete(container, oid, rpc_endpoint)
with reporter.step("Get tombstone object lifetime"):
tombstone_info = grpc_client.object.head(container, tombstone_oid, rpc_endpoint)
tombstone_expiration_epoch = tombstone_info["header"]["attributes"][EXPIRATION_EPOCH_ATTRIBUTE]
with reporter.step("Tombstone lifetime should be <= 3"):
epochs_to_skip = int(tombstone_expiration_epoch) - new_epoch + 1
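# With tombstone_lifetime=2 (parametrized above), the expiration attribute is
# expected to equal new_epoch + 2, so epochs_to_skip works out to 3 and the
# assertion below holds.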
assert epochs_to_skip <= 3
with reporter.step("Wait for tombstone expiration"):
self.tick_epochs(epochs_to_skip)
with reporter.step("Tombstone should be removed after expiration"):
with pytest.raises(RuntimeError, match=OBJECT_NOT_FOUND):
grpc_client.object.head(container, tombstone_oid, rpc_endpoint)
with pytest.raises(RuntimeError, match=OBJECT_NOT_FOUND):
grpc_client.object.get(container, tombstone_oid, rpc_endpoint)

View file

@ -1,402 +0,0 @@
import logging
import re
from typing import Literal
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
from frostfs_testlib.resources.error_patterns import OBJECT_IS_LOCKED
from frostfs_testlib.shell import Shell
from frostfs_testlib.shell.interfaces import CommandResult
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils.file_utils import TestFile, get_file_hash
logger = logging.getLogger("NeoLogger")
def parse_oid(response: CommandResult, response_type: Literal["tombstone", "patch"] | None = None) -> str:
if response_type == "tombstone":
id_str = response.stdout.split("\n")[1]
oid = id_str.split(":")[1]
return oid.strip()
if response_type == "patch":
return response.stdout.split(":")[1].strip()
id_str = response.stdout.strip().split("\n")[-2]
oid = id_str.split(":")[1]
return oid.strip()
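# Illustrative only: hypothetical stdout shapes that parse_oid() relies on. Exact CLI
# wording may differ; the parser depends only on line positions and the "label: value"
# colon split.
#   put (default):      "...\n  ID: <oid>\n  CID: <cid>"      -> OID taken from the second-to-last line
#   delete (tombstone): header line, then "  ID: <oid>\n..."  -> OID taken from the second line
#   patch:              "Patched object ID: <oid>"            -> OID taken after the first colon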
@pytest.mark.nightly
@pytest.mark.grpc_api
@pytest.mark.grpc_without_user
class TestObjectApiWithoutUser(ClusterTestBase):
@pytest.fixture(scope="class")
def cli_without_wallet(self, client_shell: Shell) -> FrostfsCli:
return FrostfsCli(client_shell, FROSTFS_CLI_EXEC)
@allure.title("Get public container by native API with generate private key")
def test_get_container_with_generated_key(self, cli_without_wallet: FrostfsCli, container: str, rpc_endpoint: str):
"""
Validate `container get` native API with flag `--generate-key`.
"""
with reporter.step("Get container with generate key"):
with expect_not_raises():
cli_without_wallet.container.get(rpc_endpoint, container, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
@allure.title("Get list containers by native API with generate private key")
def test_list_containers_with_generated_key(
self, cli_without_wallet: FrostfsCli, default_wallet: WalletInfo, container: str, rpc_endpoint: str
):
"""
Validate `container list` native API with flag `--generate-key`.
"""
owner = default_wallet.get_address_from_json(0)
with reporter.step("List containers with generate key"):
with expect_not_raises():
result = cli_without_wallet.container.list(rpc_endpoint, owner=owner, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
with reporter.step("Expect container in received containers list"):
containers = result.stdout.split()
assert container in containers
@allure.title("Get list of public container objects by native API with generate private key")
def test_list_objects_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, rpc_endpoint: str):
"""
Validate `container list_objects` native API with flag `--generate-key`.
"""
with reporter.step("List objects with generate key"):
with expect_not_raises():
result = cli_without_wallet.container.list_objects(rpc_endpoint, container, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
with reporter.step("Expect empty objects list"):
objects = result.stdout.split()
assert len(objects) == 0, objects
@allure.title("Search public container nodes by native API with generate private key")
def test_search_nodes_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, rpc_endpoint: str):
"""
Validate `container search_node` native API with flag `--generate-key`.
"""
with reporter.step("Search nodes with generate key"):
with expect_not_raises():
cli_without_wallet.container.search_node(rpc_endpoint, container, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
@allure.title("Put object into public container by native API with generate private key (obj_size={object_size})")
def test_put_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, test_file: TestFile, rpc_endpoint: str):
"""
Validate `object put` into container with public ACL and flag `--generate-key`.
"""
with reporter.step("Put object with generate key"):
with expect_not_raises():
result = cli_without_wallet.object.put(
rpc_endpoint,
container,
test_file,
generate_key=True,
no_progress=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
oid = parse_oid(result)
with reporter.step("List objects with generate key"):
result = cli_without_wallet.container.list_objects(rpc_endpoint, container, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
with reporter.step("Expect object in received objects list"):
objects = result.stdout.split()
assert oid in objects, objects
@allure.title("Get public container object by native API with generate private key (obj_size={object_size})")
def test_get_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, test_file: TestFile, rpc_endpoint: str):
"""
Validate `object get` for container with public ACL and flag `--generate-key`.
"""
expected_hash = get_file_hash(test_file)
with reporter.step("Put object with generate key"):
result = cli_without_wallet.object.put(
rpc_endpoint,
container,
test_file,
generate_key=True,
no_progress=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
oid = parse_oid(result)
with reporter.step("Get object with generate key"):
with expect_not_raises():
cli_without_wallet.object.get(
rpc_endpoint,
container,
oid,
file=test_file,
generate_key=True,
no_progress=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
downloaded_hash = get_file_hash(test_file)
with reporter.step("Validate downloaded file"):
assert expected_hash == downloaded_hash
@allure.title("Head public container object by native API with generate private key (obj_size={object_size})")
def test_head_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, test_file: TestFile, rpc_endpoint: str):
"""
Validate `object head` for container with public ACL and flag `--generate-key`.
"""
with reporter.step("Put object with generate key"):
result = cli_without_wallet.object.put(
rpc_endpoint,
container,
test_file,
generate_key=True,
no_progress=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
oid = parse_oid(result)
with reporter.step("Head object with generate key"):
with expect_not_raises():
cli_without_wallet.object.head(rpc_endpoint, container, oid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
@allure.title("Delete public container object by native API with generate private key (obj_size={object_size})")
def test_delete_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, test_file: TestFile, rpc_endpoint: str):
"""
Validate `object delete` for container with public ACL and flag `--generate-key`.
"""
with reporter.step("Put object with generate key"):
result = cli_without_wallet.object.put(
rpc_endpoint,
container,
test_file,
generate_key=True,
no_progress=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
oid = parse_oid(result)
with reporter.step("Delete object with generate key"):
with expect_not_raises():
result = cli_without_wallet.object.delete(rpc_endpoint, container, oid, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
oid = parse_oid(result, response_type="tombstone")
with reporter.step("Head object with generate key"):
result = cli_without_wallet.object.head(
rpc_endpoint,
container,
oid,
generate_key=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
with reporter.step("Expect object type TOMBSTONE"):
object_type = re.search(r"(?<=type: )tombstone", result.stdout, re.IGNORECASE).group()
assert object_type == "TOMBSTONE", object_type
@allure.title("Patch object in public container with generate private key (obj_size={object_size})")
def test_patch_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, test_file: TestFile, rpc_endpoint: str):
with reporter.step("Put object with generate key"):
result = cli_without_wallet.object.put(
rpc_endpoint,
container,
test_file,
generate_key=True,
no_progress=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
oid = parse_oid(result)
with reporter.step("Patch object with generate key"):
with expect_not_raises():
result = cli_without_wallet.object.patch(
rpc_endpoint,
container,
oid,
["0:500"],
[test_file],
generate_key=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
patched_oid = parse_oid(result, response_type="patch")
assert oid != patched_oid, "Patched object must have new object id"
@allure.title("Lock public container object by native API with generate private key (obj_size={object_size})")
def test_lock_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, test_file: TestFile, rpc_endpoint: str):
"""
Validate `object lock` for container with public ACL and flag `--generate-key`.
Attempt to delete the locked object.
"""
with reporter.step("Put object with generate key"):
result = cli_without_wallet.object.put(
rpc_endpoint,
container,
test_file,
generate_key=True,
no_progress=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
oid = parse_oid(result)
with reporter.step("Lock object with generate key"):
with expect_not_raises():
cli_without_wallet.object.lock(
rpc_endpoint,
container,
oid,
generate_key=True,
timeout=CLI_DEFAULT_TIMEOUT,
lifetime=5,
)
with reporter.step("Delete locked object with generate key and expect error"):
with pytest.raises(Exception, match=OBJECT_IS_LOCKED):
cli_without_wallet.object.delete(
rpc_endpoint,
container,
oid,
generate_key=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
@allure.title("Search public container objects by native API with generate private key (obj_size={object_size})")
def test_search_object_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, test_file: TestFile, rpc_endpoint: str):
"""
Validate `object search` for container with public ACL and flag `--generate-key`.
"""
with reporter.step("Put object with generate key"):
result = cli_without_wallet.object.put(
rpc_endpoint,
container,
test_file,
generate_key=True,
no_progress=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
oid = parse_oid(result)
with reporter.step("Object search with generate key"):
with expect_not_raises():
result = cli_without_wallet.object.search(rpc_endpoint, container, generate_key=True, timeout=CLI_DEFAULT_TIMEOUT)
with reporter.step("Expect object in received objects list of container"):
object_ids = re.findall(r"(\w{43,44})", result.stdout)
assert oid in object_ids
@allure.title("Get range of public container object by native API with generate private key (obj_size={object_size})")
def test_range_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, test_file: TestFile, rpc_endpoint: str):
"""
Validate `object range` for container with public ACL and `--generate-key`.
"""
with reporter.step("Put object with generate key"):
result = cli_without_wallet.object.put(
rpc_endpoint,
container,
test_file,
generate_key=True,
no_progress=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
oid = parse_oid(result)
with reporter.step("Get range of object with generate key"):
with expect_not_raises():
cli_without_wallet.object.range(
rpc_endpoint,
container,
oid,
"0:10",
file=test_file,
generate_key=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
@allure.title("Get hash of public container object by native API with generate private key (obj_size={object_size})")
def test_hash_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, test_file: TestFile, rpc_endpoint: str):
"""
Validate `object hash` for container with public ACL and `--generate-key`.
"""
with reporter.step("Put object with generate key"):
result = cli_without_wallet.object.put(
rpc_endpoint,
container,
test_file,
generate_key=True,
no_progress=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
oid = parse_oid(result)
with reporter.step("Get range hash of object with generate key"):
with expect_not_raises():
cli_without_wallet.object.hash(
rpc_endpoint,
container,
oid,
range="0:10",
generate_key=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
@allure.title("Get public container object nodes by native API with generate private key (obj_size={object_size})")
def test_nodes_with_generate_key(self, cli_without_wallet: FrostfsCli, container: str, test_file: TestFile, rpc_endpoint: str):
"""
Validate `object nodes` for container with public ACL and `--generate-key`.
"""
with reporter.step("Put object with generate key"):
result = cli_without_wallet.object.put(
rpc_endpoint,
container,
test_file,
no_progress=True,
generate_key=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
oid = parse_oid(result)
with reporter.step("Configure frostfs-cli for alive remote node"):
alive_node = self.cluster.cluster_nodes[0]
node_shell = alive_node.host.get_shell()
alive_endpoint = alive_node.storage_node.get_rpc_endpoint()
node_frostfs_cli_wo_wallet = FrostfsCli(node_shell, FROSTFS_CLI_EXEC)
with reporter.step("Get object nodes with generate key"):
with expect_not_raises():
node_frostfs_cli_wo_wallet.object.nodes(
alive_endpoint,
container,
oid=oid,
generate_key=True,
timeout=CLI_DEFAULT_TIMEOUT,
)

View file

@ -0,0 +1,76 @@
import logging
import os
import allure
import pytest
import yaml
from common import ASSETS_DIR, FREE_STORAGE, NEOFS_CLI_EXEC, NEOFS_ENDPOINT, WALLET_CONFIG
from neofs_testlib.cli import NeofsCli
from python_keywords.payment_neogo import _address_from_wallet
from wallet import init_wallet
logger = logging.getLogger("NeoLogger")
DEPOSIT_AMOUNT = 30
@pytest.mark.payments
@pytest.mark.skipif(FREE_STORAGE, reason="Test only works on public network with paid storage")
class TestBalanceAccounting:
@pytest.fixture(autouse=True)
def prepare_two_wallets(self, prepare_wallet_and_deposit):
self.user_wallet = prepare_wallet_and_deposit
self.address = _address_from_wallet(self.user_wallet, "")
_, self.another_address, _ = init_wallet(ASSETS_DIR)
@allure.title("Test balance request with wallet and address")
def test_balance_wallet_address(self, client_shell):
cli = NeofsCli(client_shell, NEOFS_CLI_EXEC, WALLET_CONFIG)
output = cli.accounting.balance(
wallet=self.user_wallet,
rpc_endpoint=NEOFS_ENDPOINT,
address=self.address,
)
assert int(output.stdout.rstrip()) == DEPOSIT_AMOUNT
@allure.title("Test balance request with wallet only")
def test_balance_wallet(self, client_shell):
cli = NeofsCli(client_shell, NEOFS_CLI_EXEC, WALLET_CONFIG)
output = cli.accounting.balance(wallet=self.user_wallet, rpc_endpoint=NEOFS_ENDPOINT)
assert int(output.stdout.rstrip()) == DEPOSIT_AMOUNT
@allure.title("Test balance request with wallet and wrong address")
def test_balance_wrong_address(self, client_shell):
with pytest.raises(Exception, match="address option must be specified and valid"):
cli = NeofsCli(client_shell, NEOFS_CLI_EXEC, WALLET_CONFIG)
cli.accounting.balance(
wallet=self.user_wallet,
rpc_endpoint=NEOFS_ENDPOINT,
address=self.another_address,
)
@allure.title("Test balance request with config file")
def test_balance_api(self, prepare_tmp_dir, client_shell):
config_file = self.write_api_config(
config_dir=prepare_tmp_dir, endpoint=NEOFS_ENDPOINT, wallet=self.user_wallet
)
logger.info(f"Config with API endpoint: {config_file}")
cli = NeofsCli(client_shell, NEOFS_CLI_EXEC, config_file=config_file)
result = cli.accounting.balance()
assert int(result.stdout.rstrip()) == DEPOSIT_AMOUNT
@staticmethod
@allure.step("Write config with API endpoint")
def write_api_config(config_dir: str, endpoint: str, wallet: str) -> str:
with open(WALLET_CONFIG, "r") as file:
wallet_config = yaml.full_load(file)
api_config = {
**wallet_config,
"rpc-endpoint": endpoint,
"wallet": wallet,
}
api_config_file = os.path.join(config_dir, "neofs-cli-api-config.yaml")
with open(api_config_file, "w") as file:
yaml.dump(api_config, file)
return api_config_file
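# For reference, a rough sketch of the file written above (hypothetical values; the
# pre-existing keys are whatever WALLET_CONFIG already contains):
#
#   password: ""
#   rpc-endpoint: s01.neofs.devenv:8080
#   wallet: /path/to/user-wallet.json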

View file

@ -1,770 +0,0 @@
import json
import time
import allure
import pytest
import yaml
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsAdm, FrostfsCli
from frostfs_testlib.cli.netmap_parser import NetmapParser
from frostfs_testlib.clients import AwsCliClient, S3ClientWrapper
from frostfs_testlib.clients.s3 import BucketContainerResolver, VersioningStatus
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC
from frostfs_testlib.resources.common import COMPLEX_OBJECT_CHUNKS_COUNT, COMPLEX_OBJECT_TAIL_SIZE, HOSTING_CONFIG_FILE, MORPH_BLOCK_TIME
from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode
from frostfs_testlib.storage.controllers import ClusterStateController
from frostfs_testlib.storage.controllers.state_managers.config_state_manager import ConfigStateManager
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk
from frostfs_testlib.storage.grpc_operations.interfaces_wrapper import GrpcClientWrapper
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils import datetime_utils
from frostfs_testlib.utils.file_utils import TestFile, generate_file, get_file_hash
from ...helpers.container_request import PUBLIC_WITH_POLICY, requires_container
from ...resources.common import S3_POLICY_FILE_LOCATION
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
if "ec_policy" not in metafunc.fixturenames:
return
with open(HOSTING_CONFIG_FILE, "r") as file:
hosting_config = yaml.full_load(file)
node_count = len(hosting_config["hosts"])
ec_map = {
4: ["EC 1.1", "EC 2.1", "EC 3.1", "EC 2.2"],
8: ["EC 5.3", "EC 3.2", "EC 7.1", "EC 4.4", "EC 3.1"],
16: ["EC 12.4", "EC 8.4", "EC 5.3", "EC 4.4"],
100: ["EC 12.4", "EC 8.4", "EC 5.3", "EC 4.4"],
}
nearest_node_count = ([4] + (list(filter(lambda x: x <= node_count, ec_map.keys()))))[-1]
metafunc.parametrize("ec_policy, node_count", ((ec_policy, node_count) for ec_policy in ec_map[nearest_node_count]))
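# Illustrative walk-through of the hook above, assuming a devenv with 8 hosts:
# nearest_node_count resolves to 8 (the largest ec_map key not exceeding the host
# count, with 4 as the floor for small clusters), so the test is parametrized with
# ("EC 5.3", 8), ("EC 3.2", 8), ("EC 7.1", 8), ("EC 4.4", 8) and ("EC 3.1", 8).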
@allure.title("Initialized remote FrostfsAdm")
@pytest.fixture
def frostfs_remote_adm(cluster: Cluster) -> FrostfsAdm:
node = cluster.cluster_nodes[0]
shell = node.host.get_shell()
return FrostfsAdm(shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH)
@pytest.mark.nightly
@pytest.mark.replication
@pytest.mark.ec_replication
class TestECReplication(ClusterTestBase):
def get_node_cli(self, cluster_node: ClusterNode, config: str) -> FrostfsCli:
shell = cluster_node.host.get_shell()
cli = FrostfsCli(shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=config)
# Accumulate endpoints instead of overwriting, so every CLI created here is reverted by restore_nodes_shards_mode
self.cli_change_shards_mode: dict[FrostfsCli, str] = getattr(self, "cli_change_shards_mode", {})
self.cli_change_shards_mode[cli] = cluster_node.storage_node.get_control_endpoint()
return cli
@pytest.fixture()
def restore_nodes_shards_mode(self):
yield
for cli, endpoint in self.cli_change_shards_mode.items():
cli.shards.set_mode(endpoint, mode="read-write", all=True)
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
@pytest.fixture()
def rep_count(self, object_size: ObjectSize) -> int:
rep_count = 3
if object_size.name == "complex":
rep_count *= int(COMPLEX_OBJECT_CHUNKS_COUNT) + 1 if COMPLEX_OBJECT_TAIL_SIZE else int(COMPLEX_OBJECT_CHUNKS_COUNT)
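# e.g. with COMPLEX_OBJECT_CHUNKS_COUNT=4 and a non-zero tail (hypothetical devenv
# values), a complex object is expected to produce 3 * (4 + 1) = 15 chunk copies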
return rep_count
@wait_for_success(120, 5)
def wait_replication(self, total_chunks: int, client: GrpcClientWrapper, cid: str, oid: str, success: bool = True) -> None:
if not success:
assert not self.check_replication(total_chunks, client, cid, oid)
else:
assert self.check_replication(total_chunks, client, cid, oid)
@allure.title("Restore chunk maximum params in network params ")
@pytest.fixture
def restore_network_config(self, frostfs_remote_adm: FrostfsAdm) -> None:
yield
frostfs_remote_adm.morph.set_config(set_key_value='"MaxECDataCount=12" "MaxECParityCount=5"')
@reporter.step("Get object nodes output ")
def get_object_nodes(self, cli: FrostfsCli, cid: str, oid: str, endpoint: str = None) -> dict:
if not endpoint:
endpoint = self.cluster.default_rpc_endpoint
object_nodes = json.loads(cli.object.nodes(endpoint, cid, oid=oid, json=True, timeout=CLI_DEFAULT_TIMEOUT).stdout)
if object_nodes.get("errors"):
raise object_nodes["errors"]
return object_nodes
@reporter.step("Get parity chunk ")
def get_parity_chunk_object(self, cli: FrostfsCli, cid: str, oid: str, endpoint: str = None) -> Chunk:
chunks = self.get_object_nodes(cli, cid, oid, endpoint)["data_objects"]
return Chunk(**chunks[-1])
@reporter.step("Get data chunk ")
def get_data_chunk_object(self, cli: FrostfsCli, cid: str, oid: str, endpoint: str = None) -> Chunk:
chunks = self.get_object_nodes(cli, cid, oid, endpoint)["data_objects"]
return Chunk(**chunks[0])
@reporter.step("Check replication chunks={total_chunks} chunks ")
def check_replication(self, total_chunks: int, client: GrpcClientWrapper, cid: str, oid: str) -> bool:
object_nodes_info = client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, oid)
return len(object_nodes_info) == total_chunks
@pytest.fixture()
def include_excluded_nodes(self, cluster_state_controller: ClusterStateController):
yield
cluster_state_controller.include_all_excluded_nodes()
@allure.title("Disable Policer on all nodes")
@pytest.fixture()
def disable_policer(self, cluster_state_controller: ClusterStateController) -> None:
with reporter.step("Disable policer for nodes"):
cluster_state_controller.manager(ConfigStateManager).set_on_all_nodes(
service_type=StorageNode, values={"policer": {"unsafe_disable": True}}
)
yield
with reporter.step("Enable policer for nodes"):
cluster_state_controller.start_stopped_hosts()
cluster_state_controller.manager(ConfigStateManager).revert_all()
@wait_for_success(300, 15)
@reporter.step("Check count nodes chunks")
def wait_sync_count_chunks_nodes(self, grpc_client: GrpcClientWrapper, cid: str, oid: str, count: int):
all_chunks_after_include_node = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, oid)
chunks_nodes = [node for chunk in all_chunks_after_include_node for node in chunk.confirmed_nodes]
assert len(chunks_nodes) == count
@allure.title("Create container with EC policy (size={object_size})")
@requires_container(PUBLIC_WITH_POLICY("EC 2.1"))
def test_create_container_with_ec_policy(
self, container: str, rep_count: int, grpc_client: GrpcClientWrapper, remote_grpc_client: GrpcClientWrapper, test_file: TestFile
) -> None:
with reporter.step("Put object in container."):
oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)
with reporter.step("Check replication chunks."):
assert self.check_replication(rep_count, remote_grpc_client, container, oid)
@allure.title("Lose node with chunk data")
@pytest.mark.failover
@requires_container(PUBLIC_WITH_POLICY("EC 3.1"))
def test_lose_node_with_data_chunk(
self,
grpc_client: GrpcClientWrapper,
remote_grpc_client: GrpcClientWrapper,
simple_object_size: ObjectSize,
cluster_state_controller: ClusterStateController,
container: str,
disable_policer: None,
) -> None:
with reporter.step("Put object in container."):
test_file = generate_file(simple_object_size.value)
oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)
with reporter.step("Check chunk replication on 4 nodes."):
assert self.check_replication(4, remote_grpc_client, container, oid)
with reporter.step("Search node data chunk"):
chunk = remote_grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, container, oid=oid)
chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, chunk)
with reporter.step("Stop node with data chunk."):
cluster_state_controller.stop_node_host(chunk_node[0], "hard")
with reporter.step("Get object"):
node = list(set(self.cluster.cluster_nodes) - {chunk_node[0]})[0]
grpc_client.object.get(container, oid, node.storage_node.get_rpc_endpoint())
with reporter.step("Start stopped node, and check replication chunks."):
cluster_state_controller.start_node_host(chunk_node[0])
self.wait_replication(4, remote_grpc_client, container, oid)
@allure.title("Lose node with chunk parity")
@pytest.mark.failover
@requires_container(PUBLIC_WITH_POLICY("EC 3.1"))
def test_lose_node_with_parity_chunk(
self,
grpc_client: GrpcClientWrapper,
remote_grpc_client: GrpcClientWrapper,
simple_object_size: ObjectSize,
cluster_state_controller: ClusterStateController,
container: str,
disable_policer: None,
) -> None:
with reporter.step("Put object in container."):
test_file = generate_file(simple_object_size.value)
oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)
with reporter.step("Check chunk replication on 4 nodes."):
assert self.check_replication(4, remote_grpc_client, container, oid)
with reporter.step("Search node with parity chunk"):
chunk = remote_grpc_client.object.chunks.get_parity(self.cluster.default_rpc_endpoint, container, oid=oid)
chunk_node = remote_grpc_client.object.chunks.get_chunk_node(self.cluster, chunk)[0]
with reporter.step("Stop node parity chunk."):
cluster_state_controller.stop_node_host(chunk_node, "hard")
with reporter.step("Get object, expect success."):
node = list(set(self.cluster.cluster_nodes) - {chunk_node})[0]
grpc_client.object.get(container, oid, node.storage_node.get_rpc_endpoint())
with reporter.step("Start stoped node, and check replication chunks."):
cluster_state_controller.start_node_host(chunk_node)
self.wait_replication(4, remote_grpc_client, container, oid)
@allure.title("Lose nodes with chunk data and parity")
@pytest.mark.failover
@requires_container(PUBLIC_WITH_POLICY("EC 3.1"))
def test_lose_nodes_data_chunk_and_parity(
self,
grpc_client: GrpcClientWrapper,
remote_grpc_client: GrpcClientWrapper,
simple_object_size: ObjectSize,
cluster_state_controller: ClusterStateController,
container: str,
disable_policer: None,
) -> None:
with reporter.step("Put object in container."):
test_file = generate_file(simple_object_size.value)
oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)
with reporter.step("Check count chunks, expect 4."):
assert self.check_replication(4, remote_grpc_client, container, oid)
with reporter.step("Search node data chunk and node parity chunk"):
data_chunk = remote_grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, container, oid=oid)
data_chunk_node = remote_grpc_client.object.chunks.get_chunk_node(self.cluster, data_chunk)[0]
parity_chunk = remote_grpc_client.object.chunks.get_parity(self.cluster.default_rpc_endpoint, container, oid=oid)
parity_chunk_node = remote_grpc_client.object.chunks.get_chunk_node(self.cluster, parity_chunk)[0]
with reporter.step("Stop node with data chunk."):
cluster_state_controller.stop_node_host(data_chunk_node, "hard")
with reporter.step("Get object"):
node = list(set(self.cluster.cluster_nodes) - {data_chunk_node, parity_chunk_node})[0]
grpc_client.object.get(container, oid, node.storage_node.get_rpc_endpoint())
with reporter.step("Start stopped host and check chunks."):
cluster_state_controller.start_node_host(data_chunk_node)
self.wait_replication(4, remote_grpc_client, container, oid)
with reporter.step("Stop node with parity chunk and one all node."):
cluster_state_controller.stop_node_host(data_chunk_node, "hard")
cluster_state_controller.stop_node_host(parity_chunk_node, "hard")
with reporter.step("Get object, expect error."):
with pytest.raises(RuntimeError):
grpc_client.object.get(container, oid, node.storage_node.get_rpc_endpoint())
with reporter.step("Start stopped nodes and check replication chunk."):
cluster_state_controller.start_stopped_hosts()
self.wait_replication(4, remote_grpc_client, container, oid)
@allure.title("Policer work with chunk")
@pytest.mark.failover
@requires_container(PUBLIC_WITH_POLICY("EC 2.1"))
def test_work_policer_with_nodes(
self,
simple_object_size: ObjectSize,
grpc_client: GrpcClientWrapper,
remote_grpc_client: GrpcClientWrapper,
cluster_state_controller: ClusterStateController,
container: str,
include_excluded_nodes: None,
) -> None:
with reporter.step("Put object on container."):
test_file = generate_file(simple_object_size.value)
oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)
with reporter.step("Check count chunks nodes on 3."):
assert self.check_replication(3, remote_grpc_client, container, oid)
with reporter.step("Search node with chunk."):
data_chunk = remote_grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, container, oid=oid)
node_data_chunk = remote_grpc_client.object.chunks.get_chunk_node(self.cluster, data_chunk)[0]
first_all_chunks = remote_grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)
with reporter.step("Remove chunk node from network map"):
cluster_state_controller.remove_node_from_netmap([node_data_chunk.storage_node])
with reporter.step("Tick epoch."):
alive_node = list(set(self.cluster.cluster_nodes) - {node_data_chunk})[0]
self.tick_epoch(alive_node.storage_node, 2)
with reporter.step("Wait replication chunk with different node."):
node = grpc_client.object.chunks.search_node_without_chunks(
first_all_chunks, self.cluster, alive_node.storage_node.get_rpc_endpoint()
)[0]
self.wait_replication(3, remote_grpc_client, container, oid)
with reporter.step("Get new chunks"):
second_all_chunks = remote_grpc_client.object.chunks.get_all(node.storage_node.get_rpc_endpoint(), container, oid)
with reporter.step("Check that oid no change."):
assert [chunk for chunk in second_all_chunks if data_chunk.object_id == chunk.object_id]
with reporter.step("Include node in netmap"):
cluster_state_controller.include_node_to_netmap(node_data_chunk.storage_node, alive_node.storage_node)
self.wait_sync_count_chunks_nodes(remote_grpc_client, container, oid, 3)
@allure.title("EC X.Y combinations (nodes={node_count},policy={ec_policy},size={object_size})")
def test_create_container_with_difference_count_nodes(
self,
frostfs_cli: FrostfsCli,
node_count: int,
ec_policy: str,
object_size: ObjectSize,
grpc_client: GrpcClientWrapper,
remote_grpc_client: GrpcClientWrapper,
) -> None:
with reporter.step("Create container."):
expected_chunks = int(ec_policy.split(" ")[1].split(".")[0]) + int(ec_policy.split(" ")[1].split(".")[1])
if "complex" in object_size.name:
expected_chunks *= 4
cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy=ec_policy, await_mode=True)
with reporter.step("Apply Ape rule for container"):
frostfs_cli.ape_manager.add(
self.cluster.default_rpc_endpoint, chain_id="allowAll", target_name=cid, target_type="container", rule="allow Object.* *"
)
with reporter.step("Wait for one block"):
self.wait_for_blocks()
with reporter.step("Put object in container."):
test_file = generate_file(object_size.value)
oid = grpc_client.object.put(test_file, cid, self.cluster.default_rpc_endpoint)
with reporter.step("Check count object chunks."):
chunks = remote_grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, oid)
assert len(chunks) == expected_chunks
with reporter.step("get object and check hash."):
file_with_node = grpc_client.object.get(cid, oid, self.cluster.default_rpc_endpoint)
assert get_file_hash(test_file) == get_file_hash(file_with_node)
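# The expected-chunk arithmetic above, factored out as an illustrative helper (not
# used by the suite): an "EC X.Y" policy stores X data plus Y parity chunks, and a
# complex object is assumed here to split into four parts, each coded separately.
def _expected_ec_chunks(ec_policy: str, object_kind: str) -> int:
    data, parity = map(int, ec_policy.split(" ")[1].split("."))
    return (data + parity) * (4 if "complex" in object_kind else 1)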
@allure.title("Request PUT with copies_number flag")
@requires_container(PUBLIC_WITH_POLICY("EC 2.1"))
def test_put_object_with_copies_number(
self, container: str, grpc_client: GrpcClientWrapper, remote_grpc_client: GrpcClientWrapper, simple_object_size: ObjectSize
) -> None:
with reporter.step("Put object in container with copies number = 1"):
test_file = generate_file(simple_object_size.value)
oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint, copies_number=1)
with reporter.step("Check that count chunks > 1."):
chunks = remote_grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)
assert len(chunks) > 1
@allure.title("Request PUT and 1 node off")
@pytest.mark.failover
@requires_container(PUBLIC_WITH_POLICY("EC 3.1"))
def test_put_object_with_off_cnr_node(
self,
container: str,
grpc_client: GrpcClientWrapper,
cluster_state_controller: ClusterStateController,
simple_object_size: ObjectSize,
) -> None:
with reporter.step("Stop one node in container nodes"):
cluster_state_controller.stop_node_host(self.cluster.cluster_nodes[1], "hard")
with reporter.step("Put object in container, expect success for EC container."):
test_file = generate_file(simple_object_size.value)
grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint, copies_number=1)
@allure.title("Request PUT (size={object_size})")
@requires_container(PUBLIC_WITH_POLICY("EC 2.1"))
def test_put_object_with_ec_cnr(
self, container: str, grpc_client: GrpcClientWrapper, remote_grpc_client: GrpcClientWrapper, object_size: ObjectSize
) -> None:
with reporter.step("Put object in container"):
test_file = generate_file(object_size.value)
oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)
with reporter.step("Get chunks object."):
chunks = remote_grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)
with reporter.step("Check header chunks object"):
for chunk in chunks:
chunk_head = grpc_client.object.head(
container, chunk.object_id, self.cluster.default_rpc_endpoint, is_raw=True, json_output=False
).stdout
assert "EC header:" in chunk_head
@allure.title("Request GET (size={object_size})")
@requires_container(PUBLIC_WITH_POLICY("EC 2.1 CBF 1"))
def test_get_object_in_ec_cnr(
self, container: str, grpc_client: GrpcClientWrapper, remote_grpc_client: GrpcClientWrapper, object_size: ObjectSize
) -> None:
with reporter.step("Put object in container"):
test_file = generate_file(object_size.value)
hash_origin_file = get_file_hash(test_file)
oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)
with reporter.step("Get id all chunks."):
chunks = remote_grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)
with reporter.step("Search chunk node and not chunks node."):
chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, chunks[0])[0]
not_chunk_node = grpc_client.object.chunks.search_node_without_chunks(chunks, self.cluster, self.cluster.default_rpc_endpoint)[
0
]
with reporter.step("GET request with chunk node, expect success"):
file_one = grpc_client.object.get(container, oid, chunk_node.storage_node.get_rpc_endpoint())
hash_file_one = get_file_hash(file_one)
assert hash_file_one == hash_origin_file
with reporter.step("Get request with not chunk node"):
file_two = grpc_client.object.get(container, oid, not_chunk_node.storage_node.get_rpc_endpoint())
hash_file_two = get_file_hash(file_two)
assert hash_file_two == hash_file_one == hash_origin_file
@allure.title("Request SEARCH with flags 'root' (size={object_size})")
@requires_container(PUBLIC_WITH_POLICY("EC 2.1"))
def test_search_object_in_ec_cnr_root_flags(self, container: str, grpc_client: GrpcClientWrapper, object_size: ObjectSize) -> None:
with reporter.step("Put object in container"):
test_file = generate_file(object_size.value)
oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)
with reporter.step("Search operation with --root flags"):
search_output = grpc_client.object.search(container, self.cluster.default_rpc_endpoint, root=True)
assert search_output[0] == oid
@allure.title("Request SEARCH check valid chunk id (size={object_size})")
@requires_container(PUBLIC_WITH_POLICY("EC 2.1"))
def test_search_object_in_ec_cnr_chunk_id(
self, container: str, grpc_client: GrpcClientWrapper, remote_grpc_client: GrpcClientWrapper, object_size: ObjectSize
) -> None:
with reporter.step("Put object in container"):
test_file = generate_file(object_size.value)
oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)
with reporter.step("Search operation object"):
search_output = grpc_client.object.search(container, self.cluster.default_rpc_endpoint)
chunks = remote_grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)
for chunk in chunks:
assert chunk.object_id in search_output
@allure.title("Request SEARCH check no chunk index info (size={object_size})")
@requires_container(PUBLIC_WITH_POLICY("EC 2.1"))
def test_search_object_in_ec_cnr(
self, container: str, grpc_client: GrpcClientWrapper, remote_grpc_client: GrpcClientWrapper, object_size: ObjectSize
) -> None:
with reporter.step("Put object in container"):
test_file = generate_file(object_size.value)
oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)
with reporter.step("Search operation all chunk"):
chunks = remote_grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)
for chunk in chunks:
chunk_search = grpc_client.object.search(container, self.cluster.default_rpc_endpoint, oid=chunk.object_id)
assert "index" not in chunk_search
@allure.title("Request DELETE (size={object_size})")
@pytest.mark.failover
@requires_container(PUBLIC_WITH_POLICY("EC 2.1"))
def test_delete_object_in_ec_cnr(
self,
container: str,
grpc_client: GrpcClientWrapper,
remote_grpc_client: GrpcClientWrapper,
object_size: ObjectSize,
cluster_state_controller: ClusterStateController,
) -> None:
with reporter.step("Put object in container."):
test_file = generate_file(object_size.value)
oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)
with reporter.step("Check object chunks nodes."):
chunks = remote_grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)
replication_count = 3 if object_size.name == "simple" else 3 * 4
assert len(chunks) == replication_count
with reporter.step("Delete object"):
grpc_client.object.delete(container, oid, self.cluster.default_rpc_endpoint)
with reporter.step("Check that delete all chunks."):
for chunk in chunks:
with pytest.raises(RuntimeError, match="object already removed"):
grpc_client.object.head(container, chunk.object_id, self.cluster.default_rpc_endpoint)
with reporter.step("Put second object."):
oid_second = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)
with reporter.step("Check second object chunks nodes."):
chunks_second_object = remote_grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid_second)
assert len(chunks_second_object) == replication_count
with reporter.step("Stop nodes with chunk."):
chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, chunks_second_object[0])
cluster_state_controller.stop_node_host(chunk_node[0], "hard")
with reporter.step("Delete second object"):
cluster_nodes = list(set(self.cluster.cluster_nodes) - {chunk_node[0]})
grpc_client.object.delete(container, oid_second, cluster_nodes[0].storage_node.get_rpc_endpoint())
with reporter.step("Check that delete all chunk second object."):
for chunk in chunks_second_object:
with pytest.raises(RuntimeError, match="object already removed|object not found"):
grpc_client.object.head(container, chunk.object_id, cluster_nodes[0].storage_node.get_rpc_endpoint())
@allure.title("Request LOCK (size={object_size})")
@pytest.mark.failover
@requires_container(PUBLIC_WITH_POLICY("EC 2.1"))
def test_lock_object_in_ec_cnr(
self,
container: str,
test_file: TestFile,
grpc_client: GrpcClientWrapper,
remote_grpc_client: GrpcClientWrapper,
frostfs_cli: FrostfsCli,
object_size: ObjectSize,
cluster_state_controller: ClusterStateController,
include_excluded_nodes: None,
) -> None:
with reporter.step("Put object in container."):
oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)
with reporter.step("Check object chunks nodes."):
chunks_object = remote_grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, oid)
replication_count = 3 if object_size.name == "simple" else 3 * 4
assert len(chunks_object) == replication_count
with reporter.step("Put LOCK in object."):
# TODO Rework for the grpc_client when the netmap methods are implemented
epoch = frostfs_cli.netmap.epoch(self.cluster.default_rpc_endpoint, timeout=CLI_DEFAULT_TIMEOUT).stdout.strip()
grpc_client.object.lock(container, oid, self.cluster.default_rpc_endpoint, expire_at=(int(epoch) + 5))
with reporter.step("Check don`t delete chunk"):
for chunk in chunks_object:
with pytest.raises(RuntimeError, match="Lock EC chunk failed"):
grpc_client.object.delete(container, chunk.object_id, self.cluster.default_rpc_endpoint)
with reporter.step("Check enable LOCK object"):
with pytest.raises(RuntimeError, match="object is locked"):
grpc_client.object.delete(container, oid, self.cluster.default_rpc_endpoint)
with reporter.step("Remove node in netmap."):
chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, chunks_object[0])[0]
alive_node = list(set(self.cluster.cluster_nodes) - {chunk_node})[0]
cluster_state_controller.remove_node_from_netmap([chunk_node.storage_node])
with reporter.step("Check don`t delete chunk."):
for chunk in chunks_object:
with pytest.raises(RuntimeError, match="Lock EC chunk failed|object not found"):
grpc_client.object.delete(container, chunk.object_id, alive_node.storage_node.get_rpc_endpoint())
with reporter.step("Check enable LOCK object"):
with pytest.raises(RuntimeError, match="object is locked"):
grpc_client.object.delete(container, oid, alive_node.storage_node.get_rpc_endpoint())
with reporter.step("Include node in netmap"):
cluster_state_controller.include_node_to_netmap(chunk_node.storage_node, alive_node.storage_node)
@allure.title("Output MaxEC* params in frostf-scli (type={type_shards})")
@pytest.mark.parametrize("type_shards", ["Maximum count of data shards", "Maximum count of parity shards"])
def test_maxec_info_with_output_cli(self, frostfs_cli: FrostfsCli, type_shards: str) -> None:
with reporter.step("Get and check params"):
# TODO Rework for the grpc_client when the netmap methods are implemented
net_info = frostfs_cli.netmap.netinfo(self.cluster.default_rpc_endpoint).stdout
assert type_shards in net_info
@allure.title("Change MaxEC*Count params")
def test_change_max_data_shards_params(
self, frostfs_remote_adm: FrostfsAdm, frostfs_cli: FrostfsCli, restore_network_config: None
) -> None:
# TODO Rework for the grpc_client when the netmap methods are implemented
with reporter.step("Get now params MaxECDataCount and MaxECParityCount"):
node_netinfo = NetmapParser.netinfo(
frostfs_cli.netmap.netinfo(self.cluster.default_rpc_endpoint, timeout=CLI_DEFAULT_TIMEOUT).stdout
)
with reporter.step("Change params"):
frostfs_remote_adm.morph.set_config(set_key_value='"MaxECDataCount=5" "MaxECParityCount=3"')
with reporter.step("Get update params"):
update_net_info = NetmapParser.netinfo(
frostfs_cli.netmap.netinfo(self.cluster.default_rpc_endpoint, timeout=CLI_DEFAULT_TIMEOUT).stdout
)
with reporter.step("Check old and new params difference"):
assert (
update_net_info.maximum_count_of_data_shards not in node_netinfo.maximum_count_of_data_shards
and update_net_info.maximum_count_of_parity_shards not in node_netinfo.maximum_count_of_parity_shards
)
@allure.title("Check maximum count data and parity shards")
def test_change_over_max_parity_shards_params(self, frostfs_remote_adm: FrostfsAdm) -> None:
with reporter.step("Change over maximum params shards count."):
with pytest.raises(RuntimeError, match="MaxECDataCount and MaxECParityCount must be <= 256"):
frostfs_remote_adm.morph.set_config(set_key_value='"MaxECDataCount=130" "MaxECParityCount=130"')
@allure.title("Create container with EC policy and SELECT (SELECT={select})")
@pytest.mark.parametrize("select", [2, 4])
def test_create_container_with_select(self, select: int, grpc_client: GrpcClientWrapper) -> None:
with reporter.step("Create container"):
policy = f"EC 1.1 CBF 1 SELECT {select} FROM *"
cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy=policy, await_mode=True)
with reporter.step("Check container nodes decomposed"):
container_nodes = grpc_client.container.nodes(self.cluster.default_rpc_endpoint, cid, self.cluster)
assert len(container_nodes) == select
@allure.title("Create container with EC policy and CBF (CBF={cbf})")
@pytest.mark.parametrize("cbf, expected_nodes", [(1, 2), (2, 4)])
def test_create_container_with_cbf(self, cbf: int, expected_nodes: int, grpc_client: GrpcClientWrapper) -> None:
with reporter.step("Create container."):
policy = f"EC 1.1 CBF {cbf}"
cid = grpc_client.container.create(self.cluster.default_rpc_endpoint, policy=policy, await_mode=True)
with reporter.step("Check expected container nodes."):
container_nodes = grpc_client.container.nodes(self.cluster.default_rpc_endpoint, cid, self.cluster)
assert len(container_nodes) == expected_nodes
@allure.title("Create container with EC policy and FILTER")
@requires_container(PUBLIC_WITH_POLICY("EC 1.1 IN RUS SELECT 2 FROM RU AS RUS FILTER Country EQ Russia AS RU"))
def test_create_container_with_filter(
self,
grpc_client: GrpcClientWrapper,
remote_grpc_client: GrpcClientWrapper,
simple_object_size: ObjectSize,
container: str,
) -> None:
with reporter.step("Put object in container."):
test_file = generate_file(simple_object_size.value)
oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)
with reporter.step("Check object is decomposed exclusively on Russian nodes"):
data_chunk = remote_grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, container, oid=oid)
parity_chunk = remote_grpc_client.object.chunks.get_parity(self.cluster.default_rpc_endpoint, container, oid=oid)
node_data_chunk = remote_grpc_client.object.chunks.get_chunk_node(self.cluster, data_chunk)
node_parity_chunk = remote_grpc_client.object.chunks.get_chunk_node(self.cluster, parity_chunk)
for node in [node_data_chunk[1], node_parity_chunk[1]]:
assert "Russia" in node.country
@allure.title("Evacuation shard with chunk (type={type})")
@pytest.mark.parametrize("type, get_chunk", [("data", get_data_chunk_object), ("parity", get_parity_chunk_object)])
@requires_container(PUBLIC_WITH_POLICY("EC 1.1 CBF 1"))
def test_evacuation_data_shard(
self,
container: str,
restore_nodes_shards_mode: None,
frostfs_cli: FrostfsCli,
remote_frostfs_cli: FrostfsCli,
grpc_client: GrpcClientWrapper,
max_object_size: int,
type: str,
get_chunk,
) -> None:
with reporter.step("Put object in container."):
test_file = generate_file(max_object_size - 1000)
oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)
with reporter.step("Get object chunks."):
chunk = get_chunk(self, remote_frostfs_cli, container, oid, self.cluster.default_rpc_endpoint)
chunk_node = grpc_client.object.chunks.get_chunk_node(self.cluster, chunk)
frostfs_node_cli = self.get_node_cli(chunk_node[0], config=chunk_node[0].storage_node.get_remote_wallet_config_path())
with reporter.step("Search shards chunk"):
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)
shard_id = grpc_client.object.chunks.get_shard_chunk(chunk_node[0], chunk)
with reporter.step("Enable evacuation for shard"):
frostfs_node_cli.shards.set_mode(chunk_node[0].storage_node.get_control_endpoint(), mode="read-only", id=shard_id)
frostfs_node_cli.shards.evacuation_start(chunk_node[0].storage_node.get_control_endpoint(), shard_id, await_mode=True)
with reporter.step("Get object after evacuation shard"):
grpc_client.object.get(container, oid, self.cluster.default_rpc_endpoint)
@allure.title("[NEGATIVE] Don`t create more 1 EC policy")
def test_more_one_ec_policy(self, grpc_client: GrpcClientWrapper) -> None:
with reporter.step("Create container with policy - 'EC 2.1 EC 1.1'"):
with pytest.raises(RuntimeError, match="can't parse placement policy"):
grpc_client.container.create(
self.cluster.default_rpc_endpoint, policy="EC 2.1 EC 1.1 CBF 1 SELECT 4 FROM *", await_mode=True
)
@allure.title("Create bucket with EC policy (s3_client={s3_client})")
@pytest.mark.parametrize("s3_policy, s3_client", [(S3_POLICY_FILE_LOCATION, AwsCliClient)], indirect=True)
def test_create_bucket_with_ec_location(
self, s3_client: S3ClientWrapper, bucket_container_resolver: BucketContainerResolver, grpc_client: GrpcClientWrapper
) -> None:
with reporter.step("Create bucket with EC location constrain"):
bucket = s3_client.create_bucket(location_constraint="ec3.1")
with reporter.step("Resolve container bucket"):
cid = bucket_container_resolver.resolve(self.cluster.cluster_nodes[0], bucket)
with reporter.step("Validate container policy"):
container = grpc_client.container.get(self.cluster.default_rpc_endpoint, cid, json_mode=True, timeout=CLI_DEFAULT_TIMEOUT)
assert container
@allure.title("Bucket object count chunks (s3_client={s3_client}, size={object_size})")
@pytest.mark.parametrize("s3_policy, s3_client", [(S3_POLICY_FILE_LOCATION, AwsCliClient)], indirect=True)
def test_count_chunks_bucket_with_ec_location(
self,
test_file: TestFile,
s3_client: S3ClientWrapper,
bucket_container_resolver: BucketContainerResolver,
remote_grpc_client: GrpcClientWrapper,
object_size: ObjectSize,
) -> None:
with reporter.step("Create bucket with EC location constrain"):
bucket = s3_client.create_bucket(location_constraint="ec3.1")
with reporter.step("Enable versioning object"):
s3_client.put_bucket_versioning(bucket, VersioningStatus.ENABLED)
bucket_status = s3_client.get_bucket_versioning_status(bucket)
assert bucket_status == VersioningStatus.ENABLED.value
with reporter.step("Put object in bucket"):
bucket_object = s3_client.put_object(bucket, test_file)
with reporter.step("Watch replication count chunks"):
cid = bucket_container_resolver.resolve(self.cluster.cluster_nodes[0], bucket)
chunks = remote_grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, cid, bucket_object)
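# EC 3.1 encodes each object into 3 data + 1 parity = 4 chunks; a complex object is assumed here to be split into 4 parts, giving 4 * 4 = 16 chunks.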
expect_chunks = 4 if object_size.name == "simple" else 16
assert len(chunks) == expect_chunks
@allure.title("Replication chunk after drop (size={object_size})")
@requires_container(PUBLIC_WITH_POLICY("EC 2.1 CBF 1"))
def test_drop_chunk_and_replication(
self, test_file: TestFile, container: str, grpc_client: GrpcClientWrapper, remote_grpc_client: GrpcClientWrapper, rep_count: int
) -> None:
with reporter.step("Put object"):
oid = grpc_client.object.put(test_file, container, self.cluster.default_rpc_endpoint)
with reporter.step("Get all chunks"):
data_chunk = remote_grpc_client.object.chunks.get_first_data(self.cluster.default_rpc_endpoint, container, oid=oid)
with reporter.step("Search chunk node"):
chunk_node = remote_grpc_client.object.chunks.get_chunk_node(self.cluster, data_chunk)
shell_chunk_node = chunk_node[0].host.get_shell()
with reporter.step("Get replication count"):
assert self.check_replication(rep_count, remote_grpc_client, container, oid)
with reporter.step("Delete chunk"):
frostfs_node_cli = FrostfsCli(
shell_chunk_node,
frostfs_cli_exec_path=FROSTFS_CLI_EXEC,
config_file=chunk_node[0].storage_node.get_remote_wallet_config_path(),
)
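# drop_objects takes the object address as "<CID>/<OID>"; dropping a single chunk should be noticed by the policer, which re-replicates it.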
frostfs_node_cli.control.drop_objects(chunk_node[0].storage_node.get_control_endpoint(), f"{container}/{data_chunk.object_id}")
with reporter.step("Wait replication count after drop one chunk"):
self.wait_replication(rep_count, remote_grpc_client, container, oid)

View file

@ -1,78 +0,0 @@
import logging
import random
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import delete_container
from frostfs_testlib.steps.cli.object import delete_object, head_object, put_object
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.failover_utils import wait_object_replication
from frostfs_testlib.utils.file_utils import TestFile
from ...helpers.container_request import PUBLIC_WITH_POLICY, requires_container
logger = logging.getLogger("NeoLogger")
OBJECT_ATTRIBUTES = {"common_key": "common_value"}
WAIT_FOR_REPLICATION = 60
# Adding failover mark because it may make cluster unhealthy
@pytest.mark.failover
@pytest.mark.replication
class TestReplication(ClusterTestBase):
@allure.title("Replication (obj_size={object_size})")
@requires_container(PUBLIC_WITH_POLICY("REP %NODE_COUNT% CBF 1", short_name="Public_all_except_one_node"))
def test_replication(
self,
wallet: WalletInfo,
client_shell: Shell,
cluster: Cluster,
container: str,
test_file: TestFile,
cluster_state_controller: ClusterStateController,
):
nodes_count = len(cluster.cluster_nodes)
node_for_rep = random.choice(cluster.cluster_nodes)
alive_nodes = [node for node in cluster.cluster_nodes if node != node_for_rep]
with reporter.step("Stop container node host"):
cluster_state_controller.stop_node_host(node_for_rep, mode="hard")
with reporter.step("Put object"):
oid = put_object(
wallet=wallet,
path=test_file,
cid=container,
shell=client_shell,
attributes=OBJECT_ATTRIBUTES,
copies_number=nodes_count - 1,
endpoint=random.choice(alive_nodes).storage_node.get_rpc_endpoint(),
timeout="45s",
)
with reporter.step("Start container node host"):
cluster_state_controller.start_node_host(node_for_rep)
with reporter.step(f"Wait for replication"):
object_nodes = wait_object_replication(container, oid, nodes_count, client_shell, self.cluster.storage_nodes)
with reporter.step("Check attributes"):
for node in object_nodes:
header_info = head_object(wallet, container, oid, client_shell, node.get_rpc_endpoint(), is_direct=True).get("header", {})
attributes = header_info.get("attributes", {})
for attribute_key, attribute_value in OBJECT_ATTRIBUTES.items():
assert attribute_key in attributes, f"{attribute_key} not found in {header_info}"
assert header_info["attributes"].get(attribute_key) == str(attribute_value), (
f"{attribute_key} value not equal: "
f"got attribute value: {attributes.get(attribute_key)}"
f"expected attribute value: {attribute_value}"
)
with reporter.step("Cleanup"):
delete_object(wallet, container, oid, client_shell, cluster.default_rpc_endpoint)
delete_container(wallet, container, client_shell, cluster.default_rpc_endpoint)

View file

@ -1,75 +0,0 @@
import logging
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.steps.acl import bearer_token_base64_from_file
from frostfs_testlib.steps.http_gate import upload_via_http_gate_curl, verify_object_hash
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses import ape
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
from ....helpers.bearer_token import create_bearer_token
from ....helpers.container_request import APE_EVERYONE_ALLOW_ALL, ContainerRequest, requires_container
logger = logging.getLogger("NeoLogger")
@pytest.mark.http_gate
@pytest.mark.http_put
class Test_http_bearer(ClusterTestBase):
PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
OWNER_ROLE = ape.Condition.by_role(ape.Role.OWNER)
CUSTOM_APE_RULE = ape.Rule(ape.Verb.DENY, ape.ObjectOperations.PUT, OWNER_ROLE)
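# The container policy below denies PUT for the owner, so uploads through the HTTP gate must be authorized with a bearer token issued for the OTHERS role.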
@pytest.fixture()
def bearer_token(self, frostfs_cli: FrostfsCli, container: str, temp_directory: str, cluster: Cluster) -> str:
with reporter.step(f"Create bearer token for {ape.Role.OTHERS} with all operations allowed"):
role_condition = ape.Condition.by_role(ape.Role.OTHERS)
rule = ape.Rule(ape.Verb.ALLOW, ape.ObjectOperations.WILDCARD_ALL, role_condition)
bearer = create_bearer_token(frostfs_cli, temp_directory, container, rule, cluster.default_rpc_endpoint)
return bearer_token_base64_from_file(bearer)
@allure.title(f"[NEGATIVE] Put object without bearer token for {ape.Role.OTHERS}")
def test_unable_put_without_bearer_token(self, simple_object_size: ObjectSize, container: str):
upload_via_http_gate_curl(
cid=container,
filepath=generate_file(simple_object_size.value),
endpoint=self.cluster.default_http_gate_endpoint,
error_pattern="access to object operation denied",
)
@allure.title("Put object via HTTP using bearer token (object_size={object_size})")
@requires_container(
ContainerRequest(PLACEMENT_RULE, [APE_EVERYONE_ALLOW_ALL, CUSTOM_APE_RULE], short_name="custom with denied owner put")
)
def test_put_with_bearer_when_ape_restrict(
self,
object_size: ObjectSize,
default_wallet: WalletInfo,
container: str,
bearer_token: str,
):
file_path = generate_file(object_size.value)
with reporter.step(f"Put object with bearer token for {ape.Role.OTHERS}, then get and verify hashes"):
headers = [f" -H 'Authorization: Bearer {bearer_token}'"]
oid = upload_via_http_gate_curl(
cid=container,
filepath=file_path,
endpoint=self.cluster.default_http_gate_endpoint,
headers=headers,
)
verify_object_hash(
oid=oid,
file_name=file_path,
wallet=default_wallet,
cid=container,
shell=self.shell,
nodes=self.cluster.storage_nodes,
request_node=self.cluster.cluster_nodes[0],
)

View file

@ -1,239 +0,0 @@
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.steps.epoch import get_epoch
from frostfs_testlib.steps.http_gate import (
attr_into_header,
get_object_by_attr_and_verify_hashes,
get_via_http_curl,
get_via_http_gate,
get_via_zip_http_gate,
try_to_get_object_and_expect_error,
upload_via_http_gate,
upload_via_http_gate_curl,
verify_object_hash,
)
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import TestFile, generate_file, get_file_hash
from ....helpers.container_request import REP_2_2_2_PUBLIC, requires_container
from ....helpers.utility import wait_for_gc_pass_on_storage_nodes
OBJECT_NOT_FOUND_ERROR = "not found"
@allure.link(
"https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#frostfs-http-gateway",
name="frostfs-http-gateway",
)
@allure.link("https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#uploading", name="uploading")
@allure.link("https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#downloading", name="downloading")
@pytest.mark.http_gate
@pytest.mark.http_put
class TestHttpPut(ClusterTestBase):
@allure.link("https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#uploading", name="uploading")
@allure.link("https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#downloading", name="downloading")
@allure.title("Put over HTTP, Get over HTTP (object_size={object_size})")
@pytest.mark.smoke
@requires_container(REP_2_2_2_PUBLIC)
def test_put_http_get_http(self, container: str, default_wallet: WalletInfo, test_file: TestFile):
"""
Test that object can be put and retrieved using the HTTP interface.
Steps:
1. Create simple and large objects.
2. Upload objects using HTTP (https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#uploading).
3. Download objects using HTTP gate (https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#downloading).
4. Compare hashes of the downloaded and original objects.
Expected result:
Hashes must be the same.
"""
with reporter.step("Put object using HTTP"):
object_id = upload_via_http_gate(container, test_file.path, self.cluster.default_http_gate_endpoint)
with reporter.step("Get object and check hash"):
verify_object_hash(
object_id,
test_file.path,
default_wallet,
container,
self.shell,
self.cluster.storage_nodes,
self.cluster.cluster_nodes[0],
)
@allure.link(
"https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#by-attributes",
name="download by attributes",
)
@allure.title("Put over HTTP, Get over HTTP with {id} header")
@pytest.mark.parametrize(
"attributes,id",
[
({"fileName": "simple_obj_filename"}, "simple"),
({"file-Name": "simple obj filename"}, "hyphen"),
({"cat%jpeg": "cat%jpeg"}, "percent"),
],
ids=["simple", "hyphen", "percent"],
)
@requires_container(REP_2_2_2_PUBLIC)
def test_put_http_get_http_with_headers(self, container: str, attributes: dict, simple_object_size: ObjectSize, id: str):
"""
Test that object can be downloaded using different attributes in HTTP header.
Steps:
1. Create simple and large objects.
2. Upload objects using HTTP with particular attributes in the header.
3. Download objects by attributes using HTTP gate (https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#by-attributes).
4. Compare hashes of the downloaded and original objects.
Expected result:
Hashes must be the same.
"""
file_path = generate_file(simple_object_size.value)
with reporter.step("Put objects using HTTP with attribute"):
headers = attr_into_header(attributes)
oid = upload_via_http_gate(container, file_path, self.cluster.default_http_gate_endpoint, headers)
get_object_by_attr_and_verify_hashes(
oid,
file_path,
container,
attributes,
self.cluster.cluster_nodes[0],
)
@allure.title("Expiration-Epoch in HTTP header (epoch_gap={epoch_gap})")
@pytest.mark.parametrize("epoch_gap", [0, 1])
@requires_container(REP_2_2_2_PUBLIC)
def test_expiration_epoch_in_http(self, container: str, simple_object_size: ObjectSize, epoch_gap: int):
http_endpoint = self.cluster.default_http_gate_endpoint
min_valid_epoch = get_epoch(self.shell, self.cluster) + epoch_gap
file_path = generate_file(simple_object_size.value)
oids_to_be_expired = []
oids_to_be_valid = []
for gap_until in (0, 1, 2, 100):
valid_until = min_valid_epoch + gap_until
headers = {"X-Attribute-System-Expiration-Epoch": str(valid_until)}
with reporter.step("Put objects using HTTP with attribute Expiration-Epoch"):
oid = upload_via_http_gate(
cid=container,
path=file_path,
headers=headers,
endpoint=http_endpoint,
)
if get_epoch(self.shell, self.cluster) + 1 <= valid_until:
oids_to_be_valid.append(oid)
else:
oids_to_be_expired.append(oid)
with reporter.step("This object can be got"):
get_via_http_gate(container, oid, self.cluster.cluster_nodes[0])
self.tick_epoch()
# Wait for GC, because object with expiration is counted as alive until GC removes it
wait_for_gc_pass_on_storage_nodes()
for oid in oids_to_be_expired:
with reporter.step(f"{oid} shall be expired and cannot be got"):
try_to_get_object_and_expect_error(
cid=container,
oid=oid,
node=self.cluster.cluster_nodes[0],
error_pattern=OBJECT_NOT_FOUND_ERROR,
)
for oid in oids_to_be_valid:
with reporter.step(f"{oid} shall be valid and can be got"):
get_via_http_gate(cid=container, oid=oid, node=self.cluster.cluster_nodes[0])
@allure.title("Zip in HTTP header")
@requires_container(REP_2_2_2_PUBLIC)
def test_zip_in_http(self, container: str, complex_object_size: ObjectSize, simple_object_size: ObjectSize):
file_path_simple = generate_file(simple_object_size.value)
file_path_large = generate_file(complex_object_size.value)
common_prefix = "my_files"
headers1 = {"X-Attribute-FilePath": f"{common_prefix}/file1"}
headers2 = {"X-Attribute-FilePath": f"{common_prefix}/file2"}
upload_via_http_gate(
cid=container,
path=file_path_simple,
headers=headers1,
endpoint=self.cluster.default_http_gate_endpoint,
)
upload_via_http_gate(container, file_path_large, headers2, self.cluster.default_http_gate_endpoint)
dir_path = get_via_zip_http_gate(cid=container, prefix=common_prefix, node=self.cluster.cluster_nodes[0])
with reporter.step("Verify hashes"):
assert get_file_hash(f"{dir_path}/file1") == get_file_hash(file_path_simple)
assert get_file_hash(f"{dir_path}/file2") == get_file_hash(file_path_large)
@allure.title("Put over HTTP/Curl, Get over HTTP/Curl for large object")
@requires_container(REP_2_2_2_PUBLIC)
def test_put_http_get_http_large_file(self, default_wallet: WalletInfo, container: str, complex_object_size: ObjectSize):
"""
This test checks upload and download using curl with 'large' object.
A large object here is an object up to 20 MB in size.
"""
file_path = generate_file(complex_object_size.value)
with reporter.step("Put objects using HTTP"):
oid_gate = upload_via_http_gate(cid=container, path=file_path, endpoint=self.cluster.default_http_gate_endpoint)
oid_curl = upload_via_http_gate_curl(
cid=container,
filepath=file_path,
endpoint=self.cluster.default_http_gate_endpoint,
)
verify_object_hash(
oid=oid_gate,
file_name=file_path,
wallet=default_wallet,
cid=container,
shell=self.shell,
nodes=self.cluster.storage_nodes,
request_node=self.cluster.cluster_nodes[0],
)
verify_object_hash(
oid=oid_curl,
file_name=file_path,
wallet=default_wallet,
cid=container,
shell=self.shell,
nodes=self.cluster.storage_nodes,
request_node=self.cluster.cluster_nodes[0],
object_getter=get_via_http_curl,
)
@allure.title("Put/Get over HTTP using Curl utility (object_size={object_size})")
@requires_container(REP_2_2_2_PUBLIC)
def test_put_http_get_http_curl(self, default_wallet: WalletInfo, container: str, test_file: TestFile):
"""
Test checks upload and download over HTTP using curl utility.
"""
with reporter.step("Put object using curl utility"):
object_id = upload_via_http_gate_curl(container, test_file.path, self.cluster.default_http_gate_endpoint)
with reporter.step("Get object and check hash"):
verify_object_hash(
object_id,
test_file.path,
default_wallet,
container,
self.shell,
self.cluster.storage_nodes,
self.cluster.cluster_nodes[0],
get_via_http_curl,
)

View file

@ -1,201 +0,0 @@
import logging
import os
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.steps.cli.container import delete_container, list_containers, wait_for_container_deletion
from frostfs_testlib.steps.cli.object import delete_object
from frostfs_testlib.steps.http_gate import (
attr_into_str_header_curl,
get_object_by_attr_and_verify_hashes,
try_to_get_object_and_expect_error,
try_to_get_object_via_passed_request_and_expect_error,
upload_via_http_gate_curl,
)
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
from ....helpers.container_request import REP_2_1_4_PUBLIC, requires_container
OBJECT_ALREADY_REMOVED_ERROR = "object already removed"
logger = logging.getLogger("NeoLogger")
@pytest.mark.http_gate
@pytest.mark.http_put
class Test_http_headers(ClusterTestBase):
obj1_keys = ["Writer", "Chapter1", "Chapter2"]
obj2_keys = ["Writer", "Ch@pter1", "chapter2"]
values = ["Leo Tolstoy", "peace", "w@r"]
OBJECT_ATTRIBUTES = [
{obj1_keys[0]: values[0], obj1_keys[1]: values[1], obj1_keys[2]: values[2]},
{obj2_keys[0]: values[0], obj2_keys[1]: values[1], obj2_keys[2]: values[2]},
]
@pytest.fixture
def storage_objects_with_attributes(self, container: str, wallet: WalletInfo, object_size: ObjectSize) -> list[StorageObjectInfo]:
# TODO: Deal with http tests
if object_size.value > 1000:
pytest.skip("Complex objects for HTTP temporarly disabled for v0.37")
storage_objects = []
file_path = generate_file(object_size.value)
for attributes in self.OBJECT_ATTRIBUTES:
storage_object_id = upload_via_http_gate_curl(
cid=container,
filepath=file_path,
endpoint=self.cluster.default_http_gate_endpoint,
headers=attr_into_str_header_curl(attributes),
)
storage_object = StorageObjectInfo(container, storage_object_id)
storage_object.size = os.path.getsize(file_path)
storage_object.wallet = wallet
storage_object.file_path = file_path
storage_object.attributes = attributes
storage_objects.append(storage_object)
return storage_objects
@allure.title("Get object1 by attribute (object_size={object_size})")
@requires_container(REP_2_1_4_PUBLIC)
def test_object1_can_be_get_by_attr(self, storage_objects_with_attributes: list[StorageObjectInfo]):
"""
Test to get object#1 by attribute and compare hashes.
Steps:
1. Download object#1 with attributes [Chapter2=w@r] and compare hashes
"""
storage_object_1 = storage_objects_with_attributes[0]
with reporter.step(
f'Download object#1 via wget with attributes Chapter2: {storage_object_1.attributes["Chapter2"]} and compare hashes'
):
get_object_by_attr_and_verify_hashes(
oid=storage_object_1.oid,
file_name=storage_object_1.file_path,
cid=storage_object_1.cid,
attrs={"Chapter2": storage_object_1.attributes["Chapter2"]},
node=self.cluster.cluster_nodes[0],
)
@allure.title("Get object2 with different attributes, then delete object2 and get object1 (object_size={object_size})")
@requires_container(REP_2_1_4_PUBLIC)
def test_object2_can_be_get_by_attr(self, default_wallet: WalletInfo, storage_objects_with_attributes: list[StorageObjectInfo]):
"""
Test to get object#2 with different attributes, then delete object#2 and get object#1 using the first attribute.
Note: obj1 and obj2 share attribute#1, so once obj2 is deleted, obj1 can be fetched by that attribute.
Steps:
1. Download object#2 with attributes [chapter2=w@r] and compare hashes
2. Download object#2 with attributes [Ch@pter1=peace] and compare hashes
3. Delete object#2
4. Download object#1 with attributes [Writer=Leo Tolstoy] and compare hashes
"""
storage_object_1 = storage_objects_with_attributes[0]
storage_object_2 = storage_objects_with_attributes[1]
with reporter.step(
f'Download object#2 via wget with attributes [chapter2={storage_object_2.attributes["chapter2"]}] / [Ch@pter1={storage_object_2.attributes["Ch@pter1"]}] and compare hashes'
):
selected_attributes_object2 = [
{"chapter2": storage_object_2.attributes["chapter2"]},
{"Ch@pter1": storage_object_2.attributes["Ch@pter1"]},
]
for attributes in selected_attributes_object2:
get_object_by_attr_and_verify_hashes(
oid=storage_object_2.oid,
file_name=storage_object_2.file_path,
cid=storage_object_2.cid,
attrs=attributes,
node=self.cluster.cluster_nodes[0],
)
with reporter.step("Delete object#2 and verify is the container deleted"):
delete_object(
wallet=default_wallet,
cid=storage_object_2.cid,
oid=storage_object_2.oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
try_to_get_object_and_expect_error(
cid=storage_object_2.cid,
oid=storage_object_2.oid,
node=self.cluster.cluster_nodes[0],
error_pattern=OBJECT_ALREADY_REMOVED_ERROR,
)
storage_objects_with_attributes.remove(storage_object_2)
with reporter.step(f'Download object#1 with attributes [Writer={storage_object_1.attributes["Writer"]}] and compare hashes'):
key_value_pair = {"Writer": storage_object_1.attributes["Writer"]}
get_object_by_attr_and_verify_hashes(
oid=storage_object_1.oid,
file_name=storage_object_1.file_path,
cid=storage_object_1.cid,
attrs=key_value_pair,
node=self.cluster.cluster_nodes[0],
)
@allure.title("[NEGATIVE] Put object and get right after container is deleted (object_size={object_size})")
@requires_container(REP_2_1_4_PUBLIC)
def test_negative_put_and_get_object3(self, default_wallet: WalletInfo, storage_objects_with_attributes: list[StorageObjectInfo]):
"""
Test to attempt to put object and try to download it right after the container has been deleted
Steps:
1. [Negative] Allocate and attempt to put object#3 via http with attributes: [Writer=Leo Tolstoy, Writer=peace, peace=peace]
Expected: "Error duplication of attributes detected"
2. Delete container
3. [Negative] Try to download object with attributes [peace=peace]
Expected: "HTTP request sent, awaiting response... 404 Not Found"
"""
storage_object_1 = storage_objects_with_attributes[0]
with reporter.step(
"[Negative] Allocate and attemt to put object#3 via http with attributes: [Writer=Leo Tolstoy, Writer=peace, peace=peace]"
):
file_path_3 = generate_file(storage_object_1.size)
attrs_obj3 = {"Writer": "Leo Tolstoy", "peace": "peace"}
headers = attr_into_str_header_curl(attrs_obj3)
headers.append(" ".join(attr_into_str_header_curl({"Writer": "peace"})))
error_pattern = f"key duplication error: X-Attribute-Writer"
upload_via_http_gate_curl(
cid=storage_object_1.cid,
filepath=file_path_3,
endpoint=self.cluster.default_http_gate_endpoint,
headers=headers,
error_pattern=error_pattern,
)
with reporter.step("Delete container and verify container deletion"):
delete_container(
wallet=default_wallet,
cid=storage_object_1.cid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
await_mode=True,
)
self.tick_epoch()
wait_for_container_deletion(
default_wallet,
storage_object_1.cid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
assert storage_object_1.cid not in list_containers(default_wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
with reporter.step("[Negative] Try to download (wget) object via wget with attributes [peace=peace]"):
request = f"/get/{storage_object_1.cid}/peace/peace"
error_pattern = "404 Not Found"
try_to_get_object_via_passed_request_and_expect_error(
cid=storage_object_1.cid,
oid="",
node=self.cluster.cluster_nodes[0],
error_pattern=error_pattern,
attrs=attrs_obj3,
http_request_path=request,
)

View file

@ -1,176 +0,0 @@
import logging
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.clients import S3ClientWrapper
from frostfs_testlib.steps import s3_helper
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.steps.http_gate import (
assert_hashes_are_equal,
get_object_by_attr_and_verify_hashes,
get_via_http_gate,
try_to_get_object_via_passed_request_and_expect_error,
verify_object_hash,
)
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import TestFile, generate_file, split_file
from ....helpers.container_request import REP_2_1_4_PUBLIC, requires_container
logger = logging.getLogger("NeoLogger")
PART_SIZE = 5 * 1024 * 1024
@pytest.mark.nightly
@pytest.mark.sanity
@pytest.mark.http_gate
class Test_http_object(ClusterTestBase):
@allure.title("Put over gRPC, Get over HTTP with attributes (obj_size={object_size})")
@requires_container(REP_2_1_4_PUBLIC)
def test_object_put_get_attributes(self, default_wallet: WalletInfo, container: str, test_file: TestFile):
"""
Test that object can be put using gRPC interface and got using HTTP.
Steps:
1. Create an object;
2. Put object(s) using gRPC (frostfs-cli) with attributes [--attributes chapter1=peace,chapter2=war];
3. Download the object using HTTP gate (https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#downloading);
4. Compare hashes of the original and the downloaded object;
5. [Negative] Try to get the object with the specified attributes and `get` request: [get/$CID/chapter1/peace];
6. Download the object with the specified attributes and `get_by_attribute` request: [get_by_attribute/$CID/chapter1/peace];
7. Compare hashes of the original and the downloaded object;
8. [Negative] Try to get the object via `get_by_attribute` request: [get_by_attribute/$CID/$OID];
Expected result:
Hashes must be the same.
"""
# List of Key=Value attributes
obj_key1 = "chapter1"
obj_value1 = "peace"
obj_key2 = "chapter2"
obj_value2 = "war"
# Prepare for grpc PUT request
key_value1 = obj_key1 + "=" + obj_value1
key_value2 = obj_key2 + "=" + obj_value2
with reporter.step("Put objects using gRPC [--attributes chapter1=peace,chapter2=war]"):
oid = put_object_to_random_node(
wallet=default_wallet,
path=test_file.path,
cid=container,
shell=self.shell,
cluster=self.cluster,
attributes=f"{key_value1},{key_value2}",
)
with reporter.step("Get object and verify hashes [ get/$CID/$OID ]"):
verify_object_hash(
oid=oid,
file_name=test_file.path,
wallet=default_wallet,
cid=container,
shell=self.shell,
nodes=self.cluster.storage_nodes,
request_node=self.cluster.cluster_nodes[0],
)
with reporter.step("[Negative] try to get object: [get/$CID/chapter1/peace]"):
attrs = {obj_key1: obj_value1, obj_key2: obj_value2}
request = f"/get/{container}/{obj_key1}/{obj_value1}"
expected_err_msg = "Failed to get object via HTTP gate:"
try_to_get_object_via_passed_request_and_expect_error(
cid=container,
oid=oid,
node=self.cluster.cluster_nodes[0],
error_pattern=expected_err_msg,
http_request_path=request,
attrs=attrs,
)
with reporter.step("Download the object with attribute [get_by_attribute/$CID/chapter1/peace]"):
get_object_by_attr_and_verify_hashes(oid, test_file, container, attrs, self.cluster.cluster_nodes[0])
with reporter.step("[Negative] try to get object: get_by_attribute/$CID/$OID"):
request = f"/get_by_attribute/{container}/{oid}"
try_to_get_object_via_passed_request_and_expect_error(
container,
oid,
self.cluster.cluster_nodes[0],
error_pattern=expected_err_msg,
http_request_path=request,
)
@allure.title("Put object over S3, get over HTTP with bucket name and key (s3_client={s3_client}, object_size={object_size})")
def test_object_put_get_bucketname_key(self, test_file: TestFile, s3_client: S3ClientWrapper):
"""
Test that object can be put using s3-gateway interface and got via HTTP with bucket name and object key.
Steps:
1. Create an object;
2. Create a bucket via s3;
3. Put the object via s3;
4. Download the object using HTTP gate with the bucket name and the object key;
5. Compare hashes of the original and the downloaded objects;
Expected result:
Hashes must be the same.
"""
object_key = s3_helper.object_key_from_file_path(test_file)
with reporter.step("Create public bucket"):
bucket = s3_client.create_bucket(acl="public-read-write")
with reporter.step("Put object"):
s3_client.put_object(bucket, test_file, object_key)
with reporter.step("Get object via S3 gate"):
obj_s3 = s3_client.get_object(bucket, object_key)
with reporter.step("Get object via HTTP gate"):
obj_http = get_via_http_gate(bucket, object_key, node=self.cluster.cluster_nodes[0])
with reporter.step("Make sure the hashes of both objects are the same"):
assert_hashes_are_equal(test_file.path, obj_http, obj_s3)
@allure.title("Put multipart object over S3, get over HTTP with bucket name and key (s3_client={s3_client})")
def test_object_put_get_bucketname_key_multipart(self, s3_client: S3ClientWrapper):
parts = []
parts_count = 5
original_size = PART_SIZE * parts_count
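# 5 parts of 5 MiB each; 5 MiB is the minimum S3 part size for every part except the last one.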
with reporter.step("Create public container"):
bucket = s3_client.create_bucket(acl="public-read-write")
with reporter.step("Generate original object and split it into parts"):
original_file = generate_file(original_size)
file_parts = split_file(original_file, parts_count)
object_key = s3_helper.object_key_from_file_path(original_file)
with reporter.step("Create multipart and upload parts"):
upload_id = s3_client.create_multipart_upload(bucket, object_key)
for part_id, file_path in enumerate(file_parts, start=1):
etag = s3_client.upload_part(bucket, object_key, upload_id, part_id, file_path)
parts.append((part_id, etag))
with reporter.step("Check all parts are visible in bucket"):
got_parts = s3_client.list_parts(bucket, object_key, upload_id)
assert len(got_parts) == len(file_parts), f"Expected {parts_count} parts, got:\n{got_parts}"
with reporter.step("Complete multipart upload"):
s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)
with reporter.step("Get multipart object via S3 gate"):
obj_s3 = s3_client.get_object(bucket, object_key)
with reporter.step("Get multipart object via HTTP gate"):
obj_http = get_via_http_gate(bucket, object_key, self.cluster.cluster_nodes[0])
with reporter.step("Make sure the hashes of both objects are the same"):
assert_hashes_are_equal(original_file, obj_http, obj_s3)

View file

@ -1,52 +0,0 @@
import logging
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.steps.http_gate import upload_via_http_gate_curl, verify_object_hash
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
from ....helpers.container_request import REP_2_1_4_PUBLIC, requires_container
logger = logging.getLogger("NeoLogger")
@pytest.mark.http_gate
@pytest.mark.http_put
class Test_http_streaming(ClusterTestBase):
@allure.title("Put via pipe (streaming), Get over HTTP and verify hashes")
@requires_container(REP_2_1_4_PUBLIC)
def test_object_can_be_put_get_by_streaming(self, default_wallet: WalletInfo, container: str, complex_object_size: ObjectSize):
"""
Test that object can be put using curl with pipe (streaming) and got using HTTP.
Steps:
1. Create big object;
2. Put object using curl with pipe (streaming);
3. Download object using HTTP gate (https://git.frostfs.info/TrueCloudLab/frostfs-http-gw#downloading);
4. Compare hashes between original and downloaded object;
Expected result:
Hashes must be the same.
"""
with reporter.step("Allocate big object"):
# Generate file
file_path = generate_file(complex_object_size.value)
with reporter.step("Put objects using curl utility"):
oid = upload_via_http_gate_curl(container, file_path, self.cluster.default_http_gate_endpoint)
with reporter.step("Get object and verify hashes [ get/$CID/$OID ]"):
verify_object_hash(
oid=oid,
file_name=file_path,
wallet=default_wallet,
cid=container,
shell=self.shell,
nodes=self.cluster.storage_nodes,
request_node=self.cluster.cluster_nodes[0],
)

View file

@ -1,337 +0,0 @@
import calendar
import datetime
import logging
from typing import Optional
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
from frostfs_testlib.steps.cli.object import get_netmap_netinfo, get_object_from_random_node, head_object
from frostfs_testlib.steps.epoch import get_epoch, wait_for_epochs_align
from frostfs_testlib.steps.http_gate import (
attr_into_str_header_curl,
try_to_get_object_and_expect_error,
upload_via_http_gate_curl,
verify_object_hash,
)
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
from ....helpers.container_request import REP_2_1_2_PUBLIC, requires_container
logger = logging.getLogger("NeoLogger")
EXPIRATION_TIMESTAMP_HEADER = "__SYSTEM__EXPIRATION_TIMESTAMP"
# TODO: Deprecated. Use EXPIRATION_EPOCH_ATTRIBUTE from testlib
EXPIRATION_EPOCH_HEADER = "__SYSTEM__EXPIRATION_EPOCH"
EXPIRATION_DURATION_HEADER = "__SYSTEM__EXPIRATION_DURATION"
EXPIRATION_EXPIRATION_RFC = "__SYSTEM__EXPIRATION_RFC3339"
SYSTEM_EXPIRATION_EPOCH = "System-Expiration-Epoch"
SYSTEM_EXPIRATION_DURATION = "System-Expiration-Duration"
SYSTEM_EXPIRATION_TIMESTAMP = "System-Expiration-Timestamp"
SYSTEM_EXPIRATION_RFC3339 = "System-Expiration-RFC3339"
@pytest.mark.http_gate
@pytest.mark.http_put
class Test_http_system_header(ClusterTestBase):
@pytest.fixture(scope="class")
@allure.title("epoch_duration in seconds")
def epoch_duration(self, default_wallet: WalletInfo) -> int:
net_info = get_netmap_netinfo(
wallet=default_wallet,
endpoint=self.cluster.default_rpc_endpoint,
shell=self.shell,
)
epoch_duration_in_blocks = net_info["epoch_duration"]
time_per_block = net_info["time_per_block"]
return int(epoch_duration_in_blocks * time_per_block)
@allure.title("Return N-epoch count in minutes")
def epoch_count_into_mins(self, epoch_duration: int, epoch: int) -> str:
mins = epoch_duration * epoch / 60
return f"{mins}m"
@allure.title("Return future timestamp after N epochs are passed")
def epoch_count_into_timestamp(self, epoch_duration: int, epoch: int, rfc3339: Optional[bool] = False) -> str:
current_datetime = datetime.datetime.utcnow()
epoch_count_in_seconds = epoch_duration * epoch
future_datetime = current_datetime + datetime.timedelta(seconds=epoch_count_in_seconds)
if rfc3339:
return future_datetime.isoformat("T") + "Z"
else:
return str(calendar.timegm(future_datetime.timetuple()))
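# Usage sketch (assuming epoch_duration=240 seconds):
#   epoch_count_into_mins(240, 2) -> "8.0m"
#   epoch_count_into_timestamp(240, 1) -> unix timestamp ~4 minutes in the future
#   epoch_count_into_timestamp(240, 1, rfc3339=True) -> "...T...Z" RFC3339-style string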
@allure.title("Check if (header_output) Key=Value exists and equal in passed (header_to_find)")
def check_key_value_presented_header(self, header_output: dict, header_to_find: dict) -> bool:
header_att = header_output["header"]["attributes"]
for key_to_check, val_to_check in header_to_find.items():
if key_to_check not in header_att or val_to_check != header_att[key_to_check]:
logger.info(f"Unable to find {key_to_check}: '{val_to_check}' in {header_att}")
return False
return True
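# The gate is expected to normalize System-Expiration-{Duration,Timestamp,RFC3339} inputs into a single __SYSTEM__EXPIRATION_EPOCH attribute, so the checks below assert that only that attribute remains on the object.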
@allure.title(f"Validate that only {EXPIRATION_EPOCH_HEADER} exists in header and other headers are abesent")
def validation_for_http_header_attr(self, head_info: dict, expected_epoch: int) -> None:
# check that __SYSTEM__EXPIRATION_EPOCH attribute has corresponding epoch
assert self.check_key_value_presented_header(
head_info, {EXPIRATION_EPOCH_HEADER: str(expected_epoch)}
), f'Expected to find {EXPIRATION_EPOCH_HEADER}: {expected_epoch} in: {head_info["header"]["attributes"]}'
# check that EXPIRATION_DURATION_HEADER is absent in the header output
assert not (
self.check_key_value_presented_header(head_info, {EXPIRATION_DURATION_HEADER: ""})
), f"Only {EXPIRATION_EPOCH_HEADER} can be displayed in header attributes"
# check that EXPIRATION_TIMESTAMP_HEADER is absent in the header output
assert not (
self.check_key_value_presented_header(head_info, {EXPIRATION_TIMESTAMP_HEADER: ""})
), f"Only {EXPIRATION_TIMESTAMP_HEADER} can be displayed in header attributes"
# check that EXPIRATION_EXPIRATION_RFC is absent in the header output
assert not (
self.check_key_value_presented_header(head_info, {EXPIRATION_EXPIRATION_RFC: ""})
), f"Only {EXPIRATION_EXPIRATION_RFC} can be displayed in header attributes"
@allure.title("Put / get / verify object and return head command result to invoker")
def oid_header_info_for_object(self, default_wallet: WalletInfo, container: str, test_file: str, attributes: dict):
oid = upload_via_http_gate_curl(
cid=container,
filepath=test_file,
endpoint=self.cluster.default_http_gate_endpoint,
headers=attr_into_str_header_curl(attributes),
)
verify_object_hash(
oid=oid,
file_name=test_file,
wallet=default_wallet,
cid=container,
shell=self.shell,
nodes=self.cluster.storage_nodes,
request_node=self.cluster.cluster_nodes[0],
)
head = head_object(
wallet=default_wallet,
cid=container,
oid=oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
return oid, head
@allure.title("[NEGATIVE] Put object with expired epoch")
@requires_container(REP_2_1_2_PUBLIC)
def test_unable_put_expired_epoch(self, container: str, simple_object_size: ObjectSize):
headers = attr_into_str_header_curl({"System-Expiration-Epoch": str(get_epoch(self.shell, self.cluster) - 1)})
file_path = generate_file(simple_object_size.value)
with reporter.step("Put object using HTTP with attribute Expiration-Epoch where epoch is expired"):
upload_via_http_gate_curl(
cid=container,
filepath=file_path,
endpoint=self.cluster.default_http_gate_endpoint,
headers=headers,
error_pattern="must be greater than current epoch",
)
@allure.title("[NEGATIVE] Put object with negative System-Expiration-Duration")
@requires_container(REP_2_1_2_PUBLIC)
def test_unable_put_negative_duration(self, container: str, simple_object_size: ObjectSize):
headers = attr_into_str_header_curl({"System-Expiration-Duration": "-1h"})
file_path = generate_file(simple_object_size.value)
with reporter.step("Put object using HTTP with attribute System-Expiration-Duration where duration is negative"):
upload_via_http_gate_curl(
cid=container,
filepath=file_path,
endpoint=self.cluster.default_http_gate_endpoint,
headers=headers,
error_pattern=f"{EXPIRATION_DURATION_HEADER} must be positive",
)
@allure.title("[NEGATIVE] Put object with System-Expiration-Timestamp value in the past")
@requires_container(REP_2_1_2_PUBLIC)
def test_unable_put_expired_timestamp(self, container: str, simple_object_size: ObjectSize):
headers = attr_into_str_header_curl({"System-Expiration-Timestamp": "1635075727"})
file_path = generate_file(simple_object_size.value)
with reporter.step("Put object using HTTP with attribute System-Expiration-Timestamp where duration is in the past"):
upload_via_http_gate_curl(
cid=container,
filepath=file_path,
endpoint=self.cluster.default_http_gate_endpoint,
headers=headers,
error_pattern=f"{EXPIRATION_TIMESTAMP_HEADER} must be in the future",
)
@allure.title("[NEGATIVE] Put object using HTTP with attribute System-Expiration-RFC3339 where duration is in the past")
@requires_container(REP_2_1_2_PUBLIC)
def test_unable_put_expired_rfc(self, container: str, simple_object_size: ObjectSize):
headers = attr_into_str_header_curl({"System-Expiration-RFC3339": "2021-11-22T09:55:49Z"})
file_path = generate_file(simple_object_size.value)
upload_via_http_gate_curl(
cid=container,
filepath=file_path,
endpoint=self.cluster.default_http_gate_endpoint,
headers=headers,
error_pattern=f"{EXPIRATION_EXPIRATION_RFC} must be in the future",
)
@allure.title("Priority of attributes epoch>duration (obj_size={object_size})")
@requires_container(REP_2_1_2_PUBLIC)
def test_http_attr_priority_epoch_duration(
self, default_wallet: WalletInfo, container: str, object_size: ObjectSize, epoch_duration: int
):
self.tick_epoch()
epoch_count = 1
expected_epoch = get_epoch(self.shell, self.cluster) + epoch_count
logger.info(
f"epoch duration={epoch_duration}, current_epoch= {get_epoch(self.shell, self.cluster)} expected_epoch {expected_epoch}"
)
attributes = {SYSTEM_EXPIRATION_EPOCH: expected_epoch, SYSTEM_EXPIRATION_DURATION: "1m"}
file_path = generate_file(object_size.value)
with reporter.step(
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
):
oid, head_info = self.oid_header_info_for_object(default_wallet, container, file_path, attributes)
self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch)
with reporter.step("Check that object becomes unavailable when epoch is expired"):
for _ in range(0, epoch_count + 1):
self.tick_epoch()
assert (
get_epoch(self.shell, self.cluster) == expected_epoch + 1
), f"Epochs should be equal: {get_epoch(self.shell, self.cluster)} != {expected_epoch + 1}"
with reporter.step("Check object deleted because it expires-on epoch"):
wait_for_epochs_align(self.shell, self.cluster)
try_to_get_object_and_expect_error(
cid=container,
oid=oid,
node=self.cluster.cluster_nodes[0],
error_pattern="404 Not Found",
)
# check that object is not available via grpc
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
get_object_from_random_node(default_wallet, container, oid, self.shell, self.cluster)
@allure.title("Priority of attributes duration>timestamp (obj_size={object_size})")
@requires_container(REP_2_1_2_PUBLIC)
def test_http_attr_priority_dur_timestamp(
self, default_wallet: WalletInfo, container: str, object_size: ObjectSize, epoch_duration: int
):
self.tick_epoch()
epoch_count = 2
expected_epoch = get_epoch(self.shell, self.cluster) + epoch_count
logger.info(
f"epoch duration={epoch_duration}, current_epoch= {get_epoch(self.shell, self.cluster)} expected_epoch {expected_epoch}"
)
attributes = {
SYSTEM_EXPIRATION_DURATION: self.epoch_count_into_mins(epoch_duration=epoch_duration, epoch=2),
SYSTEM_EXPIRATION_TIMESTAMP: self.epoch_count_into_timestamp(epoch_duration=epoch_duration, epoch=1),
}
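# Duration (2 epochs) is expected to take precedence over the earlier timestamp (1 epoch).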
file_path = generate_file(object_size.value)
with reporter.step(
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
):
oid, head_info = self.oid_header_info_for_object(default_wallet, container, file_path, attributes)
self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch)
with reporter.step("Check that object becomes unavailable when epoch is expired"):
for _ in range(0, epoch_count + 1):
self.tick_epoch()
assert (
get_epoch(self.shell, self.cluster) == expected_epoch + 1
), f"Epochs should be equal: {get_epoch(self.shell, self.cluster)} != {expected_epoch + 1}"
with reporter.step("Check object deleted because it expires-on epoch"):
wait_for_epochs_align(self.shell, self.cluster)
try_to_get_object_and_expect_error(
cid=container,
oid=oid,
node=self.cluster.cluster_nodes[0],
error_pattern="404 Not Found",
)
# check that object is not available via grpc
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
get_object_from_random_node(default_wallet, container, oid, self.shell, self.cluster)
@allure.title("Priority of attributes timestamp>Expiration-RFC (obj_size={object_size})")
@requires_container(REP_2_1_2_PUBLIC)
def test_http_attr_priority_timestamp_rfc(
self, default_wallet: WalletInfo, container: str, object_size: ObjectSize, epoch_duration: int
):
self.tick_epoch()
epoch_count = 2
expected_epoch = get_epoch(self.shell, self.cluster) + epoch_count
logger.info(
f"epoch duration={epoch_duration}, current_epoch= {get_epoch(self.shell, self.cluster)} expected_epoch {expected_epoch}"
)
attributes = {
SYSTEM_EXPIRATION_TIMESTAMP: self.epoch_count_into_timestamp(epoch_duration=epoch_duration, epoch=2),
SYSTEM_EXPIRATION_RFC3339: self.epoch_count_into_timestamp(epoch_duration=epoch_duration, epoch=1, rfc3339=True),
}
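# Timestamp (2 epochs) is expected to take precedence over the earlier RFC3339 value (1 epoch).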
file_path = generate_file(object_size.value)
with reporter.step(
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
):
oid, head_info = self.oid_header_info_for_object(default_wallet, container, file_path, attributes)
self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch)
with reporter.step("Check that object becomes unavailable when epoch is expired"):
for _ in range(0, epoch_count + 1):
self.tick_epoch()
assert (
get_epoch(self.shell, self.cluster) == expected_epoch + 1
), f"Epochs should be equal: {get_epoch(self.shell, self.cluster)} != {expected_epoch + 1}"
with reporter.step("Check object deleted because it expires-on epoch"):
wait_for_epochs_align(self.shell, self.cluster)
try_to_get_object_and_expect_error(
cid=container,
oid=oid,
node=self.cluster.cluster_nodes[0],
error_pattern="404 Not Found",
)
# check that object is not available via grpc
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
get_object_from_random_node(default_wallet, container, oid, self.shell, self.cluster)
@allure.title("Object should be deleted when expiration passed (obj_size={object_size})")
@pytest.mark.parametrize(
"object_size",
# TODO: "complex" temporarly disabled for v0.37
["simple"],
indirect=True,
)
@requires_container(REP_2_1_2_PUBLIC)
def test_http_rfc_object_unavailable_after_expir(
self, default_wallet: WalletInfo, container: str, object_size: ObjectSize, epoch_duration: int
):
self.tick_epoch()
epoch_count = 2
expected_epoch = get_epoch(self.shell, self.cluster) + epoch_count
logger.info(
f"epoch duration={epoch_duration}, current_epoch= {get_epoch(self.shell, self.cluster)} expected_epoch {expected_epoch}"
)
attributes = {SYSTEM_EXPIRATION_RFC3339: self.epoch_count_into_timestamp(epoch_duration=epoch_duration, epoch=2, rfc3339=True)}
file_path = generate_file(object_size.value)
with reporter.step(
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
):
oid, head_info = self.oid_header_info_for_object(default_wallet, container, file_path, attributes)
self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch)
with reporter.step("Check that object becomes unavailable when epoch is expired"):
for _ in range(0, epoch_count + 1):
self.tick_epoch()
assert (
get_epoch(self.shell, self.cluster) == expected_epoch + 1
), f"Epochs should be equal: {get_epoch(self.shell, self.cluster)} != {expected_epoch + 1}"
with reporter.step("Check object deleted because it expires-on epoch"):
wait_for_epochs_align(self.shell, self.cluster)
try_to_get_object_and_expect_error(
cid=container,
oid=oid,
node=self.cluster.cluster_nodes[0],
error_pattern="404 Not Found",
)
# check that object is not available via grpc
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
get_object_from_random_node(default_wallet, container, oid, self.shell, self.cluster)

Some files were not shown because too many files have changed in this diff.