Compare commits

..

3 commits

Author SHA1 Message Date
anastasia prasolova
157602ca06 [nspcc-dev/nspcc-infra#840]: add DCO check
Signed-off-by: anastasia prasolova <anastasia@nspcc.ru>
2022-09-22 19:12:38 +03:00
anastasia prasolova
dd19303442 --lifetime key and ACL fixes for Storage Groups
Signed-off-by: anastasia prasolova <anastasia@nspcc.ru>
2022-08-18 16:46:16 +03:00
anastasia prasolova
e09498bc4e tests duplications removed from acl_extended_filters testsuite
Signed-off-by: anastasia prasolova <anastasia@nspcc.ru>
2022-08-10 13:30:33 +03:00
176 changed files with 9857 additions and 12362 deletions


@@ -1,110 +0,0 @@
hosts:
- address: localhost
  plugin_name: docker
  services:
  - name: s01
    attributes:
      container_name: s01
      config_path: ../frostfs-dev-env/services/storage/.storage.env
      wallet_path: ../frostfs-dev-env/services/storage/wallet01.json
      local_config_path: ./TemporaryDir/empty-password.yml
      local_wallet_path: ../frostfs-dev-env/services/storage/wallet01.json
      wallet_password: ""
      volume_name: storage_storage_s01
      endpoint_data0: s01.frostfs.devenv:8080
      control_endpoint: s01.frostfs.devenv:8081
      un_locode: "RU MOW"
      http_hostname: "no_hostname"
      s3_hostname: "no_hostname"
  - name: s02
    attributes:
      container_name: s02
      config_path: ../frostfs-dev-env/services/storage/.storage.env
      wallet_path: ../frostfs-dev-env/services/storage/wallet02.json
      local_config_path: ./TemporaryDir/empty-password.yml
      local_wallet_path: ../frostfs-dev-env/services/storage/wallet02.json
      wallet_password: ""
      volume_name: storage_storage_s02
      endpoint_data0: s02.frostfs.devenv:8080
      control_endpoint: s02.frostfs.devenv:8081
      un_locode: "RU LED"
      http_hostname: "no_hostname"
      s3_hostname: "no_hostname"
  - name: s03
    attributes:
      container_name: s03
      config_path: ../frostfs-dev-env/services/storage/.storage.env
      wallet_path: ../frostfs-dev-env/services/storage/wallet03.json
      local_config_path: ./TemporaryDir/empty-password.yml
      local_wallet_path: ../frostfs-dev-env/services/storage/wallet03.json
      wallet_password: ""
      volume_name: storage_storage_s03
      endpoint_data0: s03.frostfs.devenv:8080
      control_endpoint: s03.frostfs.devenv:8081
      un_locode: "SE STO"
      http_hostname: "no_hostname"
      s3_hostname: "no_hostname"
  - name: s04
    attributes:
      container_name: s04
      config_path: ../frostfs-dev-env/services/storage/.storage.env
      wallet_path: ../frostfs-dev-env/services/storage/wallet04.json
      local_config_path: ./TemporaryDir/empty-password.yml
      local_wallet_path: ../frostfs-dev-env/services/storage/wallet04.json
      wallet_password: ""
      volume_name: storage_storage_s04
      endpoint_data0: s04.frostfs.devenv:8080
      control_endpoint: s04.frostfs.devenv:8081
      un_locode: "FI HEL"
      http_hostname: "no_hostname"
      s3_hostname: "no_hostname"
  - name: s3-gate01
    attributes:
      container_name: s3_gate
      config_path: ../frostfs-dev-env/services/s3_gate/.s3.env
      wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json
      local_config_path: ./TemporaryDir/password-s3.yml
      local_wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json
      wallet_password: "s3"
      endpoint_data0: https://s3.frostfs.devenv:8080
  - name: http-gate01
    attributes:
      container_name: http_gate
      config_path: ../frostfs-dev-env/services/http_gate/.http.env
      wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json
      local_config_path: ./TemporaryDir/password-other.yml
      local_wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json
      wallet_password: "one"
      endpoint_data0: http://http.frostfs.devenv
  - name: ir01
    attributes:
      container_name: ir01
      config_path: ../frostfs-dev-env/services/ir/.ir.env
      wallet_path: ../frostfs-dev-env/services/ir/az.json
      local_config_path: ./TemporaryDir/password-other.yml
      local_wallet_path: ../frostfs-dev-env/services/ir/az.json
      wallet_password: "one"
  - name: morph-chain01
    attributes:
      container_name: morph_chain
      config_path: ../frostfs-dev-env/services/morph_chain/protocol.privnet.yml
      wallet_path: ../frostfs-dev-env/services/morph_chain/node-wallet.json
      local_config_path: ./TemporaryDir/password-other.yml
      local_wallet_path: ../frostfs-dev-env/services/morph_chain/node-wallet.json
      wallet_password: "one"
      endpoint_internal0: http://morph-chain.frostfs.devenv:30333
  - name: main-chain01
    attributes:
      container_name: main_chain
      config_path: ../frostfs-dev-env/services/chain/protocol.privnet.yml
      wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json
      local_config_path: ./TemporaryDir/password-other.yml
      local_wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json
      wallet_password: "one"
      endpoint_internal0: http://main-chain.frostfs.devenv:30333
  - name: coredns01
    attributes:
      container_name: coredns
  clis:
  - name: frostfs-cli
    exec_path: frostfs-cli


@@ -8,5 +8,5 @@ exclude =
per-file-ignores =
    # imported but unused
    __init__.py: F401
max-line-length = 100
max-line-length = 120
disable-noqa

1
.github/CODEOWNERS vendored

@@ -1 +0,0 @@
* @vdomnich-yadro @dansingjulia @yadro-vavdeev @alexchetaev @abereziny


@@ -4,7 +4,6 @@ on:
  pull_request:
    branches:
      - master
      - develop
jobs:
  commits_check_job:

21
.gitignore vendored

@@ -1,29 +1,12 @@
# ignore IDE files
.vscode
.idea
.DS_Store
venv.*
venv_macos
# ignore test results
# ignore test result files under any path
**/log.html
**/output.xml
**/report.html
**/dockerlogs*.tar.gz
allure_results/*
xunit_results.xml
# ignore caches under any path
# ignore pycache under any path
**/__pycache__
**/.pytest_cache
# ignore work directories and setup files
.setup
.env
TemporaryDir/*
artifacts/*
docs/*
venv.*/*
wallet_config.yml

4
.gitmodules vendored Normal file

@@ -0,0 +1,4 @@
[submodule "neofs-keywords"]
    path = neofs-keywords
    url = ssh://git@github.com/nspcc-dev/neofs-keywords.git
    ignore = all


@@ -1,15 +0,0 @@
repos:
  - repo: https://github.com/psf/black
    rev: 22.8.0
    hooks:
      - id: black
        language_version: python3.10
  - repo: https://github.com/pycqa/isort
    rev: 5.12.0
    hooks:
      - id: isort
        name: isort (python)
ci:
  autofix_prs: false
  autoupdate_schedule: quarterly


@@ -1,177 +0,0 @@
# Contribution guide
First, thank you for contributing! We love and encourage pull requests from
everyone. Please follow the guidelines:
- Check the open [issues](https://github.com/TrueCloudLab/frostfs-testcases/issues) and
[pull requests](https://github.com/TrueCloudLab/frostfs-testcases/pulls) for existing
discussions.
- Open an issue first, to discuss a new feature or enhancement.
- Write tests, and make sure the test suite passes locally.
- Open a pull request, and reference the relevant issue(s).
- Make sure your commits are logically separated and have good comments
explaining the details of your change.
- After receiving feedback, amend your commits or add new ones as appropriate.
- **Have fun!**
## Development Workflow
Start by forking the `frostfs-testcases` repository, making changes in a branch, and then
sending a pull request. We encourage pull requests to discuss code changes. Here
are the steps in detail:
### Set up your GitHub Repository
Fork the [FrostFS testcases upstream](https://github.com/TrueCloudLab/frostfs-testcases/fork) source
repository to your own personal repository. Copy the URL of your fork and clone it:
```shell
$ git clone <url of your fork>
```
### Set up git remote as ``upstream``
```sh
$ cd frostfs-testcases
$ git remote add upstream https://github.com/TrueCloudLab/frostfs-testcases
$ git fetch upstream
```
### Set up development environment
To set up the development environment for `frostfs-testcases`, please take the following steps:
1. Prepare virtualenv
```shell
$ make venv
$ source frostfs-testcases-3.10/bin/activate
```
Optionally you might want to integrate code formatters with your code editor to apply formatters to code files as you go:
* isort is supported by [PyCharm](https://plugins.jetbrains.com/plugin/15434-isortconnect), [VS Code](https://cereblanco.medium.com/setup-black-and-isort-in-vscode-514804590bf9). Plugins exist for other IDEs/editors as well.
* black can be integrated with multiple editors; instructions are available [here](https://black.readthedocs.io/en/stable/integrations/editors.html).
### Create your feature branch
Before making code changes, make sure you create a separate branch for these
changes. You may find it convenient to name the branch in
`<type>/<issue>-<changes_topic>` format.
```shell
$ git checkout -b feature/123-something_awesome
```
### Commit changes
After verification, commit your changes. There is a [great
post](https://chris.beams.io/posts/git-commit/) on how to write useful commit
messages. Try following this template:
```
[#Issue] Summary
Description
<Macros>
<Sign-Off>
```
```shell
$ git commit -am '[#123] Add some feature'
```
### Push to the branch
Push your locally committed changes to the remote origin (your fork):
```shell
$ git push origin feature/123-something_awesome
```
### Create a Pull Request
Pull requests can be created via GitHub. Refer to [this
document](https://help.github.com/articles/creating-a-pull-request/) for
detailed steps on how to create a pull request. After a Pull Request gets peer
reviewed and approved, it will be merged.
## Code Style
The names of Python variables, functions and classes must comply with [PEP8](https://peps.python.org/pep-0008) rules, in particular:
* Name of a variable/function must be in snake_case (lowercase, with words separated by underscores as necessary to improve readability).
* Name of a global variable must be in UPPER_SNAKE_CASE, the underscore (`_`) symbol must be used as a separator between words.
* Name of a class must be in PascalCase (the first letter of each compound word in a variable name is capitalized).
Names of other variables should not end with an underscore symbol.
The line length limit is set to 100 characters.
Imports should be ordered in accordance with [isort default rules](https://pycqa.github.io/isort/).
We use `black` and `isort` for code formatting. Please, refer to [Black code style](https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html) for details.
Type hints are mandatory for the library's code:
- class attributes;
- function or method's parameters;
- function or method's return type.
The only exception is the return type of test functions or methods: there is not much use in specifying `None` as the return type for each test function.
Do not use relative imports. Even if the module is in the same package, use the full package name.
To format docstrings, please use [Google Style Docstrings](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html). Type annotations should be specified in the code and not in docstrings (refer to [this sample](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/index.html#type-annotations)).
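For illustration, here is a minimal sketch of a module that follows the conventions above; all names in it (`DEFAULT_TIMEOUT`, `ObjectInfo`, `format_object_info`) are hypothetical and do not refer to real code in this repository:
```python
"""Hypothetical example module following the code style described above."""

from typing import Optional

# Global variables use UPPER_SNAKE_CASE.
DEFAULT_TIMEOUT: int = 30


class ObjectInfo:
    """Holds basic information about a stored object.

    Attributes:
        object_id: Identifier of the object.
        size: Object size in bytes.
    """

    object_id: str
    size: int

    def __init__(self, object_id: str, size: int) -> None:
        self.object_id = object_id
        self.size = size


def format_object_info(info: ObjectInfo, prefix: Optional[str] = None) -> str:
    """Return a human-readable description of an object.

    Args:
        info: Object to describe.
        prefix: Optional string prepended to the description.

    Returns:
        Description string.
    """
    description = f"{info.object_id} ({info.size} bytes)"
    return f"{prefix}: {description}" if prefix else description


def test_format_object_info():
    # Test functions may omit the return type annotation.
    assert format_object_info(ObjectInfo("abc", 100)) == "abc (100 bytes)"
```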
## DCO Sign off
All authors to the project retain copyright to their work. However, to ensure
that they are only submitting work that they have rights to, we are requiring
everyone to acknowledge this by signing their work.
Any copyright notices in this repository should specify the authors as "the
contributors".
To sign your work, just add a line like this at the end of your commit message:
```
Signed-off-by: Samii Sakisaka <samii@nspcc.ru>
```
This can easily be done with the `--signoff` option to `git commit`.
By doing this you state that you can certify the following (from [The Developer
Certificate of Origin](https://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
1 Letterman Drive
Suite D4700
San Francisco, CA, 94129
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```

674
LICENSE

@@ -1,674 +0,0 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.


@@ -1,42 +1,38 @@
SHELL := /bin/bash
PYTHON_VERSION := 3.10
VENV_NAME = frostfs-testcases-${PYTHON_VERSION}
VENV_DIR := venv.${VENV_NAME}
#!/usr/bin/make -f
current_dir := $(shell pwd)
FROM_VENV := . ${VENV_DIR}/bin/activate &&
.DEFAULT_GOAL := help
venv: create requirements paths precommit
@echo Ready
SHELL = bash
precommit:
@echo Installing pre-commit hooks
${FROM_VENV} pre-commit install
OUTPUT_DIR = artifacts/
KEYWORDS_REPO = git@github.com:nspcc-dev/neofs-keywords.git
VENVS = $(shell ls -1d venv/*/ | sort -u | xargs basename -a)
paths:
@echo Append paths for project
@echo Virtual environment: ${VENV_DIR}
@rm -rf ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
@touch ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
@echo ${current_dir} | tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
.PHONY: all
all: venvs
create: ${VENV_DIR}
include venv_template.mk
${VENV_DIR}:
@echo Create virtual environment ${VENV_DIR}
virtualenv --python=python${PYTHON_VERSION} --prompt=frostfs-testlib ${VENV_DIR}
run: venvs
@echo "⇒ Test Run"
@robot --timestampoutputs --outputdir $(OUTPUT_DIR) robot/testsuites/integration/
requirements:
@echo Installing pip requirements
${FROM_VENV} pip install -e ../frostfs-testlib
${FROM_VENV} pip install -Ur requirements.txt
${FROM_VENV} pip install -Ur requirements_dev.txt
.PHONY: venvs
venvs:
$(foreach venv,$(VENVS),venv.$(venv))
$(foreach venv,$(VENVS),$(eval $(call VENV_template,$(venv))))
#### VALIDATION SECTION ####
lint: create requirements
${FROM_VENV} pip install -e ../frostfs-testlib;
${FROM_VENV} pylint --disable R,C,W pytest_tests
submodules:
@git submodule init
@git submodule update --recursive --remote
validation: lint
${FROM_VENV} pytest --collect-only
clean:
rm -rf venv.*
pytest-local:
@echo "⇒ Run Pytest"
python -m pytest pytest_tests/testsuites/
help:
@echo "⇒ run Run testcases ${R}"

191
README.md

@@ -1,132 +1,163 @@
## Testcases structure
Tests written with the PyTest framework are located under the `pytest_tests/testsuites` directory.
These tests rely on resources and utility modules that were originally developed for the PyTest framework.
## Testcases execution
### Initial preparation
1. Install frostfs-cli
- `git clone git@github.com:TrueCloudLab/frostfs-node.git`
- `cd frostfs-node`
1. Install neofs-cli
- `git clone git@github.com:nspcc-dev/neofs-node.git`
- `cd neofs-node`
- `make`
- `sudo cp bin/frostfs-cli /usr/local/bin/frostfs-cli`
- `sudo cp bin/neofs-cli /usr/local/bin/neofs-cli`
2. Install frostfs-s3-authmate
- `git clone git@github.com:TrueCloudLab/frostfs-s3-gw.git`
- `cd frostfs-s3-gw`
2. Install neofs-authmate
- `git clone git@github.com:nspcc-dev/neofs-s3-gw.git`
- `cd neofs-s3-gw`
- `make`
- `sudo cp bin/frostfs-s3-authmate /usr/local/bin/frostfs-s3-authmate`
- `sudo cp bin/neofs-authmate /usr/local/bin/neofs-authmate`
3. Install neo-go
- `git clone git@github.com:nspcc-dev/neo-go.git`
- `cd neo-go`
- `git checkout v0.101.0` (or the current version in the frostfs-dev-env)
- `git checkout v0.92.0` (or the current version in the neofs-dev-env)
- `make`
- `sudo cp bin/neo-go /usr/local/bin/neo-go`
or download binary from releases: https://github.com/nspcc-dev/neo-go/releases
4. Clone frostfs-dev-env
`git clone git@github.com:TrueCloudLab/frostfs-dev-env.git`
4. Clone neofs-dev-env
`git clone git@github.com:nspcc-dev/neofs-dev-env.git`
Note that we expect frostfs-dev-env to be located under
the `<testcases_root_dir>/../frostfs-dev-env` directory. If you put this repo in any other place,
manually set the full path to frostfs-dev-env in the environment variable `DEVENV_PATH` at this step.
Note that we expect neofs-dev-env to be located under
the `<testcases_root_dir>/../neofs-dev-env` directory. If you put this repo in any other place,
manually set the full path to neofs-dev-env in the environment variable `DEVENV_PATH` at this step.
5. Make sure you have installed all the following prerequisites on your machine
5. Make sure you have installed all of the following prerequisites on your machine
```
make
python3.10
python3.10-dev
python3.9
python3.9-dev
libssl-dev
```
As we use frostfs-dev-env, you'll also need to install
[prerequisites](https://github.com/TrueCloudLab/frostfs-dev-env#prerequisites) of this repository.
As we use neofs-dev-env, you'll also need to install
[prerequisites](https://github.com/nspcc-dev/neofs-dev-env#prerequisites) of this repository.
6. Prepare virtualenv
## Robot Framework
```shell
$ make venv
$ source venv.frostfs-testcases-3.10/bin/activate
### Run
1. Prepare virtualenv
```
$ make venv.localtest
$ . venv.localtest/bin/activate
```
7. Optionally you might want to integrate code formatters with your code editor to apply formatters to code files as you go:
* isort is supported by [PyCharm](https://plugins.jetbrains.com/plugin/15434-isortconnect), [VS Code](https://cereblanco.medium.com/setup-black-and-isort-in-vscode-514804590bf9). Plugins exist for other IDEs/editors as well.
* black can be integrated with multiple editors; instructions are available [here](https://black.readthedocs.io/en/stable/integrations/editors.html).
2. Run tests
8. Install Allure CLI
In the activated virtualenv, execute the following command(s) to run a singular testsuite or all the suites in the directory
```
$ robot --outputdir artifacts/ robot/testsuites/integration/<UserScenario>
$ robot --outputdir artifacts/ robot/testsuites/integration/<UserScenario>/<testcase>.robot
```
Allure CLI installation is not an easy task, so a better option might be to run Allure from a
Docker container (refer to step 2 of the next section for instructions).
To install Allure CLI, you may choose one of the following options:
### Generation of documentation
To generate Keywords documentation:
```
python3 -m robot.libdoc robot/resources/lib/neofs.py docs/NeoFS_Library.html
python3 -m robot.libdoc robot/resources/lib/payment_neogo.py docs/Payment_Library.html
```
To generate testcases documentation:
```
python3 -m robot.testdoc robot/testsuites/integration/ docs/testcases.html
```
### Source code overview
`robot/` - Files related to or dependent on Robot Framework.
`robot/resources/` - All resources (Robot Framework Keywords, Python Libraries, etc.) which could be used for creating test suites.
`robot/resources/lib/` - Common Python libraries that depend on Robot Framework (with Keywords), for example neofs.py, payment.py.
`robot/variables/` - All variables for tests; contains Python files. It is possible to add auto-loading of parameters from the smart contract in the future.
`robot/testsuites/` - Robot TestSuites and TestCases.
`robot/testsuites/integration/` - Integration test suites and testcases
### Code style
A Robot Framework keyword should use a space as a separator between words.
The name of the library function in Robot Framework keyword usage and the name of the same function in the Python library must be identical.
The name of a GLOBAL VARIABLE must be in UPPER CASE; the underscore (`_`) symbol must be used as a separator between words.
The name of a local variable must be in lower case; the underscore symbol must be used as a separator between words.
The names of Python variables, functions and classes must comply with accepted rules, in particular:
The name of a variable/function must be in lower case, with the underscore symbol between words.
The name of a class must start with a capital letter. Underscores are not allowed in class names; capitalize each word instead.
For example: NeoFSConf
Names of other variables should not end with an underscore symbol.
When defining keywords, one should specify the variable type, e.g. `path: str`.
### Robot style
You should always complete the [Tags] and [Documentation] sections for Testcases and Documentation for Test Suites.
### Robot-framework User Guide
http://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html
## PyTest
Tests written with the PyTest framework are located under the `pytest_tests/testsuites` directory.
### Run and get report
1. Prepare virtualenv
```
$ make venv.local-pytest
$ . venv.local-pytest/bin/activate
```
2. Install Allure CLI
Allure CLI installation is not an easy task. You may select one of the following options. If none of the options helps you, please complete this instruction with your approach:
- Follow the [instruction](https://docs.qameta.io/allure/#_linux) from the official website
- Consult [the thread](https://github.com/allure-framework/allure2/issues/989)
- Download a release from GitHub
```shell
```
$ wget https://github.com/allure-framework/allure2/releases/download/2.18.1/allure_2.18.1-1_all.deb
$ sudo apt install ./allure_2.18.1-1_all.deb
```
You also need the `default-jre` package installed.
If none of the options worked for you, please complete the instruction with your approach.
3. Run tests
### Run and get report
1. Run tests
Make sure that the virtualenv is activated, then execute the following command to run a singular test suite or all the suites in the directory
```shell
In the activated virtualenv, execute the following command(s) to run a singular testsuite or all the suites in the directory
```
$ pytest --alluredir my-allure-123 pytest_tests/testsuites/object/test_object_api.py
$ pytest --alluredir my-allure-123 pytest_tests/testsuites/
```
2. Generate report
4. Generate report
If you opted to install Allure CLI, you can generate a report using the command `allure generate`. The web representation of the report will be under `allure-report` directory:
```shell
To generate a report, execute the command `allure generate`. The report will be under the `allure-report` directory.
```
$ allure generate my-allure-123
$ ls allure-report/
app.js data export favicon.ico history index.html plugins styles.css widgets
```
To inspect the report in a browser, run
```shell
$ allure serve my-allure-123
```
If you prefer to run allure from Docker, you can use the following command:
```shell
$ mkdir -p $PWD/allure-reports
$ docker run -p 5050:5050 -e CHECK_RESULTS_EVERY_SECONDS=30 -e KEEP_HISTORY=1 \
-v $PWD/my-allure-123:/app/allure-results \
-v $PWD/allure-reports:/app/default-reports \
frankescobar/allure-docker-service
```
Then you can check the Allure report in your browser [by this link](http://localhost:5050/allure-docker-service/projects/default/reports/latest/index.html?redirect=false)
NOTE: feel free to select a different location for the `allure-reports` directory; there is no requirement to have it inside `frostfs-testcases`. For example, you can place it under `/tmp`.
# Contributing
Feel free to contribute to this project after reading the [contributing
guidelines](CONTRIBUTING.md).
Before starting to work on a certain topic, create a new issue first, describing
the feature/topic you are going to implement.
# License
- [GNU General Public License v3.0](LICENSE)
## Pytest marks
Custom pytest marks used in tests:
* `sanity` - Tests that must run in sanity test runs.
* `smoke` - Tests that must run in smoke test runs.
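For illustration, a hypothetical test module showing how these marks might be applied (the test class and its body are made up, not part of the suite); a subset can then be selected with `pytest -m sanity pytest_tests/testsuites/`:
```python
# Hypothetical sketch of applying the custom marks defined in pytest.ini.
import allure
import pytest


@pytest.mark.sanity
@pytest.mark.smoke
class TestContainerSmoke:
    @allure.title("Container can be created with default parameters")
    def test_create_container(self):
        with allure.step("Create container"):
            # A real test would call the cluster/container helpers here;
            # this placeholder only illustrates how marks are attached.
            assert True
```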

View file

@ -0,0 +1,31 @@
diff -urN bin.orig/activate bin/activate
--- bin.orig/activate 2018-12-27 14:55:13.916461020 +0900
+++ bin/activate 2018-12-27 20:38:35.223248728 +0900
@@ -30,6 +30,15 @@
unset _OLD_VIRTUAL_PS1
fi
+ # Unset exported dev-env variables
+ pushd ${DEVENV_PATH} > /dev/null
+ unset `make env | awk -F= '{print $1}'`
+ popd > /dev/null
+
+ # Unset external env variables
+ declare -f env_deactivate > /dev/null && env_deactivate
+ declare -f venv_deactivate > /dev/null && venv_deactivate
+
unset VIRTUAL_ENV
if [ ! "${1-}" = "nondestructive" ] ; then
# Self destruct!
@@ -47,6 +56,11 @@
PATH="$VIRTUAL_ENV/bin:$PATH"
export PATH
+# Set external variables
+if [ -f ${VIRTUAL_ENV}/bin/environment.sh ] ; then
+ . ${VIRTUAL_ENV}/bin/environment.sh
+fi
+
# unset PYTHONHOME if set
if ! [ -z "${PYTHONHOME+_}" ] ; then
_OLD_VIRTUAL_PYTHONHOME="$PYTHONHOME"

81
configuration/devenv.yml Normal file
View file

@ -0,0 +1,81 @@
---
# Object sizes
simple_obj_size: 1000
complex_obj_size: 2000
# Timeouts
container_wait_interval: '1m'
mainnet_block_time: '1s'
mainnet_timeout: '1min'
morph_block_time: '1s'
neofs_contract_cache_timeout: '30s'
shard_remove_interval: '1m'
# Services endpoints
neofs_endpoint: 's01.neofs.devenv:8080'
neo_mainnet_endpoint: 'http://main-chain.neofs.devenv:30333'
morph_endpoint: 'http://morph-chain.neofs.devenv:30333'
http_gate: 'http://http.neofs.devenv'
s3_gate: 'https://s3.neofs.devenv:8080'
storage_node_1: 's01.neofs.devenv:8080'
storage_node_2: 's02.neofs.devenv:8080'
storage_node_3: 's03.neofs.devenv:8080'
storage_node_4: 's04.neofs.devenv:8080'
neofs_netmap:
s01:
rpc: 's01.neofs.devenv:8080'
control: 's01.neofs.devenv:8081'
wallet_path: '../neofs-dev-env/services/storage/wallet01.json'
un_locode: 'RU MOW'
s02:
rpc: 's02.neofs.devenv:8080'
control: 's02.neofs.devenv:8081'
wallet_path: '../neofs-dev-env/services/storage/wallet02.json'
un_locode: 'RU LED'
s03:
rpc: 's03.neofs.devenv:8080'
control: 's03.neofs.devenv:8081'
wallet_path: '../neofs-dev-env/services/storage/wallet03.json'
un_locode: 'SE STO'
s04:
rpc: 's04.neofs.devenv:8080'
control: 's04.neofs.devenv:8081'
wallet_path: '../neofs-dev-env/services/storage/wallet04.json'
un_locode: 'FI HEL'
# Paths to binaries
neogo_cli_exec: 'neo-go'
neogo_executable: 'neo-go'
neofs_cli_exec: 'neofs-cli'
# Neo Blockchain configuration
gas_hash: '0xd2a4cff31913016155e38e474a2c06d08be276cf'
neofs_contract: 'd07ec2a43d2f8638934d340bfb60b6c23afce106'
morph_magic: '15405'
# NeoFS common parameters
common_placement_rule: "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
# TODO: remove within the scope of
# https://github.com/nspcc-dev/neofs-testcases/issues/246
gate_pub_key: '0313b1ac3a8076e155a7e797b24f0b650cccad5941ea59d7cfd51a024a8b2a06bf'
# Wallets
devenv_services_path: '../neofs-dev-env/services'
wallet_config: 'neofs_cli_configs/empty_passwd.yml'
mainnet_wallet_path: '../neofs-dev-env/services/chain/node-wallet.json'
mainnet_wallet_config: 'neofs_cli_configs/one_wallet_password.yml'
mainnet_single_addr: 'NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP'
mainnet_wallet_pass: 'one'
ir_wallet_path: '../neofs-dev-env/services/ir/wallet01.json'
ir_wallet_config: 'neofs_cli_configs/one_wallet_password.yml'
ir_wallet_pass: 'one'
storage_wallet_path: '../neofs-dev-env/services/storage/wallet01.json'

1
neofs-keywords Submodule

@ -0,0 +1 @@
Subproject commit f66be076acb102a80e9f8abd5d1cde104673464e

View file

@ -0,0 +1 @@
password: ""

View file

@ -0,0 +1 @@
password: "one"

View file

@ -1,8 +0,0 @@
[tool.isort]
profile = "black"
src_paths = ["pytest_tests"]
line_length = 100
[tool.black]
line-length = 100
target-version = ["py310"]

View file

@ -1,149 +0,0 @@
from typing import List, Optional
from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.acl import EACLOperation
from pytest_tests.helpers.object_access import (
can_delete_object,
can_get_head_object,
can_get_object,
can_get_range_hash_of_object,
can_get_range_of_object,
can_put_object,
can_search_object,
)
def check_full_access_to_container(
wallet: str,
cid: str,
oid: str,
file_name: str,
shell: Shell,
cluster: Cluster,
bearer: Optional[str] = None,
wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None,
):
endpoint = cluster.default_rpc_endpoint
assert can_put_object(wallet, cid, file_name, shell, cluster, bearer, wallet_config, xhdr)
assert can_get_head_object(wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr)
assert can_get_range_of_object(wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr)
assert can_get_range_hash_of_object(
wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr
)
assert can_search_object(wallet, cid, shell, endpoint, oid, bearer, wallet_config, xhdr)
assert can_get_object(wallet, cid, oid, file_name, shell, cluster, bearer, wallet_config, xhdr)
assert can_delete_object(wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr)
def check_no_access_to_container(
wallet: str,
cid: str,
oid: str,
file_name: str,
shell: Shell,
cluster: Cluster,
bearer: Optional[str] = None,
wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None,
):
endpoint = cluster.default_rpc_endpoint
assert not can_put_object(wallet, cid, file_name, shell, cluster, bearer, wallet_config, xhdr)
assert not can_get_head_object(wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr)
assert not can_get_range_of_object(
wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr
)
assert not can_get_range_hash_of_object(
wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr
)
assert not can_search_object(wallet, cid, shell, endpoint, oid, bearer, wallet_config, xhdr)
assert not can_get_object(
wallet, cid, oid, file_name, shell, cluster, bearer, wallet_config, xhdr
)
assert not can_delete_object(wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr)
def check_custom_access_to_container(
wallet: str,
cid: str,
oid: str,
file_name: str,
shell: Shell,
cluster: Cluster,
deny_operations: Optional[List[EACLOperation]] = None,
ignore_operations: Optional[List[EACLOperation]] = None,
bearer: Optional[str] = None,
wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None,
):
endpoint = cluster.default_rpc_endpoint
deny_operations = [op.value for op in deny_operations or []]
ignore_operations = [op.value for op in ignore_operations or []]
checks: dict = {}
if EACLOperation.PUT.value not in ignore_operations:
checks[EACLOperation.PUT.value] = can_put_object(
wallet, cid, file_name, shell, cluster, bearer, wallet_config, xhdr
)
if EACLOperation.HEAD.value not in ignore_operations:
checks[EACLOperation.HEAD.value] = can_get_head_object(
wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr
)
if EACLOperation.GET_RANGE.value not in ignore_operations:
checks[EACLOperation.GET_RANGE.value] = can_get_range_of_object(
wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr
)
if EACLOperation.GET_RANGE_HASH.value not in ignore_operations:
checks[EACLOperation.GET_RANGE_HASH.value] = can_get_range_hash_of_object(
wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr
)
if EACLOperation.SEARCH.value not in ignore_operations:
checks[EACLOperation.SEARCH.value] = can_search_object(
wallet, cid, shell, endpoint, oid, bearer, wallet_config, xhdr
)
if EACLOperation.GET.value not in ignore_operations:
checks[EACLOperation.GET.value] = can_get_object(
wallet, cid, oid, file_name, shell, cluster, bearer, wallet_config, xhdr
)
if EACLOperation.DELETE.value not in ignore_operations:
checks[EACLOperation.DELETE.value] = can_delete_object(
wallet, cid, oid, shell, endpoint, bearer, wallet_config, xhdr
)
failed_checks = [
f"allowed {action} failed"
for action, success in checks.items()
if not success and action not in deny_operations
] + [
f"denied {action} succeeded"
for action, success in checks.items()
if success and action in deny_operations
]
assert not failed_checks, ", ".join(failed_checks)
def check_read_only_container(
wallet: str,
cid: str,
oid: str,
file_name: str,
shell: Shell,
cluster: Cluster,
bearer: Optional[str] = None,
wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None,
):
return check_custom_access_to_container(
wallet,
cid,
oid,
file_name,
deny_operations=[EACLOperation.PUT, EACLOperation.DELETE],
bearer=bearer,
wallet_config=wallet_config,
xhdr=xhdr,
shell=shell,
cluster=cluster,
)

View file

@ -1,13 +0,0 @@
from frostfs_testlib.shell import Shell
class IpTablesHelper:
@staticmethod
def drop_input_traffic_to_port(shell: Shell, ports: list[str]) -> None:
for port in ports:
shell.exec(f"sudo iptables -A INPUT -p tcp --dport {port} -j DROP")
@staticmethod
def restore_input_traffic_to_port(shell: Shell, ports: list[str]) -> None:
for port in ports:
shell.exec(f"sudo iptables -D INPUT -p tcp --dport {port} -j DROP")

View file

@ -1,245 +0,0 @@
from typing import Optional
import allure
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
from frostfs_testlib.resources.error_patterns import OBJECT_ACCESS_DENIED
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.object import (
delete_object,
get_object_from_random_node,
get_range,
get_range_hash,
head_object,
put_object_to_random_node,
search_object,
)
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.utils import string_utils
from frostfs_testlib.utils.file_utils import get_file_hash
OPERATION_ERROR_TYPE = RuntimeError
def can_get_object(
wallet: str,
cid: str,
oid: str,
file_name: str,
shell: Shell,
cluster: Cluster,
bearer: Optional[str] = None,
wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None,
) -> bool:
with allure.step("Try get object from container"):
try:
got_file_path = get_object_from_random_node(
wallet,
cid,
oid,
bearer=bearer,
wallet_config=wallet_config,
xhdr=xhdr,
shell=shell,
cluster=cluster,
)
except OPERATION_ERROR_TYPE as err:
assert string_utils.is_str_match_pattern(
err, OBJECT_ACCESS_DENIED
), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
return False
assert get_file_hash(file_name) == get_file_hash(got_file_path)
return True
def can_put_object(
wallet: str,
cid: str,
file_name: str,
shell: Shell,
cluster: Cluster,
bearer: Optional[str] = None,
wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None,
attributes: Optional[dict] = None,
) -> bool:
with allure.step("Try put object to container"):
try:
put_object_to_random_node(
wallet,
file_name,
cid,
bearer=bearer,
wallet_config=wallet_config,
xhdr=xhdr,
attributes=attributes,
shell=shell,
cluster=cluster,
)
except OPERATION_ERROR_TYPE as err:
assert string_utils.is_str_match_pattern(
err, OBJECT_ACCESS_DENIED
), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
return False
return True
def can_delete_object(
wallet: str,
cid: str,
oid: str,
shell: Shell,
endpoint: str,
bearer: Optional[str] = None,
wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None,
) -> bool:
with allure.step("Try delete object from container"):
try:
delete_object(
wallet,
cid,
oid,
bearer=bearer,
wallet_config=wallet_config,
xhdr=xhdr,
shell=shell,
endpoint=endpoint,
)
except OPERATION_ERROR_TYPE as err:
assert string_utils.is_str_match_pattern(
err, OBJECT_ACCESS_DENIED
), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
return False
return True
def can_get_head_object(
wallet: str,
cid: str,
oid: str,
shell: Shell,
endpoint: str,
bearer: Optional[str] = None,
wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> bool:
with allure.step("Try get head of object"):
try:
head_object(
wallet,
cid,
oid,
bearer=bearer,
wallet_config=wallet_config,
xhdr=xhdr,
shell=shell,
endpoint=endpoint,
timeout=timeout,
)
except OPERATION_ERROR_TYPE as err:
assert string_utils.is_str_match_pattern(
err, OBJECT_ACCESS_DENIED
), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
return False
return True
def can_get_range_of_object(
wallet: str,
cid: str,
oid: str,
shell: Shell,
endpoint: str,
bearer: Optional[str] = None,
wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> bool:
with allure.step("Try get range of object"):
try:
get_range(
wallet,
cid,
oid,
bearer=bearer,
range_cut="0:10",
wallet_config=wallet_config,
xhdr=xhdr,
shell=shell,
endpoint=endpoint,
timeout=timeout,
)
except OPERATION_ERROR_TYPE as err:
assert string_utils.is_str_match_pattern(
err, OBJECT_ACCESS_DENIED
), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
return False
return True
def can_get_range_hash_of_object(
wallet: str,
cid: str,
oid: str,
shell: Shell,
endpoint: str,
bearer: Optional[str] = None,
wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> bool:
with allure.step("Try get range hash of object"):
try:
get_range_hash(
wallet,
cid,
oid,
bearer=bearer,
range_cut="0:10",
wallet_config=wallet_config,
xhdr=xhdr,
shell=shell,
endpoint=endpoint,
timeout=timeout,
)
except OPERATION_ERROR_TYPE as err:
assert string_utils.is_str_match_pattern(
err, OBJECT_ACCESS_DENIED
), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
return False
return True
def can_search_object(
wallet: str,
cid: str,
shell: Shell,
endpoint: str,
oid: Optional[str] = None,
bearer: Optional[str] = None,
wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> bool:
with allure.step("Try search object in container"):
try:
oids = search_object(
wallet,
cid,
bearer=bearer,
wallet_config=wallet_config,
xhdr=xhdr,
shell=shell,
endpoint=endpoint,
timeout=timeout,
)
except OPERATION_ERROR_TYPE as err:
assert string_utils.is_str_match_pattern(
err, OBJECT_ACCESS_DENIED
), f"Expected {err} to match {OBJECT_ACCESS_DENIED}"
return False
if oid:
return oid in oids
return True

View file

@ -0,0 +1,220 @@
import logging
import socket
import tempfile
import textwrap
from contextlib import contextmanager
from dataclasses import dataclass
from datetime import datetime
from functools import wraps
from time import sleep
from typing import ClassVar, Optional
import allure
from paramiko import AutoAddPolicy, SFTPClient, SSHClient, SSHException, ssh_exception, RSAKey
from paramiko.ssh_exception import AuthenticationException
class HostIsNotAvailable(Exception):
"""Raises when host is not reachable."""
def __init__(self, ip: str = None, exc: Exception = None):
msg = f'Host is not available{f" by ip: {ip}" if ip else ""}'
if exc:
msg = f'{msg}. {exc}'
super().__init__(msg)
def log_command(func):
@wraps(func)
def wrapper(host: 'HostClient', command: str, *args, **kwargs):
display_length = 60
short = command.removeprefix("$ProgressPreference='SilentlyContinue'\n")
short = short[:display_length]
short += '...' if short != command else ''
with allure.step(f'SSH: {short}'):
logging.info(f'Execute command "{command}" on "{host.ip}"')
start_time = datetime.utcnow()
cmd_result = func(host, command, *args, **kwargs)
end_time = datetime.utcnow()
log_message = f'HOST: {host.ip}\n' \
f'COMMAND:\n{textwrap.indent(command, " ")}\n' \
f'RC:\n {cmd_result.rc}\n' \
f'STDOUT:\n{textwrap.indent(cmd_result.stdout, " ")}\n' \
f'STDERR:\n{textwrap.indent(cmd_result.stderr, " ")}\n' \
f'Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {end_time - start_time}'
logging.info(log_message)
allure.attach(log_message, 'SSH command', allure.attachment_type.TEXT)
return cmd_result
return wrapper
@dataclass
class SSHCommand:
stdout: str
stderr: str
rc: int
class HostClient:
ssh_client: SSHClient
SSH_CONNECTION_ATTEMPTS: ClassVar[int] = 3
CONNECTION_TIMEOUT = 30
TIMEOUT_RESTORE_CONNECTION = 10, 24
def __init__(self, ip: str, login: str, password: Optional[str],
private_key_path: Optional[str] = None, init_ssh_client=True) -> None:
self.ip = ip
self.login = login
self.password = password
self.private_key_path = private_key_path
if init_ssh_client:
self.create_connection(self.SSH_CONNECTION_ATTEMPTS)
def exec(self, cmd: str, verify=True, timeout=30) -> SSHCommand:
cmd_result = self._inner_exec(cmd, timeout)
if verify:
assert cmd_result.rc == 0, f'Non zero rc from command: "{cmd}"'
return cmd_result
@log_command
def exec_with_confirmation(self, cmd: str, confirmation: list, verify=True, timeout=10) -> SSHCommand:
ssh_stdin, ssh_stdout, ssh_stderr = self.ssh_client.exec_command(cmd, timeout=timeout)
for line in confirmation:
if not line.endswith('\n'):
line = f'{line}\n'
try:
ssh_stdin.write(line)
except OSError as err:
logging.error(f'Got error {err} executing command {cmd}')
ssh_stdin.close()
output = SSHCommand(stdout=ssh_stdout.read().decode(errors='ignore'),
stderr=ssh_stderr.read().decode(errors='ignore'),
rc=ssh_stdout.channel.recv_exit_status())
if verify:
debug_info = f'\nSTDOUT: {output.stdout}\nSTDERR: {output.stderr}\nRC: {output.rc}'
assert output.rc == 0, f'Non zero rc from command: "{cmd}"{debug_info}'
return output
@contextmanager
def as_user(self, user: str, password: str):
keep_user, keep_password = self.login, self.password
self.login, self.password = user, password
self.create_connection()
yield
self.login, self.password = keep_user, keep_password
self.create_connection()
@allure.step('Restore connection')
def restore_ssh_connection(self):
retry_time, retry_count = self.TIMEOUT_RESTORE_CONNECTION
for _ in range(retry_count):
try:
self.create_connection()
except AssertionError:
logging.warning(f'Host: Cant reach host: {self.ip}.')
sleep(retry_time)
else:
logging.info(f'Host: Cant reach host: {self.ip}.')
return
raise AssertionError(f'Host: Cant reach host: {self.ip} after 240 seconds..')
@allure.step('Copy file {host_path_to_file} to local file {path_to_file}')
def copy_file_from_host(self, host_path_to_file: str, path_to_file: str):
with self._sftp_client() as sftp_client:
sftp_client.get(host_path_to_file, path_to_file)
def copy_file_to_host(self, path_to_file: str, host_path_to_file: str):
with allure.step(f'Copy local file {path_to_file} to remote file {host_path_to_file} on host {self.ip}'):
with self._sftp_client() as sftp_client:
sftp_client.put(path_to_file, host_path_to_file)
@allure.step('Save string to remote file {host_path_to_file}')
def copy_str_to_host_file(self, string: str, host_path_to_file: str):
with tempfile.NamedTemporaryFile(mode='r+') as temp:
temp.writelines(string)
temp.flush()
with self._sftp_client() as client:
client.put(temp.name, host_path_to_file)
self.exec(f'cat {host_path_to_file}', verify=False)
def create_connection(self, attempts=SSH_CONNECTION_ATTEMPTS):
exc_err = None
for attempt in range(attempts):
self.ssh_client = SSHClient()
self.ssh_client.set_missing_host_key_policy(AutoAddPolicy())
try:
if self.private_key_path:
logging.info(
f"Trying to connect to host {self.ip} using SSH key "
f"{self.private_key_path} (attempt {attempt})"
)
self.ssh_client.connect(
hostname=self.ip,
pkey=RSAKey.from_private_key_file(self.private_key_path, self.password),
timeout=self.CONNECTION_TIMEOUT
)
else:
logging.info(
f"Trying to connect to host {self.ip} as {self.login} using password "
f"{self.password[:2] + '***' if self.password else ''} (attempt {attempt})"
)
self.ssh_client.connect(
hostname=self.ip,
username=self.login,
password=self.password,
timeout=self.CONNECTION_TIMEOUT
)
return True
except AuthenticationException as auth_err:
logging.error(f'Host: {self.ip}. {auth_err}')
raise auth_err
except (
SSHException,
ssh_exception.NoValidConnectionsError,
AttributeError,
socket.timeout,
OSError
) as ssh_err:
exc_err = ssh_err
logging.error(f'Host: {self.ip}, connection error. {exc_err}')
raise HostIsNotAvailable(self.ip, exc_err)
def drop(self):
self.ssh_client.close()
@log_command
def _inner_exec(self, cmd: str, timeout: int) -> SSHCommand:
if not self.ssh_client:
self.create_connection()
for _ in range(self.SSH_CONNECTION_ATTEMPTS):
try:
_, stdout, stderr = self.ssh_client.exec_command(cmd, timeout=timeout)
return SSHCommand(
stdout=stdout.read().decode(errors='ignore'),
stderr=stderr.read().decode(errors='ignore'),
rc=stdout.channel.recv_exit_status()
)
except (
SSHException,
TimeoutError,
ssh_exception.NoValidConnectionsError,
ConnectionResetError,
AttributeError,
socket.timeout,
) as ssh_err:
logging.error(f'Host: {self.ip}, exec command error {ssh_err}')
self.create_connection()
raise HostIsNotAvailable(f'Host: {self.ip} is not reachable.')
@contextmanager
def _sftp_client(self) -> SFTPClient:
with self.ssh_client.open_sftp() as sftp:
yield sftp

View file

@ -1,37 +1,48 @@
import time
import os
import uuid
import allure
from frostfs_testlib.resources.common import STORAGE_GC_TIME
from frostfs_testlib.utils import datetime_utils
from common import ASSETS_DIR, SIMPLE_OBJ_SIZE
def placement_policy_from_container(container_info: str) -> str:
"""
Get placement policy from container info:
def create_file_with_content(file_path: str = None, content: str = None) -> str:
mode = 'w+'
if not content:
content = os.urandom(SIMPLE_OBJ_SIZE)
mode = 'wb'
container ID: j7k4auNHRmiPMSmnH2qENLECD2au2y675fvTX6csDwd
version: 2.12
owner ID: NQ8HUxE5qEj7UUvADj7z9Z7pcvJdjtPwuw
basic ACL: 0fbfbfff (eacl-public-read-write)
attribute: Timestamp=1656340345 (2022-06-27 17:32:25 +0300 MSK)
nonce: 1c511e88-efd7-4004-8dbf-14391a5d375a
placement policy:
REP 1 IN LOC_PLACE
CBF 1
SELECT 1 FROM LOC_SW AS LOC_PLACE
FILTER Country EQ Sweden AS LOC_SW
if not file_path:
file_path = f"{os.getcwd()}/{ASSETS_DIR}/{str(uuid.uuid4())}"
else:
if not os.path.exists(os.path.dirname(file_path)):
os.makedirs(os.path.dirname(file_path))
Args:
container_info: output from frostfs-cli container get command
with open(file_path, mode) as out_file:
out_file.write(content)
Returns:
placement policy as a string
"""
assert ":" in container_info, f"Could not find placement rule in the output {container_info}"
return container_info.split(":")[-1].replace("\n", " ").strip()
return file_path
def wait_for_gc_pass_on_storage_nodes() -> None:
wait_time = datetime_utils.parse_time(STORAGE_GC_TIME)
with allure.step(f"Wait {wait_time}s until GC completes on storage nodes"):
time.sleep(wait_time)
def get_file_content(file_path: str) -> str:
with open(file_path, 'r') as out_file:
content = out_file.read()
return content
def split_file(file_path: str, parts: int) -> list[str]:
files = []
with open(file_path, 'rb') as in_file:
data = in_file.read()
content_size = len(data)
chunk_size = int((content_size + parts) / parts)
part_id = 1
for start_position in range(0, content_size + 1, chunk_size):
part_file_name = f'{file_path}_part_{part_id}'
files.append(part_file_name)
with open(part_file_name, 'wb') as out_file:
out_file.write(data[start_position:start_position + chunk_size])
part_id += 1
return files

View file

@ -7,39 +7,11 @@ log_cli_date_format = %Y-%m-%d %H:%M:%S
log_date_format = %H:%M:%S
markers =
# special markers
sanity: small tests subset
staging: test to be excluded from run in verifier/pr-validation/sanity jobs and run test in staging job
sanity: test runs in sanity testrun
smoke: test runs in smoke testrun
# functional markers
container: tests for container creation
grpc_api: standard gRPC API tests
grpc_control: tests related to using frostfs-cli control commands
grpc_object_lock: gRPC lock tests
http_gate: HTTP gate contract
s3_gate: All S3 gate tests
s3_gate_base: Base S3 gate tests
s3_gate_bucket: Bucket S3 gate tests
s3_gate_locking: Locking S3 gate tests
s3_gate_multipart: S3 gate tests with multipart object
s3_gate_object: Object S3 gate tests
s3_gate_tagging: Tagging S3 gate tests
s3_gate_versioning: Versioning S3 gate tests
long: long tests (with long execution time)
node_mgmt: frostfs control commands
session_token: tests for operations with session token
static_session: tests for operations with static session token
bearer: tests for bearer tokens
acl: All tests for ACL
acl_basic: tests for basic ACL
acl_bearer: tests for ACL with bearer
acl_extended: tests for extended ACL
acl_filters: tests for extended ACL with filters and headers
storage_group: tests for storage groups
failover: tests for system recovery after a failure
failover_panic: tests for system recovery after panic reboot of a node
failover_network: tests for network failure
failover_reboot: tests for system recovery after reboot of a node
add_nodes: add nodes to cluster
check_binaries: check frostfs installed binaries versions
payments: tests for payment associated operations
load: performance tests
s3_gate: S3 gate tests
curl: tests for HTTP gate with curl utility
long: long tests (with long execution time)

View file

@ -1 +1,63 @@
-r ../requirements.txt
aiodns==3.0.0
aiohttp==3.7.4.post0
aioresponses==0.7.2
allure-pytest==2.9.45
allure-python-commons==2.9.45
async-timeout==3.0.1
asynctest==0.13.0
attrs==21.4.0
base58==2.1.0
bitarray==2.3.4
boto3==1.16.33
botocore==1.19.33
certifi==2022.5.18
cffi==1.15.0
chardet==4.0.0
charset-normalizer==2.0.12
coverage==6.3.3
docker==4.4.0
docutils==0.17.1
Events==0.4
flake8==4.0.1
idna==3.3
iniconfig==1.1.1
isort==5.10.1
jmespath==0.10.0
jsonschema==4.5.1
lz4==3.1.3
mccabe==0.6.1
mmh3==3.0.0
multidict==6.0.2
mypy==0.950
mypy-extensions==0.4.3
neo-mamba==0.10.0
neo3crypto==0.2.1
neo3vm==0.9.0
neo3vm-stubs==0.9.0
netaddr==0.8.0
orjson==3.6.8
packaging==21.3
pexpect==4.8.0
pluggy==1.0.0
ptyprocess==0.7.0
py==1.11.0
pybiginteger==1.2.6
pybiginteger-stubs==1.2.6
pycares==4.1.2
pycodestyle==2.8.0
pycparser==2.21
pycryptodome==3.11.0
pyflakes==2.4.0
pyparsing==3.0.9
pyrsistent==0.18.1
pytest==7.1.2
python-dateutil==2.8.2
requests==2.27.1
robotframework==4.1.2
s3transfer==0.3.7
six==1.16.0
tomli==2.0.1
typing-extensions==4.2.0
urllib3==1.26.9
websocket-client==1.3.2
yarl==1.7.2

View file

@ -1,7 +0,0 @@
import os
TEST_CYCLES_COUNT = int(os.getenv("TEST_CYCLES_COUNT", "1"))
BIN_VERSIONS_FILE = os.getenv("BIN_VERSIONS_FILE")
DEVENV_PATH = os.getenv("DEVENV_PATH", os.path.join("..", "frostfs-dev-env"))
HOSTING_CONFIG_FILE = os.getenv("HOSTING_CONFIG_FILE", ".devenv.hosting.yaml")

View file

@ -1,4 +0,0 @@
{
"rep-3": "REP 3",
"complex": "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
}

View file

@ -1,89 +0,0 @@
{
"records":
[
{
"operation":"PUT",
"action":"ALLOW",
"filters":[],
"targets":
[
{
"role":"OTHERS",
"keys":[]
}
]
},
{
"operation":"HEAD",
"action":"ALLOW",
"filters":[],
"targets":
[
{
"role":"OTHERS",
"keys":[]
}
]
},
{
"operation":"DELETE",
"action":"ALLOW",
"filters":[],
"targets":
[
{
"role":"OTHERS",
"keys":[]
}
]
},
{
"operation":"SEARCH",
"action":"ALLOW",
"filters":[],
"targets":
[
{
"role":"OTHERS",
"keys":[]
}
]
},
{
"operation":"GET",
"action":"ALLOW",
"filters":[],
"targets":
[
{
"role":"OTHERS",
"keys":[]
}
]
},
{
"operation":"GETRANGE",
"action":"ALLOW",
"filters":[],
"targets":
[
{
"role":"OTHERS",
"keys":[]
}
]
},
{
"operation":"GETRANGEHASH",
"action":"ALLOW",
"filters":[],
"targets":
[
{
"role":"OTHERS",
"keys":[]
}
]
}
]
}

View file

@ -1,105 +0,0 @@
import os
import uuid
from dataclasses import dataclass
from typing import Optional
import allure
import pytest
from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG, DEFAULT_WALLET_PASS
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.acl import EACLRole
from frostfs_testlib.storage.dataclasses.frostfs_services import InnerRing, StorageNode
from frostfs_testlib.utils import wallet_utils
from frostfs_testlib.utils.file_utils import generate_file
OBJECT_COUNT = 5
@dataclass
class Wallet:
wallet_path: Optional[str] = None
config_path: Optional[str] = None
@dataclass
class Wallets:
wallets: dict[EACLRole, list[Wallet]]
def get_wallet(self, role: EACLRole = EACLRole.USER) -> Wallet:
return self.wallets[role][0]
def get_wallets_list(self, role: EACLRole = EACLRole.USER) -> list[Wallet]:
return self.wallets[role]
@pytest.fixture(scope="module")
def wallets(default_wallet: str, temp_directory: str, cluster: Cluster) -> Wallets:
other_wallets_paths = [
os.path.join(temp_directory, f"{str(uuid.uuid4())}.json") for _ in range(2)
]
for other_wallet_path in other_wallets_paths:
wallet_utils.init_wallet(other_wallet_path, DEFAULT_WALLET_PASS)
ir_node: InnerRing = cluster.ir_nodes[0]
storage_node: StorageNode = cluster.storage_nodes[0]
ir_wallet_path = ir_node.get_wallet_path()
ir_wallet_config = ir_node.get_wallet_config_path()
storage_wallet_path = storage_node.get_wallet_path()
storage_wallet_config = storage_node.get_wallet_config_path()
yield Wallets(
wallets={
EACLRole.USER: [Wallet(wallet_path=default_wallet, config_path=DEFAULT_WALLET_CONFIG)],
EACLRole.OTHERS: [
Wallet(wallet_path=other_wallet_path, config_path=DEFAULT_WALLET_CONFIG)
for other_wallet_path in other_wallets_paths
],
EACLRole.SYSTEM: [
Wallet(wallet_path=ir_wallet_path, config_path=ir_wallet_config),
Wallet(wallet_path=storage_wallet_path, config_path=storage_wallet_config),
],
}
)
@pytest.fixture(scope="module")
def file_path(simple_object_size: int) -> str:
yield generate_file(simple_object_size)
@pytest.fixture(scope="function")
def eacl_container_with_objects(
wallets: Wallets, client_shell: Shell, cluster: Cluster, file_path: str
) -> tuple[str, list[str], str]:
user_wallet = wallets.get_wallet()
with allure.step("Create eACL public container"):
cid = create_container(
user_wallet.wallet_path,
basic_acl=PUBLIC_ACL,
shell=client_shell,
endpoint=cluster.default_rpc_endpoint,
)
with allure.step("Add test objects to container"):
objects_oids = [
put_object_to_random_node(
user_wallet.wallet_path,
file_path,
cid,
attributes={"key1": "val1", "key": val, "key2": "abc"},
shell=client_shell,
cluster=cluster,
)
for val in range(OBJECT_COUNT)
]
yield cid, objects_oids, file_path
# with allure.step('Delete eACL public container'):
# delete_container(user_wallet, cid)

View file

@ -1,185 +0,0 @@
import allure
import pytest
from frostfs_testlib.resources.wellknown_acl import PRIVATE_ACL_F, PUBLIC_ACL_F, READONLY_ACL_F
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.storage.dataclasses.acl import EACLRole
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from pytest_tests.helpers.container_access import (
check_full_access_to_container,
check_no_access_to_container,
check_read_only_container,
)
from pytest_tests.testsuites.acl.conftest import Wallets
@pytest.mark.sanity
@pytest.mark.smoke
@pytest.mark.acl
@pytest.mark.acl_basic
class TestACLBasic(ClusterTestBase):
@pytest.fixture(scope="function")
def public_container(self, wallets):
user_wallet = wallets.get_wallet()
with allure.step("Create public container"):
cid_public = create_container(
user_wallet.wallet_path,
basic_acl=PUBLIC_ACL_F,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
yield cid_public
# with allure.step('Delete public container'):
# delete_container(user_wallet.wallet_path, cid_public)
@pytest.fixture(scope="function")
def private_container(self, wallets: Wallets):
user_wallet = wallets.get_wallet()
with allure.step("Create private container"):
cid_private = create_container(
user_wallet.wallet_path,
basic_acl=PRIVATE_ACL_F,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
yield cid_private
# with allure.step('Delete private container'):
# delete_container(user_wallet.wallet_path, cid_private)
@pytest.fixture(scope="function")
def read_only_container(self, wallets: Wallets):
user_wallet = wallets.get_wallet()
with allure.step("Create public readonly container"):
cid_read_only = create_container(
user_wallet.wallet_path,
basic_acl=READONLY_ACL_F,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
yield cid_read_only
# with allure.step('Delete public readonly container'):
# delete_container(user_wallet.wallet_path, cid_read_only)
@allure.title("Test basic ACL on public container")
def test_basic_acl_public(self, wallets: Wallets, public_container: str, file_path: str):
"""
Test basic ACL set during public container creation.
"""
user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(role=EACLRole.OTHERS)
cid = public_container
for wallet, desc in ((user_wallet, "owner"), (other_wallet, "other users")):
with allure.step("Add test objects to container"):
# We create new objects for each wallet because check_full_access_to_container
# deletes the object
owner_object_oid = put_object_to_random_node(
user_wallet.wallet_path,
file_path,
cid,
shell=self.shell,
cluster=self.cluster,
attributes={"created": "owner"},
)
other_object_oid = put_object_to_random_node(
other_wallet.wallet_path,
file_path,
cid,
shell=self.shell,
cluster=self.cluster,
attributes={"created": "other"},
)
with allure.step(f"Check {desc} has full access to public container"):
check_full_access_to_container(
wallet.wallet_path,
cid,
owner_object_oid,
file_path,
shell=self.shell,
cluster=self.cluster,
)
check_full_access_to_container(
wallet.wallet_path,
cid,
other_object_oid,
file_path,
shell=self.shell,
cluster=self.cluster,
)
@allure.title("Test basic ACL on private container")
def test_basic_acl_private(self, wallets: Wallets, private_container: str, file_path: str):
"""
Test basic ACL set during private container creation.
"""
user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(role=EACLRole.OTHERS)
cid = private_container
with allure.step("Add test objects to container"):
owner_object_oid = put_object_to_random_node(
user_wallet.wallet_path, file_path, cid, shell=self.shell, cluster=self.cluster
)
with allure.step("Check only owner has full access to private container"):
with allure.step("Check no one except owner has access to operations with container"):
check_no_access_to_container(
other_wallet.wallet_path,
cid,
owner_object_oid,
file_path,
shell=self.shell,
cluster=self.cluster,
)
with allure.step("Check owner has full access to private container"):
check_full_access_to_container(
user_wallet.wallet_path,
cid,
owner_object_oid,
file_path,
shell=self.shell,
cluster=self.cluster,
)
@allure.title("Test basic ACL on readonly container")
def test_basic_acl_readonly(
self, wallets: Wallets, client_shell: Shell, read_only_container: str, file_path: str
):
"""
Test basic ACL Operations for Read-Only Container.
"""
user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(role=EACLRole.OTHERS)
cid = read_only_container
with allure.step("Add test objects to container"):
object_oid = put_object_to_random_node(
user_wallet.wallet_path, file_path, cid, shell=client_shell, cluster=self.cluster
)
with allure.step("Check other has read-only access to operations with container"):
check_read_only_container(
other_wallet.wallet_path,
cid,
object_oid,
file_path,
shell=client_shell,
cluster=self.cluster,
)
with allure.step("Check owner has full access to public container"):
check_full_access_to_container(
user_wallet.wallet_path,
cid,
object_oid,
file_path,
shell=client_shell,
cluster=self.cluster,
)

View file

@ -1,238 +0,0 @@
import allure
import pytest
from frostfs_testlib.steps.acl import (
create_eacl,
form_bearertoken_file,
set_eacl,
wait_for_cache_expired,
)
from frostfs_testlib.storage.dataclasses.acl import EACLAccess, EACLOperation, EACLRole, EACLRule
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from pytest_tests.helpers.container_access import (
check_custom_access_to_container,
check_full_access_to_container,
check_no_access_to_container,
)
from pytest_tests.testsuites.acl.conftest import Wallets
@pytest.mark.sanity
@pytest.mark.acl
@pytest.mark.acl_bearer
class TestACLBearer(ClusterTestBase):
@pytest.mark.parametrize("role", [EACLRole.USER, EACLRole.OTHERS])
def test_bearer_token_operations(
self,
wallets: Wallets,
eacl_container_with_objects: tuple[str, list[str], str],
role: EACLRole,
):
allure.dynamic.title(
f"Testcase to validate FrostFS operations with {role.value} BearerToken"
)
cid, objects_oids, file_path = eacl_container_with_objects
user_wallet = wallets.get_wallet()
deny_wallet = wallets.get_wallet(role)
endpoint = self.cluster.default_rpc_endpoint
with allure.step(f"Check {role.value} has full access to container without bearer token"):
check_full_access_to_container(
deny_wallet.wallet_path,
cid,
objects_oids.pop(),
file_path,
wallet_config=deny_wallet.config_path,
shell=self.shell,
cluster=self.cluster,
)
with allure.step(f"Set deny all operations for {role.value} via eACL"):
eacl = [
EACLRule(access=EACLAccess.DENY, role=role, operation=op) for op in EACLOperation
]
eacl_file = create_eacl(cid, eacl, shell=self.shell)
set_eacl(user_wallet.wallet_path, cid, eacl_file, shell=self.shell, endpoint=endpoint)
wait_for_cache_expired()
with allure.step(f"Create bearer token for {role.value} with all operations allowed"):
bearer = form_bearertoken_file(
user_wallet.wallet_path,
cid,
[
EACLRule(operation=op, access=EACLAccess.ALLOW, role=role)
for op in EACLOperation
],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
with allure.step(
f"Check {role.value} without token has no access to all operations with container"
):
check_no_access_to_container(
deny_wallet.wallet_path,
cid,
objects_oids.pop(),
file_path,
wallet_config=deny_wallet.config_path,
shell=self.shell,
cluster=self.cluster,
)
with allure.step(
f"Check {role.value} with token has access to all operations with container"
):
check_full_access_to_container(
deny_wallet.wallet_path,
cid,
objects_oids.pop(),
file_path,
bearer=bearer,
wallet_config=deny_wallet.config_path,
shell=self.shell,
cluster=self.cluster,
)
with allure.step(f"Set allow all operations for {role.value} via eACL"):
eacl = [
EACLRule(access=EACLAccess.ALLOW, role=role, operation=op) for op in EACLOperation
]
eacl_file = create_eacl(cid, eacl, shell=self.shell)
set_eacl(user_wallet.wallet_path, cid, eacl_file, shell=self.shell, endpoint=endpoint)
wait_for_cache_expired()
with allure.step(
f"Check {role.value} without token has access to all operations with container"
):
check_full_access_to_container(
deny_wallet.wallet_path,
cid,
objects_oids.pop(),
file_path,
wallet_config=deny_wallet.config_path,
shell=self.shell,
cluster=self.cluster,
)
@allure.title("BearerToken Operations for compound Operations")
def test_bearer_token_compound_operations(self, wallets, eacl_container_with_objects):
endpoint = self.cluster.default_rpc_endpoint
cid, objects_oids, file_path = eacl_container_with_objects
user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(role=EACLRole.OTHERS)
# Operations that we will deny for each role via eACL
deny_map = {
EACLRole.USER: [EACLOperation.DELETE],
EACLRole.OTHERS: [
EACLOperation.SEARCH,
EACLOperation.GET_RANGE_HASH,
EACLOperation.GET_RANGE,
],
}
# Operations that we will allow for each role with bearer token
bearer_map = {
EACLRole.USER: [
EACLOperation.DELETE,
EACLOperation.PUT,
EACLOperation.GET_RANGE,
],
EACLRole.OTHERS: [
EACLOperation.GET,
EACLOperation.GET_RANGE,
EACLOperation.GET_RANGE_HASH,
],
}
deny_map_with_bearer = {
EACLRole.USER: [
op for op in deny_map[EACLRole.USER] if op not in bearer_map[EACLRole.USER]
],
EACLRole.OTHERS: [
op for op in deny_map[EACLRole.OTHERS] if op not in bearer_map[EACLRole.OTHERS]
],
}
eacl_deny = []
for role, operations in deny_map.items():
eacl_deny += [
EACLRule(access=EACLAccess.DENY, role=role, operation=op) for op in operations
]
set_eacl(
user_wallet.wallet_path,
cid,
eacl_table_path=create_eacl(cid, eacl_deny, shell=self.shell),
shell=self.shell,
endpoint=endpoint,
)
wait_for_cache_expired()
with allure.step("Check rule consistency without bearer"):
check_custom_access_to_container(
user_wallet.wallet_path,
cid,
objects_oids.pop(),
file_path,
deny_operations=deny_map[EACLRole.USER],
wallet_config=user_wallet.config_path,
shell=self.shell,
cluster=self.cluster,
)
check_custom_access_to_container(
other_wallet.wallet_path,
cid,
objects_oids.pop(),
file_path,
deny_operations=deny_map[EACLRole.OTHERS],
wallet_config=other_wallet.config_path,
shell=self.shell,
cluster=self.cluster,
)
with allure.step("Check rule consistency using bearer token"):
bearer_user = form_bearertoken_file(
user_wallet.wallet_path,
cid,
[
EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.USER)
for op in bearer_map[EACLRole.USER]
],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
bearer_other = form_bearertoken_file(
user_wallet.wallet_path,
cid,
[
EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.OTHERS)
for op in bearer_map[EACLRole.OTHERS]
],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
check_custom_access_to_container(
user_wallet.wallet_path,
cid,
objects_oids.pop(),
file_path,
deny_operations=deny_map_with_bearer[EACLRole.USER],
bearer=bearer_user,
wallet_config=user_wallet.config_path,
shell=self.shell,
cluster=self.cluster,
)
check_custom_access_to_container(
other_wallet.wallet_path,
cid,
objects_oids.pop(),
file_path,
deny_operations=deny_map_with_bearer[EACLRole.OTHERS],
bearer=bearer_other,
wallet_config=other_wallet.config_path,
shell=self.shell,
cluster=self.cluster,
)

View file

@ -1,681 +0,0 @@
import allure
import pytest
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.acl import create_eacl, set_eacl, wait_for_cache_expired
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.steps.node_management import drop_object
from frostfs_testlib.storage.dataclasses.acl import EACLAccess, EACLOperation, EACLRole, EACLRule
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.failover_utils import wait_object_replication
from pytest_tests.helpers.container_access import (
check_full_access_to_container,
check_no_access_to_container,
)
from pytest_tests.helpers.object_access import (
can_delete_object,
can_get_head_object,
can_get_object,
can_get_range_hash_of_object,
can_get_range_of_object,
can_put_object,
can_search_object,
)
from pytest_tests.testsuites.acl.conftest import Wallets
@pytest.mark.sanity
@pytest.mark.acl
@pytest.mark.acl_extended
class TestEACLContainer(ClusterTestBase):
@pytest.fixture(scope="function")
def eacl_full_placement_container_with_object(self, wallets: Wallets, file_path: str) -> str:
user_wallet = wallets.get_wallet()
storage_nodes = self.cluster.storage_nodes
node_count = len(storage_nodes)
with allure.step("Create eACL public container with full placement rule"):
full_placement_rule = f"REP {node_count} IN X CBF 1 SELECT {node_count} FROM * AS X"
cid = create_container(
wallet=user_wallet.wallet_path,
rule=full_placement_rule,
basic_acl=PUBLIC_ACL,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
with allure.step("Add test object to container"):
oid = put_object_to_random_node(
user_wallet.wallet_path, file_path, cid, shell=self.shell, cluster=self.cluster
)
wait_object_replication(
cid,
oid,
node_count,
shell=self.shell,
nodes=storage_nodes,
)
yield cid, oid, file_path
@pytest.mark.parametrize("deny_role", [EACLRole.USER, EACLRole.OTHERS])
def test_extended_acl_deny_all_operations(
self,
wallets: Wallets,
eacl_container_with_objects: tuple[str, list[str], str],
deny_role: EACLRole,
):
user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(EACLRole.OTHERS)
deny_role_wallet = other_wallet if deny_role == EACLRole.OTHERS else user_wallet
not_deny_role_wallet = user_wallet if deny_role == EACLRole.OTHERS else other_wallet
deny_role_str = "all others" if deny_role == EACLRole.OTHERS else "user"
not_deny_role_str = "user" if deny_role == EACLRole.OTHERS else "all others"
allure.dynamic.title(f"Testcase to deny FrostFS operations for {deny_role_str}.")
cid, object_oids, file_path = eacl_container_with_objects
with allure.step(f"Deny all operations for {deny_role_str} via eACL"):
eacl_deny = [
EACLRule(access=EACLAccess.DENY, role=deny_role, operation=op)
for op in EACLOperation
]
set_eacl(
user_wallet.wallet_path,
cid,
create_eacl(cid, eacl_deny, shell=self.shell),
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
wait_for_cache_expired()
with allure.step(f"Check only {not_deny_role_str} has full access to container"):
with allure.step(
f"Check {deny_role_str} has not access to any operations with container"
):
check_no_access_to_container(
deny_role_wallet.wallet_path,
cid,
object_oids[0],
file_path,
shell=self.shell,
cluster=self.cluster,
)
with allure.step(
f"Check {not_deny_role_wallet} has full access to eACL public container"
):
check_full_access_to_container(
not_deny_role_wallet.wallet_path,
cid,
object_oids.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
)
with allure.step(f"Allow all operations for {deny_role_str} via eACL"):
eacl_deny = [
EACLRule(access=EACLAccess.ALLOW, role=deny_role, operation=op)
for op in EACLOperation
]
set_eacl(
user_wallet.wallet_path,
cid,
create_eacl(cid, eacl_deny, shell=self.shell),
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
wait_for_cache_expired()
with allure.step("Check all have full access to eACL public container"):
check_full_access_to_container(
user_wallet.wallet_path,
cid,
object_oids.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
)
check_full_access_to_container(
other_wallet.wallet_path,
cid,
object_oids.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
)
@allure.title("Testcase to allow FrostFS operations for only one other pubkey.")
def test_extended_acl_deny_all_operations_exclude_pubkey(
self, wallets: Wallets, eacl_container_with_objects: tuple[str, list[str], str]
):
user_wallet = wallets.get_wallet()
other_wallet, other_wallet_allow = wallets.get_wallets_list(EACLRole.OTHERS)[0:2]
cid, object_oids, file_path = eacl_container_with_objects
with allure.step("Deny all operations for others except single wallet via eACL"):
eacl = [
EACLRule(
access=EACLAccess.ALLOW,
role=other_wallet_allow.wallet_path,
operation=op,
)
for op in EACLOperation
]
eacl += [
EACLRule(access=EACLAccess.DENY, role=EACLRole.OTHERS, operation=op)
for op in EACLOperation
]
set_eacl(
user_wallet.wallet_path,
cid,
create_eacl(cid, eacl, shell=self.shell),
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
wait_for_cache_expired()
with allure.step("Check only owner and allowed other have full access to public container"):
with allure.step("Check other has not access to operations with container"):
check_no_access_to_container(
other_wallet.wallet_path,
cid,
object_oids[0],
file_path,
shell=self.shell,
cluster=self.cluster,
)
with allure.step("Check owner has full access to public container"):
check_full_access_to_container(
user_wallet.wallet_path,
cid,
object_oids.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
)
with allure.step("Check allowed other has full access to public container"):
check_full_access_to_container(
other_wallet_allow.wallet_path,
cid,
object_oids.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
)
@allure.title("Testcase to validate FrostFS replication with eACL deny rules.")
def test_extended_acl_deny_replication(
self,
wallets: Wallets,
eacl_full_placement_container_with_object: tuple[str, list[str], str],
):
user_wallet = wallets.get_wallet()
cid, oid, file_path = eacl_full_placement_container_with_object
storage_nodes = self.cluster.storage_nodes
storage_node = self.cluster.storage_nodes[0]
with allure.step("Deny all operations for user via eACL"):
eacl_deny = [
EACLRule(access=EACLAccess.DENY, role=EACLRole.USER, operation=op)
for op in EACLOperation
]
eacl_deny += [
EACLRule(access=EACLAccess.DENY, role=EACLRole.OTHERS, operation=op)
for op in EACLOperation
]
set_eacl(
user_wallet.wallet_path,
cid,
create_eacl(cid, eacl_deny, shell=self.shell),
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
wait_for_cache_expired()
with allure.step("Drop object to check replication"):
drop_object(storage_node, cid=cid, oid=oid)
storage_wallet_path = storage_node.get_wallet_path()
with allure.step("Wait for dropped object replicated"):
wait_object_replication(
cid,
oid,
len(storage_nodes),
self.shell,
storage_nodes,
)
@allure.title("Testcase to validate FrostFS system operations with extended ACL")
def test_extended_actions_system(
self, wallets: Wallets, eacl_container_with_objects: tuple[str, list[str], str]
):
user_wallet = wallets.get_wallet()
ir_wallet, storage_wallet = wallets.get_wallets_list(role=EACLRole.SYSTEM)[:2]
cid, object_oids, file_path = eacl_container_with_objects
endpoint = self.cluster.default_rpc_endpoint
with allure.step("Check IR and STORAGE rules compliance"):
assert not can_put_object(
ir_wallet.wallet_path,
cid,
file_path,
shell=self.shell,
cluster=self.cluster,
wallet_config=ir_wallet.config_path,
)
assert can_put_object(
storage_wallet.wallet_path,
cid,
file_path,
shell=self.shell,
cluster=self.cluster,
wallet_config=storage_wallet.config_path,
)
assert can_get_object(
ir_wallet.wallet_path,
cid,
object_oids[0],
file_path,
shell=self.shell,
cluster=self.cluster,
wallet_config=ir_wallet.config_path,
)
assert can_get_object(
storage_wallet.wallet_path,
cid,
object_oids[0],
file_path,
shell=self.shell,
cluster=self.cluster,
wallet_config=storage_wallet.config_path,
)
assert can_get_head_object(
ir_wallet.wallet_path,
cid,
object_oids[0],
shell=self.shell,
endpoint=endpoint,
wallet_config=ir_wallet.config_path,
)
assert can_get_head_object(
storage_wallet.wallet_path,
cid,
object_oids[0],
shell=self.shell,
endpoint=endpoint,
wallet_config=storage_wallet.config_path,
)
assert can_search_object(
ir_wallet.wallet_path,
cid,
shell=self.shell,
endpoint=endpoint,
oid=object_oids[0],
wallet_config=ir_wallet.config_path,
)
assert can_search_object(
storage_wallet.wallet_path,
cid,
shell=self.shell,
endpoint=endpoint,
oid=object_oids[0],
wallet_config=storage_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_range_of_object(
wallet=ir_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
wallet_config=ir_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_range_of_object(
wallet=storage_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
wallet_config=storage_wallet.config_path,
)
assert can_get_range_hash_of_object(
wallet=ir_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
wallet_config=ir_wallet.config_path,
)
assert can_get_range_hash_of_object(
wallet=storage_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
wallet_config=storage_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_delete_object(
wallet=ir_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
wallet_config=ir_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_delete_object(
wallet=storage_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
wallet_config=storage_wallet.config_path,
)
with allure.step("Deny all operations for SYSTEM via eACL"):
set_eacl(
user_wallet.wallet_path,
cid,
create_eacl(
cid=cid,
rules_list=[
EACLRule(access=EACLAccess.DENY, role=EACLRole.SYSTEM, operation=op)
for op in EACLOperation
],
shell=self.shell,
),
shell=self.shell,
endpoint=endpoint,
)
wait_for_cache_expired()
with allure.step("Check IR and STORAGE rules compliance with deny eACL"):
assert not can_put_object(
wallet=ir_wallet.wallet_path,
cid=cid,
file_name=file_path,
shell=self.shell,
cluster=self.cluster,
wallet_config=ir_wallet.config_path,
)
assert not can_put_object(
wallet=storage_wallet.wallet_path,
cid=cid,
file_name=file_path,
shell=self.shell,
cluster=self.cluster,
wallet_config=storage_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_object(
wallet=ir_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
file_name=file_path,
shell=self.shell,
cluster=self.cluster,
wallet_config=ir_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_object(
wallet=storage_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
file_name=file_path,
shell=self.shell,
cluster=self.cluster,
wallet_config=storage_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_head_object(
wallet=ir_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
wallet_config=ir_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_head_object(
wallet=storage_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
wallet_config=storage_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_search_object(
wallet=ir_wallet.wallet_path,
cid=cid,
shell=self.shell,
endpoint=endpoint,
oid=object_oids[0],
wallet_config=ir_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_search_object(
wallet=storage_wallet.wallet_path,
cid=cid,
shell=self.shell,
endpoint=endpoint,
oid=object_oids[0],
wallet_config=storage_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_range_of_object(
wallet=ir_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
wallet_config=ir_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_range_of_object(
wallet=storage_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
wallet_config=storage_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_range_hash_of_object(
wallet=ir_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
wallet_config=ir_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_range_hash_of_object(
wallet=storage_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
wallet_config=storage_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_delete_object(
wallet=ir_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
wallet_config=ir_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_delete_object(
wallet=storage_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
wallet_config=storage_wallet.config_path,
)
with allure.step("Allow all operations for SYSTEM via eACL"):
set_eacl(
user_wallet.wallet_path,
cid,
create_eacl(
cid=cid,
rules_list=[
EACLRule(access=EACLAccess.ALLOW, role=EACLRole.SYSTEM, operation=op)
for op in EACLOperation
],
shell=self.shell,
),
shell=self.shell,
endpoint=endpoint,
)
wait_for_cache_expired()
with allure.step("Check IR and STORAGE rules compliance with allow eACL"):
assert not can_put_object(
wallet=ir_wallet.wallet_path,
cid=cid,
file_name=file_path,
shell=self.shell,
cluster=self.cluster,
wallet_config=ir_wallet.config_path,
)
assert can_put_object(
wallet=storage_wallet.wallet_path,
cid=cid,
file_name=file_path,
shell=self.shell,
cluster=self.cluster,
wallet_config=storage_wallet.config_path,
)
assert can_get_object(
wallet=ir_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
file_name=file_path,
shell=self.shell,
cluster=self.cluster,
wallet_config=ir_wallet.config_path,
)
assert can_get_object(
wallet=storage_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
file_name=file_path,
shell=self.shell,
cluster=self.cluster,
wallet_config=storage_wallet.config_path,
)
assert can_get_head_object(
wallet=ir_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
wallet_config=ir_wallet.config_path,
)
assert can_get_head_object(
wallet=storage_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
wallet_config=storage_wallet.config_path,
)
assert can_search_object(
wallet=ir_wallet.wallet_path,
cid=cid,
shell=self.shell,
oid=object_oids[0],
endpoint=endpoint,
wallet_config=ir_wallet.config_path,
)
assert can_search_object(
wallet=storage_wallet.wallet_path,
cid=cid,
shell=self.shell,
oid=object_oids[0],
endpoint=endpoint,
wallet_config=storage_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_range_of_object(
wallet=ir_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
wallet_config=ir_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_get_range_of_object(
wallet=storage_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
wallet_config=storage_wallet.config_path,
)
assert can_get_range_hash_of_object(
wallet=ir_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
wallet_config=ir_wallet.config_path,
)
assert can_get_range_hash_of_object(
wallet=storage_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
wallet_config=storage_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_delete_object(
wallet=ir_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
wallet_config=ir_wallet.config_path,
)
with pytest.raises(AssertionError):
assert can_delete_object(
wallet=storage_wallet.wallet_path,
cid=cid,
oid=object_oids[0],
shell=self.shell,
endpoint=endpoint,
wallet_config=storage_wallet.config_path,
)


@ -1,644 +0,0 @@
import allure
import pytest
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.acl import (
create_eacl,
form_bearertoken_file,
set_eacl,
wait_for_cache_expired,
)
from frostfs_testlib.steps.cli.container import create_container, delete_container
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.storage.dataclasses.acl import (
EACLAccess,
EACLFilter,
EACLFilters,
EACLHeaderType,
EACLMatchType,
EACLOperation,
EACLRole,
EACLRule,
)
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from pytest_tests.helpers.container_access import (
check_full_access_to_container,
check_no_access_to_container,
)
from pytest_tests.helpers.object_access import can_get_head_object, can_get_object, can_put_object
from pytest_tests.testsuites.acl.conftest import Wallets
@pytest.mark.sanity
@pytest.mark.acl
@pytest.mark.acl_filters
class TestEACLFilters(ClusterTestBase):
# SPEC: https://github.com/nspcc-dev/neofs-spec/blob/master/01-arch/07-acl.md
ATTRIBUTE = {"check_key": "check_value"}
OTHER_ATTRIBUTE = {"check_key": "other_value"}
SET_HEADERS = {
"key_one": "check_value",
"x_key": "xvalue",
"check_key": "check_value",
}
OTHER_HEADERS = {
"key_one": "check_value",
"x_key": "other_value",
"check_key": "other_value",
}
REQ_EQUAL_FILTER = EACLFilter(
key="check_key", value="check_value", header_type=EACLHeaderType.REQUEST
)
NOT_REQ_EQUAL_FILTER = EACLFilter(
key="check_key",
value="other_value",
match_type=EACLMatchType.STRING_NOT_EQUAL,
header_type=EACLHeaderType.REQUEST,
)
OBJ_EQUAL_FILTER = EACLFilter(
key="check_key", value="check_value", header_type=EACLHeaderType.OBJECT
)
NOT_OBJ_EQUAL_FILTER = EACLFilter(
key="check_key",
value="other_value",
match_type=EACLMatchType.STRING_NOT_EQUAL,
header_type=EACLHeaderType.OBJECT,
)
OBJECT_COUNT = 5
OBJECT_ATTRIBUTES_FILTER_SUPPORTED_OPERATIONS = [
EACLOperation.GET,
EACLOperation.HEAD,
EACLOperation.PUT,
]
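# Note (assumption): eACL filters over object headers are only meaningful for operations
# that actually carry object headers (GET, HEAD, PUT), which is why the object-attribute
# test cases below are limited to these three operations.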
@pytest.fixture(scope="function")
def eacl_container_with_objects(self, wallets: Wallets, file_path: str):
user_wallet = wallets.get_wallet()
with allure.step("Create eACL public container"):
cid = create_container(
user_wallet.wallet_path,
basic_acl=PUBLIC_ACL,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
with allure.step("Add test objects to container"):
objects_with_header = [
put_object_to_random_node(
user_wallet.wallet_path,
file_path,
cid,
shell=self.shell,
cluster=self.cluster,
attributes={**self.SET_HEADERS, "key": val},
)
for val in range(self.OBJECT_COUNT)
]
objects_with_other_header = [
put_object_to_random_node(
user_wallet.wallet_path,
file_path,
cid,
shell=self.shell,
cluster=self.cluster,
attributes={**self.OTHER_HEADERS, "key": val},
)
for val in range(self.OBJECT_COUNT)
]
objects_without_header = [
put_object_to_random_node(
user_wallet.wallet_path,
file_path,
cid,
shell=self.shell,
cluster=self.cluster,
)
for _ in range(self.OBJECT_COUNT)
]
yield cid, objects_with_header, objects_with_other_header, objects_without_header, file_path
with allure.step("Delete eACL public container"):
delete_container(
user_wallet.wallet_path,
cid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
@pytest.mark.parametrize(
"match_type", [EACLMatchType.STRING_EQUAL, EACLMatchType.STRING_NOT_EQUAL]
)
def test_extended_acl_filters_request(
self,
wallets: Wallets,
eacl_container_with_objects: tuple[str, list[str], list[str], list[str], str],
match_type: EACLMatchType,
):
allure.dynamic.title(f"Validate FrostFS operations with request filter: {match_type.name}")
user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(EACLRole.OTHERS)
(
cid,
objects_with_header,
objects_with_other_header,
objects_without_header,
file_path,
) = eacl_container_with_objects
with allure.step("Deny all operations for other with eACL request filter"):
equal_filter = EACLFilter(**self.REQ_EQUAL_FILTER.__dict__)
equal_filter.match_type = match_type
eacl_deny = [
EACLRule(
access=EACLAccess.DENY,
role=EACLRole.OTHERS,
filters=EACLFilters([equal_filter]),
operation=op,
)
for op in EACLOperation
]
set_eacl(
user_wallet.wallet_path,
cid,
create_eacl(cid, eacl_deny, shell=self.shell),
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
wait_for_cache_expired()
# Filter denies requests where "check_key {match_type} ATTRIBUTE", so when match_type
# is STRING_EQUAL, then requests with "check_key=OTHER_ATTRIBUTE" will be allowed while
# requests with "check_key=ATTRIBUTE" will be denied, and vice versa
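# Illustrative mapping derived from the rule above: with STRING_EQUAL a request carrying the
# header check_key=check_value (ATTRIBUTE) is denied and check_key=other_value (OTHER_ATTRIBUTE)
# is allowed; with STRING_NOT_EQUAL the mapping is inverted.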
allow_headers = (
self.OTHER_ATTRIBUTE if match_type == EACLMatchType.STRING_EQUAL else self.ATTRIBUTE
)
deny_headers = (
self.ATTRIBUTE if match_type == EACLMatchType.STRING_EQUAL else self.OTHER_ATTRIBUTE
)
# We test on 3 groups of objects with various headers,
# but eACL rule should ignore object headers and
# work only based on request headers
for oid in (
objects_with_header,
objects_with_other_header,
objects_without_header,
):
with allure.step("Check other has full access when sending request without headers"):
check_full_access_to_container(
other_wallet.wallet_path,
cid,
oid.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
)
with allure.step(
"Check other has full access when sending request with allowed headers"
):
check_full_access_to_container(
other_wallet.wallet_path,
cid,
oid.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
xhdr=allow_headers,
)
with allure.step("Check other has no access when sending request with denied headers"):
check_no_access_to_container(
other_wallet.wallet_path,
cid,
oid.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
xhdr=deny_headers,
)
with allure.step(
"Check other has full access when sending request "
"with denied headers and using bearer token"
):
bearer_other = form_bearertoken_file(
user_wallet.wallet_path,
cid,
[
EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.OTHERS)
for op in EACLOperation
],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
check_full_access_to_container(
other_wallet.wallet_path,
cid,
oid.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
xhdr=deny_headers,
bearer=bearer_other,
)
@pytest.mark.parametrize(
"match_type", [EACLMatchType.STRING_EQUAL, EACLMatchType.STRING_NOT_EQUAL]
)
def test_extended_acl_deny_filters_object(
self,
wallets: Wallets,
eacl_container_with_objects: tuple[str, list[str], list[str], list[str], str],
match_type: EACLMatchType,
):
allure.dynamic.title(
f"Validate FrostFS operations with deny user headers filter: {match_type.name}"
)
user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(EACLRole.OTHERS)
(
cid,
objects_with_header,
objects_with_other_header,
objs_without_header,
file_path,
) = eacl_container_with_objects
with allure.step("Deny all operations for other with object filter"):
equal_filter = EACLFilter(**self.OBJ_EQUAL_FILTER.__dict__)
equal_filter.match_type = match_type
eacl_deny = [
EACLRule(
access=EACLAccess.DENY,
role=EACLRole.OTHERS,
filters=EACLFilters([equal_filter]),
operation=op,
)
for op in self.OBJECT_ATTRIBUTES_FILTER_SUPPORTED_OPERATIONS
]
set_eacl(
user_wallet.wallet_path,
cid,
create_eacl(cid, eacl_deny, shell=self.shell),
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
wait_for_cache_expired()
allow_objects = (
objects_with_other_header
if match_type == EACLMatchType.STRING_EQUAL
else objects_with_header
)
deny_objects = (
objects_with_header
if match_type == EACLMatchType.STRING_EQUAL
else objects_with_other_header
)
# We will attempt requests with various headers,
# but eACL rule should ignore request headers and validate
# only object headers
for xhdr in (self.ATTRIBUTE, self.OTHER_ATTRIBUTE, None):
with allure.step("Check other have full access to objects without attributes"):
check_full_access_to_container(
other_wallet.wallet_path,
cid,
objs_without_header.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
xhdr=xhdr,
)
with allure.step("Check other have full access to objects without deny attribute"):
check_full_access_to_container(
other_wallet.wallet_path,
cid,
allow_objects.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
xhdr=xhdr,
)
with allure.step("Check other have no access to objects with deny attribute"):
with pytest.raises(AssertionError):
assert can_get_head_object(
other_wallet.wallet_path,
cid,
deny_objects[0],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
xhdr=xhdr,
)
with pytest.raises(AssertionError):
assert can_get_object(
other_wallet.wallet_path,
cid,
deny_objects[0],
file_path,
shell=self.shell,
cluster=self.cluster,
xhdr=xhdr,
)
with allure.step(
"Check other has access to objects with the deny attribute when using a bearer token"
):
bearer_other = form_bearertoken_file(
user_wallet.wallet_path,
cid,
[
EACLRule(
operation=op,
access=EACLAccess.ALLOW,
role=EACLRole.OTHERS,
)
for op in EACLOperation
],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
check_full_access_to_container(
other_wallet.wallet_path,
cid,
deny_objects.pop(),
file_path,
shell=self.shell,
cluster=self.cluster,
xhdr=xhdr,
bearer=bearer_other,
)
allow_attribute = (
self.OTHER_ATTRIBUTE if match_type == EACLMatchType.STRING_EQUAL else self.ATTRIBUTE
)
with allure.step("Check other can PUT objects without denied attribute"):
assert can_put_object(
other_wallet.wallet_path,
cid,
file_path,
shell=self.shell,
cluster=self.cluster,
attributes=allow_attribute,
)
assert can_put_object(
other_wallet.wallet_path, cid, file_path, shell=self.shell, cluster=self.cluster
)
deny_attribute = (
self.ATTRIBUTE if match_type == EACLMatchType.STRING_EQUAL else self.OTHER_ATTRIBUTE
)
with allure.step("Check other can not PUT objects with denied attribute"):
with pytest.raises(AssertionError):
assert can_put_object(
other_wallet.wallet_path,
cid,
file_path,
shell=self.shell,
cluster=self.cluster,
attributes=deny_attribute,
)
with allure.step(
"Check other can PUT objects with the denied attribute when using a bearer token"
):
bearer_other_for_put = form_bearertoken_file(
user_wallet.wallet_path,
cid,
[
EACLRule(
operation=EACLOperation.PUT,
access=EACLAccess.ALLOW,
role=EACLRole.OTHERS,
)
],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
assert can_put_object(
other_wallet.wallet_path,
cid,
file_path,
shell=self.shell,
cluster=self.cluster,
attributes=deny_attribute,
bearer=bearer_other_for_put,
)
@pytest.mark.parametrize(
"match_type", [EACLMatchType.STRING_EQUAL, EACLMatchType.STRING_NOT_EQUAL]
)
def test_extended_acl_allow_filters_object(
self,
wallets: Wallets,
eacl_container_with_objects: tuple[str, list[str], list[str], list[str], str],
match_type: EACLMatchType,
):
allure.dynamic.title(
f"Validate FrostFS operations with allow eACL user headers filter: {match_type.name}"
)
user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(EACLRole.OTHERS)
(
cid,
objects_with_header,
objects_with_other_header,
objects_without_header,
file_path,
) = eacl_container_with_objects
with allure.step(
"Deny all operations for others except a few operations allowed by the object filter"
):
equal_filter = EACLFilter(**self.OBJ_EQUAL_FILTER.__dict__)
equal_filter.match_type = match_type
eacl = [
EACLRule(
access=EACLAccess.ALLOW,
role=EACLRole.OTHERS,
filters=EACLFilters([equal_filter]),
operation=op,
)
for op in self.OBJECT_ATTRIBUTES_FILTER_SUPPORTED_OPERATIONS
] + [
EACLRule(access=EACLAccess.DENY, role=EACLRole.OTHERS, operation=op)
for op in self.OBJECT_ATTRIBUTES_FILTER_SUPPORTED_OPERATIONS
]
set_eacl(
user_wallet.wallet_path,
cid,
create_eacl(cid, eacl, shell=self.shell),
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
wait_for_cache_expired()
if match_type == EACLMatchType.STRING_EQUAL:
allow_objects = objects_with_header
deny_objects = objects_with_other_header
allow_attribute = self.ATTRIBUTE
deny_attribute = self.OTHER_ATTRIBUTE
else:
allow_objects = objects_with_other_header
deny_objects = objects_with_header
allow_attribute = self.OTHER_ATTRIBUTE
deny_attribute = self.ATTRIBUTE
with allure.step("Check other cannot get and put objects without attributes"):
oid = objects_without_header.pop()
with pytest.raises(AssertionError):
assert can_get_head_object(
other_wallet.wallet_path,
cid,
oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
with pytest.raises(AssertionError):
assert can_get_object(
other_wallet.wallet_path,
cid,
oid,
file_path,
shell=self.shell,
cluster=self.cluster,
)
with pytest.raises(AssertionError):
assert can_put_object(
other_wallet.wallet_path, cid, file_path, shell=self.shell, cluster=self.cluster
)
with allure.step(
"Check other can get and put objects without attributes and using bearer token"
):
bearer_other = form_bearertoken_file(
user_wallet.wallet_path,
cid,
[
EACLRule(
operation=op,
access=EACLAccess.ALLOW,
role=EACLRole.OTHERS,
)
for op in EACLOperation
],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
assert can_get_head_object(
other_wallet.wallet_path,
cid,
objects_without_header[0],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
bearer=bearer_other,
)
assert can_get_object(
other_wallet.wallet_path,
cid,
objects_without_header[0],
file_path,
shell=self.shell,
cluster=self.cluster,
bearer=bearer_other,
)
assert can_put_object(
other_wallet.wallet_path,
cid,
file_path,
shell=self.shell,
cluster=self.cluster,
bearer=bearer_other,
)
with allure.step("Check other can get objects with attributes matching the filter"):
oid = allow_objects.pop()
assert can_get_head_object(
other_wallet.wallet_path,
cid,
oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
assert can_get_object(
other_wallet.wallet_path,
cid,
oid,
file_path,
shell=self.shell,
cluster=self.cluster,
)
assert can_put_object(
other_wallet.wallet_path,
cid,
file_path,
shell=self.shell,
cluster=self.cluster,
attributes=allow_attribute,
)
with allure.step("Check other cannot get objects without attributes matching the filter"):
with pytest.raises(AssertionError):
assert can_get_head_object(
other_wallet.wallet_path,
cid,
deny_objects[0],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
with pytest.raises(AssertionError):
assert can_get_object(
other_wallet.wallet_path,
cid,
deny_objects[0],
file_path,
shell=self.shell,
cluster=self.cluster,
)
with pytest.raises(AssertionError):
assert can_put_object(
other_wallet.wallet_path,
cid,
file_path,
attributes=deny_attribute,
shell=self.shell,
cluster=self.cluster,
)
with allure.step(
"Check other can get objects without attributes matching the filter "
"and using bearer token"
):
oid = deny_objects.pop()
assert can_get_head_object(
other_wallet.wallet_path,
cid,
oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
bearer=bearer_other,
)
assert can_get_object(
other_wallet.wallet_path,
cid,
oid,
file_path,
shell=self.shell,
cluster=self.cluster,
bearer=bearer_other,
)
assert can_put_object(
other_wallet.wallet_path,
cid,
file_path,
shell=self.shell,
cluster=self.cluster,
attributes=deny_attribute,
bearer=bearer_other,
)


@ -1,268 +1,62 @@
import logging
import os
import shutil
from datetime import datetime
from typing import Optional
from re import search
import allure
import pytest
import yaml
from frostfs_testlib.hosting import Hosting
from frostfs_testlib.reporter import AllureHandler, get_reporter
from frostfs_testlib.resources.common import (
ASSETS_DIR,
COMPLEX_OBJECT_CHUNKS_COUNT,
COMPLEX_OBJECT_TAIL_SIZE,
DEFAULT_WALLET_PASS,
SIMPLE_OBJECT_SIZE,
)
from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus
from frostfs_testlib.shell import LocalShell, Shell
from frostfs_testlib.steps.cli.container import list_containers
from frostfs_testlib.steps.cli.object import get_netmap_netinfo
from frostfs_testlib.steps.node_management import storage_node_healthcheck
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.dataclasses.wallet import WalletFactory, WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils import env_utils, version_utils
from robot.api import deco
from pytest_tests.resources.common import HOSTING_CONFIG_FILE, TEST_CYCLES_COUNT
import wallet
from cli_helpers import _cmd_run
from common import ASSETS_DIR, FREE_STORAGE, MAINNET_WALLET_PATH
from payment_neogo import neofs_deposit, transfer_mainnet_gas
logger = logging.getLogger("NeoLogger")
def robot_keyword_adapter(name=None, tags=(), types=()):
return allure.step(name)
deco.keyword = robot_keyword_adapter
logger = logging.getLogger('NeoLogger')
# Always add the logs check test, even if it does not match the mark selectors
def pytest_configure(config: pytest.Config):
markers = config.option.markexpr
if markers != "":
config.option.markexpr = f"logs_after_session or ({markers})"
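# For example, running the suite with `-m "acl"` results in the effective expression
# "logs_after_session or (acl)", so the logs check test is always collected.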
@pytest.fixture(scope='session', autouse=True)
@allure.title('Check binary versions')
def check_binary_versions(request):
environment_dir = request.config.getoption('--alluredir')
binaries = ['neo-go', 'neofs-cli', 'neofs-authmate', 'aws']
env_out = {}
for binary in binaries:
out = _cmd_run(f'{binary} --version')
version = search(r'(v?\d.*)\s+', out)
version = version.group(1) if version else 'Unknown'
env_out[binary.upper()] = version
# pytest hook. Do not rename
def pytest_collection_modifyitems(items: list[pytest.Item]):
# Run node management tests (@pytest.mark.node_mgmt) towards the end and the logs check test (logs_after_session) last
def priority(item: pytest.Item) -> int:
is_node_mgmt_test = 1 if item.get_closest_marker("node_mgmt") else 0
is_logs_check_test = 100 if item.get_closest_marker("logs_after_session") else 0
return is_node_mgmt_test + is_logs_check_test
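# Resulting order (illustrative): regular tests (priority 0) run first, node_mgmt tests
# (priority 1) after them, and the logs_after_session check (priority 100) last.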
if environment_dir:
with open(f'{environment_dir}/environment.properties', 'w') as out_file:
for env, env_value in env_out.items():
out_file.write(f'{env}={env_value}\n')
items.sort(key=lambda item: priority(item))
@pytest.fixture(scope='session')
@allure.title('Init wallet with address')
def init_wallet_with_address():
full_path = f'{os.getcwd()}/{ASSETS_DIR}'
os.mkdir(full_path)
def pytest_generate_tests(metafunc: pytest.Metafunc):
if (
TEST_CYCLES_COUNT <= 1
or metafunc.definition.get_closest_marker("logs_after_session")
or metafunc.definition.get_closest_marker("no_cycles")
):
return
yield wallet.init_wallet(ASSETS_DIR)
metafunc.fixturenames.append("cycle")
metafunc.parametrize(
"cycle",
range(1, TEST_CYCLES_COUNT + 1),
ids=[f"cycle {cycle}" for cycle in range(1, TEST_CYCLES_COUNT + 1)],
)
shutil.rmtree(full_path)
@pytest.fixture(scope="session")
def configure_testlib():
get_reporter().register_handler(AllureHandler())
logging.getLogger("paramiko").setLevel(logging.INFO)
yield
@pytest.fixture(scope='session')
@allure.title('Prepare wallet and deposit')
def prepare_wallet_and_deposit(init_wallet_with_address):
wallet, addr, _ = init_wallet_with_address
logger.info(f'Init wallet: {wallet},\naddr: {addr}')
if not FREE_STORAGE:
deposit = 30
transfer_mainnet_gas(wallet, deposit + 1, wallet_path=MAINNET_WALLET_PATH)
neofs_deposit(wallet, deposit)
@pytest.fixture(scope="session")
def client_shell(configure_testlib) -> Shell:
yield LocalShell()
@pytest.fixture(scope="session")
def hosting(configure_testlib) -> Hosting:
with open(HOSTING_CONFIG_FILE, "r") as file:
hosting_config = yaml.full_load(file)
hosting_instance = Hosting()
hosting_instance.configure(hosting_config)
yield hosting_instance
@pytest.fixture(scope="session")
def require_multiple_hosts(hosting: Hosting):
"""Designates tests that require environment with multiple hosts.
These tests will be skipped on an environment that has only 1 host.
"""
if len(hosting.hosts) <= 1:
pytest.skip("Test only works with multiple hosts")
yield
@pytest.fixture(scope="session")
def max_object_size(cluster: Cluster, client_shell: Shell) -> int:
storage_node = cluster.storage_nodes[0]
net_info = get_netmap_netinfo(
wallet=storage_node.get_wallet_path(),
wallet_config=storage_node.get_wallet_config_path(),
endpoint=storage_node.get_rpc_endpoint(),
shell=client_shell,
)
yield net_info["maximum_object_size"]
@pytest.fixture(scope="session")
def simple_object_size(max_object_size: int) -> int:
yield int(SIMPLE_OBJECT_SIZE) if int(SIMPLE_OBJECT_SIZE) < max_object_size else max_object_size
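# Cap the simple object at max_object_size so it always fits into a single chunk
# and is stored without splitting.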
@pytest.fixture(scope="session")
def complex_object_size(max_object_size: int) -> int:
return max_object_size * int(COMPLEX_OBJECT_CHUNKS_COUNT) + int(COMPLEX_OBJECT_TAIL_SIZE)
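# A complex object is deliberately larger than max_object_size: COMPLEX_OBJECT_CHUNKS_COUNT
# full-size chunks plus a COMPLEX_OBJECT_TAIL_SIZE remainder, e.g. 4 * 1000 + 100 = 4100
# (numbers are illustrative), which forces the object to be split on PUT.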
@pytest.fixture(scope="session")
def wallet_factory(temp_directory: str, client_shell: Shell, cluster: Cluster) -> WalletFactory:
return WalletFactory(temp_directory, client_shell, cluster)
@pytest.fixture(scope="session")
def cluster(temp_directory: str, hosting: Hosting, client_shell: Shell) -> Cluster:
cluster = Cluster(hosting)
if cluster.is_local_devenv():
cluster.create_wallet_configs(hosting)
ClusterTestBase.shell = client_shell
ClusterTestBase.cluster = cluster
yield cluster
@allure.step("[Class]: Provide S3 policy")
@pytest.fixture(scope="class")
def s3_policy(request: pytest.FixtureRequest):
policy = None
if "param" in request.__dict__:
policy = request.param
return policy
@pytest.fixture(scope="session")
def cluster_state_controller(client_shell: Shell, cluster: Cluster) -> ClusterStateController:
controller = ClusterStateController(client_shell, cluster)
yield controller
@allure.step("[Class]: Create S3 client")
@pytest.fixture(scope="class")
def s3_client(
default_wallet: str,
client_shell: Shell,
s3_policy: Optional[str],
cluster: Cluster,
request: pytest.FixtureRequest,
) -> S3ClientWrapper:
wallet = WalletInfo(path=default_wallet, password=DEFAULT_WALLET_PASS)
(cid, access_key_id, secret_access_key) = s3_helper.init_s3_credentials(
wallet,
client_shell,
cluster,
s3gates=[cluster_node.s3_gate for cluster_node in cluster.cluster_nodes],
policy=s3_policy,
)
containers_list = list_containers(
wallet.path, shell=client_shell, endpoint=cluster.default_rpc_endpoint
)
assert cid in containers_list, f"Expected cid {cid} in {containers_list}"
s3_client_cls = request.param
client = s3_client_cls(access_key_id, secret_access_key, cluster.default_s3_gate_endpoint)
yield client
@allure.step("Create/delete bucket")
@pytest.fixture
def bucket(s3_client: S3ClientWrapper, request: pytest.FixtureRequest):
bucket_name = s3_client.create_bucket()
versioning_status: Optional[VersioningStatus] = None
if "param" in request.__dict__:
versioning_status = request.param
if versioning_status:
s3_helper.set_bucket_versioning(s3_client, bucket_name, versioning_status)
yield bucket_name
s3_helper.delete_bucket_with_objects(s3_client, bucket_name)
@allure.step("Create two buckets")
@pytest.fixture
def two_buckets(s3_client: S3ClientWrapper):
bucket_1 = s3_client.create_bucket()
bucket_2 = s3_client.create_bucket()
yield bucket_1, bucket_2
for bucket_name in [bucket_1, bucket_2]:
s3_helper.delete_bucket_with_objects(s3_client, bucket_name)
@allure.step("Check binary versions")
@pytest.fixture(scope="session", autouse=True)
def check_binary_versions(hosting: Hosting, client_shell: Shell, request: pytest.FixtureRequest):
local_versions = version_utils.get_local_binaries_versions(client_shell)
remote_versions = version_utils.get_remote_binaries_versions(hosting)
all_versions = {**local_versions, **remote_versions}
environment_dir = request.config.getoption("--alluredir")
if not environment_dir:
return None
file_path = f"{environment_dir}/environment.properties"
env_utils.save_env_properties(file_path, all_versions)
@allure.step("Prepare tmp directory")
@pytest.fixture(scope="session")
def temp_directory():
with allure.step("Prepare tmp directory"):
full_path = os.path.join(os.getcwd(), ASSETS_DIR)
shutil.rmtree(full_path, ignore_errors=True)
os.mkdir(full_path)
yield full_path
with allure.step("Remove tmp directory"):
shutil.rmtree(full_path)
@allure.step("[Autouse/Session] Test session start time")
@pytest.fixture(scope="session", autouse=True)
def session_start_time():
start_time = datetime.utcnow()
return start_time
@allure.step("Run health check for all storage nodes")
@pytest.fixture(scope="session", autouse=True)
def run_health_check(session_start_time, cluster: Cluster):
failed_nodes = []
for node in cluster.storage_nodes:
health_check = storage_node_healthcheck(node)
if health_check.health_status != "READY" or health_check.network_status != "ONLINE":
failed_nodes.append(node)
if failed_nodes:
raise AssertionError(f"Nodes {failed_nodes} are not healthy")
@allure.step("Prepare wallet and deposit")
@pytest.fixture(scope="session")
def default_wallet(wallet_factory: WalletFactory) -> str:
wallet = wallet_factory.create_wallet(password=DEFAULT_WALLET_PASS)
allure.attach.file(wallet.path, os.path.basename(wallet.path), allure.attachment_type.JSON)
return wallet.path
return wallet


@ -1,125 +0,0 @@
import json
import allure
import pytest
from frostfs_testlib.resources.wellknown_acl import PRIVATE_ACL_F
from frostfs_testlib.steps.cli.container import (
create_container,
delete_container,
get_container,
list_containers,
wait_for_container_creation,
wait_for_container_deletion,
)
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from pytest_tests.helpers.utility import placement_policy_from_container
@pytest.mark.container
@pytest.mark.sanity
class TestContainer(ClusterTestBase):
@pytest.mark.parametrize("name", ["", "test-container"], ids=["No name", "Set particular name"])
@pytest.mark.smoke
def test_container_creation(self, default_wallet: str, name: str):
scenario_title = f"with name {name}" if name else "without name"
allure.dynamic.title(f"User can create container {scenario_title}")
wallet = default_wallet
with open(wallet) as file:
json_wallet = json.load(file)
placement_rule = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
cid = create_container(
wallet,
rule=placement_rule,
name=name,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
containers = list_containers(
wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
assert cid in containers, f"Expected container {cid} in containers: {containers}"
container_info: str = get_container(
wallet,
cid,
json_mode=False,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
container_info = (
container_info.casefold()
) # To ignore case when comparing with expected values
info_to_check = {
f"basic ACL: {PRIVATE_ACL_F} (private)",
f"owner ID: {json_wallet.get('accounts')[0].get('address')}",
f"container ID: {cid}",
}
if name:
info_to_check.add(f"Name={name}")
with allure.step("Check container has correct information"):
expected_policy = placement_rule.casefold()
actual_policy = placement_policy_from_container(container_info)
assert (
actual_policy == expected_policy
), f"Expected policy\n{expected_policy} but got policy\n{actual_policy}"
for info in info_to_check:
expected_info = info.casefold()
assert (
expected_info in container_info
), f"Expected {expected_info} in container info:\n{container_info}"
with allure.step("Delete container and check it was deleted"):
delete_container(
wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
self.tick_epoch()
wait_for_container_deletion(
wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
@allure.title("Parallel container creation and deletion")
def test_container_creation_deletion_parallel(self, default_wallet: str):
containers_count = 3
wallet = default_wallet
placement_rule = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
cids: list[str] = []
with allure.step(f"Create {containers_count} containers"):
for _ in range(containers_count):
cids.append(
create_container(
wallet,
rule=placement_rule,
await_mode=False,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
wait_for_creation=False,
)
)
with allure.step("Wait for containers occur in container list"):
for cid in cids:
wait_for_container_creation(
wallet,
cid,
sleep_interval=containers_count,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
with allure.step("Delete containers and check they were deleted"):
for cid in cids:
delete_container(
wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
self.tick_epoch()
wait_for_container_deletion(
wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)


@ -1,29 +0,0 @@
import random
from datetime import datetime
import allure
import pytest
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.controllers import ShardsWatcher
@pytest.fixture()
@allure.title("Select random node for testing")
def node_under_test(cluster: Cluster) -> ClusterNode:
selected_node = random.choice(cluster.cluster_nodes)
allure.attach(f"{selected_node}", "Selected node", allure.attachment_type.TEXT)
return selected_node
@pytest.fixture()
@allure.title("Provide Shards watcher")
def shards_watcher(node_under_test: ClusterNode) -> ShardsWatcher:
watcher = ShardsWatcher(node_under_test)
return watcher
@pytest.fixture()
@allure.title("Test start time")
def test_start_time() -> datetime:
start_time = datetime.utcnow()
return start_time


@ -1,118 +0,0 @@
import logging
import random
from time import sleep
import allure
import pytest
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import get_object, put_object_to_random_node
from frostfs_testlib.storage.cluster import StorageNode
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.failover_utils import (
wait_all_storage_nodes_returned,
wait_object_replication,
)
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
from pytest_tests.helpers.iptables_helper import IpTablesHelper
logger = logging.getLogger("NeoLogger")
STORAGE_NODE_COMMUNICATION_PORT = "8080"
STORAGE_NODE_COMMUNICATION_PORT_TLS = "8082"
PORTS_TO_BLOCK = [STORAGE_NODE_COMMUNICATION_PORT, STORAGE_NODE_COMMUNICATION_PORT_TLS]
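# Assumption: 8080 is the plain gRPC data port and 8082 its TLS counterpart; blocking both
# cuts off all object traffic to the node while leaving control and SSH access intact.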
blocked_nodes: list[StorageNode] = []
@pytest.mark.failover
@pytest.mark.failover_network
class TestFailoverNetwork(ClusterTestBase):
@pytest.fixture(autouse=True)
@allure.title("Restore network")
def restore_network(self):
yield
with allure.step(f"Count blocked nodes {len(blocked_nodes)}"):
not_empty = len(blocked_nodes) != 0
for node in list(blocked_nodes):
with allure.step(f"Restore network at host for {node.label}"):
IpTablesHelper.restore_input_traffic_to_port(node.host.get_shell(), PORTS_TO_BLOCK)
blocked_nodes.remove(node)
if not_empty:
wait_all_storage_nodes_returned(self.shell, self.cluster)
@allure.title("Block Storage node traffic")
def test_block_storage_node_traffic(
self,
default_wallet: str,
require_multiple_hosts,
simple_object_size: int,
):
"""
Block storage nodes traffic using iptables and wait for replication for objects.
"""
wallet = default_wallet
placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
wakeup_node_timeout = 10  # timeout to let nodes detect that traffic has been blocked
nodes_to_block_count = 2
source_file_path = generate_file(simple_object_size)
cid = create_container(
wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=placement_rule,
basic_acl=PUBLIC_ACL,
)
oid = put_object_to_random_node(
wallet, source_file_path, cid, shell=self.shell, cluster=self.cluster
)
nodes = wait_object_replication(
cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes
)
logger.info(f"Nodes are {nodes}")
nodes_to_block = nodes
if nodes_to_block_count > len(nodes):
# TODO: the intent of this logic is not clear, need to revisit
nodes_to_block = random.choices(nodes, k=2)
excluded_nodes = []
for node in nodes_to_block:
with allure.step(f"Block incoming traffic at node {node} on port {PORTS_TO_BLOCK}"):
blocked_nodes.append(node)
excluded_nodes.append(node)
IpTablesHelper.drop_input_traffic_to_port(node.host.get_shell(), PORTS_TO_BLOCK)
sleep(wakeup_node_timeout)
with allure.step(f"Check object is not stored on node {node}"):
new_nodes = wait_object_replication(
cid,
oid,
2,
shell=self.shell,
nodes=list(set(self.cluster.storage_nodes) - set(excluded_nodes)),
)
assert node not in new_nodes
with allure.step("Check object data is not corrupted"):
got_file_path = get_object(
wallet, cid, oid, endpoint=new_nodes[0].get_rpc_endpoint(), shell=self.shell
)
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
for node in nodes_to_block:
with allure.step(f"Unblock incoming traffic at host {node} on port {PORTS_TO_BLOCK}"):
IpTablesHelper.restore_input_traffic_to_port(node.host.get_shell(), PORTS_TO_BLOCK)
blocked_nodes.remove(node)
sleep(wakeup_node_timeout)
with allure.step("Check object data is not corrupted"):
new_nodes = wait_object_replication(
cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes
)
got_file_path = get_object(
wallet, cid, oid, shell=self.shell, endpoint=new_nodes[0].get_rpc_endpoint()
)
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)


@ -1,232 +0,0 @@
import logging
import os.path
import random
import time
import allure
import pytest
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.cli.container import (
StorageContainer,
StorageContainerInfo,
create_container,
)
from frostfs_testlib.steps.cli.object import get_object
from frostfs_testlib.steps.node_management import check_node_in_map, check_node_not_in_map
from frostfs_testlib.storage.cluster import ClusterNode, StorageNode
from frostfs_testlib.storage.controllers import ClusterStateController
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils import datetime_utils
from frostfs_testlib.utils.failover_utils import wait_for_host_offline, wait_object_replication
from frostfs_testlib.utils.file_utils import get_file_hash
from pytest import FixtureRequest
logger = logging.getLogger("NeoLogger")
@pytest.mark.failover
@pytest.mark.failover_server
class TestFailoverServer(ClusterTestBase):
@wait_for_success(max_wait_time=120, interval=1)
def wait_node_not_in_map(self, *args, **kwargs):
check_node_not_in_map(*args, **kwargs)
@wait_for_success(max_wait_time=120, interval=1)
def wait_node_in_map(self, *args, **kwargs):
check_node_in_map(*args, **kwargs)
@allure.step("Create {count_containers} containers and {count_files} objects")
@pytest.fixture
def containers(
self,
request: FixtureRequest,
default_wallet: str,
) -> list[StorageContainer]:
placement_rule = "REP 2 CBF 2 SELECT 2 FROM * AS X"
containers = []
for _ in range(request.param):
cont_id = create_container(
default_wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=placement_rule,
basic_acl=PUBLIC_ACL,
)
wallet = WalletInfo(path=default_wallet)
storage_cont_info = StorageContainerInfo(id=cont_id, wallet_file=wallet)
containers.append(
StorageContainer(
storage_container_info=storage_cont_info, shell=self.shell, cluster=self.cluster
)
)
return containers
@allure.step("Create object and delete after test")
@pytest.fixture(scope="class")
def storage_objects(
self,
request: FixtureRequest,
containers: list[StorageContainer],
simple_object_size: int,
complex_object_size: int,
) -> StorageObjectInfo:
count_object = request.param
object_size = [simple_object_size, complex_object_size]
object_list = []
for cont in containers:
for _ in range(count_object):
object_list.append(cont.generate_object(size=random.choice(object_size)))
for storage_object in object_list:
os.remove(storage_object.file_path)
yield object_list
@allure.step("Select random node to stop and start it after test")
@pytest.fixture
def node_to_stop(
self, node_under_test: ClusterNode, cluster_state_controller: ClusterStateController
) -> ClusterNode:
yield node_under_test
with allure.step(f"start {node_under_test.storage_node}"):
cluster_state_controller.start_stopped_hosts()
@allure.step("Upload object with nodes and compare")
def get_corrupted_objects_list(
self, nodes: list[StorageNode], storage_objects: list[StorageObjectInfo]
) -> list[StorageObjectInfo]:
corrupted_objects = []
for node in nodes:
for storage_object in storage_objects:
got_file_path = get_object(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
endpoint=node.get_rpc_endpoint(),
shell=self.shell,
timeout="60s",
)
if storage_object.file_hash != get_file_hash(got_file_path):
corrupted_objects.append(storage_object)
os.remove(got_file_path)
return corrupted_objects
def check_objects_replication(
self, storage_objects: list[StorageObjectInfo], storage_nodes: list[StorageNode]
) -> None:
for storage_object in storage_objects:
wait_object_replication(
storage_object.cid,
storage_object.oid,
2,
shell=self.shell,
nodes=storage_nodes,
sleep_interval=45,
attempts=60,
)
@allure.title("Full shutdown node")
@pytest.mark.parametrize("containers, storage_objects", [(5, 10)], indirect=True)
def test_complete_node_shutdown(
self,
containers: list[StorageContainer],
storage_objects: list[StorageObjectInfo],
default_wallet: str,
node_to_stop: ClusterNode,
cluster_state_controller: ClusterStateController,
):
with allure.step(f"Remove {node_to_stop} from the list of nodes"):
alive_nodes = list(set(self.cluster.cluster_nodes) - {node_to_stop})
storage_nodes = [cluster.storage_node for cluster in alive_nodes]
with allure.step("Tick epoch"):
self.tick_epochs(1, storage_nodes[0])
with allure.step("Wait 2 block time"):
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)
cluster_state_controller.stop_node_host(node=node_to_stop, mode="hard")
with allure.step(f"Check if the node {node_to_stop.storage_node} has stopped"):
wait_for_host_offline(self.shell, node_to_stop.storage_node)
with allure.step("Verify that there are no corrupted objects"):
corrupted_objects_list = self.get_corrupted_objects_list(storage_nodes, storage_objects)
assert not corrupted_objects_list
with allure.step(f"check {node_to_stop.storage_node} in map"):
self.wait_node_in_map(
node_to_stop.storage_node, self.shell, alive_node=storage_nodes[0]
)
count_tick_epoch = int(alive_nodes[0].ir_node.get_netmap_cleaner_threshold()) + 2
with allure.step(f"Tick {count_tick_epoch} epoch, in {storage_nodes[0]} node"):
for tick in range(count_tick_epoch):
self.tick_epoch(storage_nodes[0])
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)
with allure.step(f"Check if the node {node_to_stop.storage_node} has stopped"):
wait_for_host_offline(self.shell, node_to_stop.storage_node)
with allure.step(f"Check {node_to_stop} in not map"):
self.wait_node_not_in_map(
node_to_stop.storage_node, self.shell, alive_node=storage_nodes[0]
)
with allure.step(
f"Verify that there are no corrupted objects after {count_tick_epoch} epochs"
):
corrupted_objects_list = self.get_corrupted_objects_list(storage_nodes, storage_objects)
assert not corrupted_objects_list
@allure.title("Temporarily disable a node")
@pytest.mark.parametrize("containers, storage_objects", [(5, 10)], indirect=True)
def test_temporarily_disable_a_node(
self,
containers: list[StorageContainer],
storage_objects: list[StorageObjectInfo],
default_wallet: str,
node_to_stop: ClusterNode,
cluster_state_controller: ClusterStateController,
):
with allure.step(f"Remove {node_to_stop} from the list of nodes"):
storage_nodes = list(set(self.cluster.storage_nodes) - {node_to_stop.storage_node})
with allure.step("Tick epoch"):
self.tick_epochs(1, storage_nodes[0])
with allure.step("Wait 2 block time"):
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)
cluster_state_controller.stop_node_host(node=node_to_stop, mode="hard")
with allure.step(f"Check if the node {node_to_stop} has stopped"):
wait_for_host_offline(self.shell, node_to_stop.storage_node)
with allure.step("Verify that there are no corrupted objects"):
corrupted_objects_list = self.get_corrupted_objects_list(storage_nodes, storage_objects)
assert not corrupted_objects_list
with allure.step(f"Check {node_to_stop} in map"):
self.wait_node_in_map(
node_to_stop.storage_node, self.shell, alive_node=storage_nodes[0]
)
cluster_state_controller.start_node_host(node_to_stop)
with allure.step("Verify that there are no corrupted objects"):
corrupted_objects_list = self.get_corrupted_objects_list(storage_nodes, storage_objects)
assert not corrupted_objects_list


@ -1,845 +0,0 @@
import logging
from datetime import datetime
from time import sleep
import allure
import pytest
from frostfs_testlib.analytics import test_case
from frostfs_testlib.hosting import Host
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
from frostfs_testlib.shell import CommandOptions, Shell
from frostfs_testlib.steps.cli.container import (
StorageContainer,
StorageContainerInfo,
create_container,
)
from frostfs_testlib.steps.cli.object import get_object, put_object_to_random_node
from frostfs_testlib.steps.node_management import (
check_node_in_map,
check_node_not_in_map,
exclude_node_from_network_map,
include_node_to_network_map,
remove_nodes_from_map_morph,
wait_for_node_to_be_ready,
)
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode
from frostfs_testlib.storage.controllers import ClusterStateController, ShardsWatcher
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils import datetime_utils
from frostfs_testlib.utils.failover_utils import (
wait_all_storage_nodes_returned,
wait_object_replication,
)
from frostfs_testlib.utils.file_keeper import FileKeeper
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
logger = logging.getLogger("NeoLogger")
stopped_nodes: list[StorageNode] = []
@pytest.fixture(scope="function")
@allure.title("Provide File Keeper")
def file_keeper():
keeper = FileKeeper()
yield keeper
keeper.restore_files()
@allure.step("Return all stopped hosts")
@pytest.fixture(scope="function", autouse=True)
def after_run_return_all_stopped_hosts(client_shell: Shell, cluster: Cluster) -> str:
yield "After this test stopped services will be started automatically via fixture"
return_stopped_hosts(client_shell, cluster)
@allure.step("Return all stopped storage services after test")
@pytest.fixture(scope="function")
def after_run_return_all_stopped_services(cluster_state_controller: ClusterStateController):
yield
cluster_state_controller.start_stopped_storage_services()
@allure.step("Return all stopped S3 GateWay services after test")
@pytest.fixture(scope="function")
def after_run_return_all_stopped_s3(cluster_state_controller: ClusterStateController):
yield
cluster_state_controller.start_stopped_s3_gate()
def panic_reboot_host(host: Host) -> None:
shell = host.get_shell()
# Enable the magic SysRq key, then trigger an immediate reboot of the host
shell.exec('sudo sh -c "echo 1 > /proc/sys/kernel/sysrq"')
# The reboot command never returns normally, so use a short timeout and skip the exit code check
options = CommandOptions(close_stdin=True, timeout=1, check=False)
shell.exec('sudo sh -c "echo b > /proc/sysrq-trigger"', options)
def return_stopped_hosts(shell: Shell, cluster: Cluster) -> None:
for node in list(stopped_nodes):
with allure.step(f"Start host {node}"):
node.host.start_host()
stopped_nodes.remove(node)
wait_all_storage_nodes_returned(shell, cluster)
@pytest.mark.failover
class TestFailoverStorage(ClusterTestBase):
@allure.title("Lose and return storage node's host")
@pytest.mark.parametrize("hard_reboot", [True, False])
@pytest.mark.failover_reboot
def test_lose_storage_node_host(
self, default_wallet, hard_reboot: bool, require_multiple_hosts, simple_object_size
):
wallet = default_wallet
placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
source_file_path = generate_file(simple_object_size)
cid = create_container(
wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=placement_rule,
basic_acl=PUBLIC_ACL,
)
oid = put_object_to_random_node(
wallet, source_file_path, cid, shell=self.shell, cluster=self.cluster
)
nodes = wait_object_replication(
cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes
)
for node in nodes:
stopped_nodes.append(node)
with allure.step(f"Stop host {node}"):
node.host.stop_host("hard" if hard_reboot else "soft")
new_nodes = wait_object_replication(
cid,
oid,
2,
shell=self.shell,
nodes=list(set(self.cluster.storage_nodes) - {node}),
)
assert all(old_node not in new_nodes for old_node in nodes)
with allure.step("Check object data is not corrupted"):
got_file_path = get_object(
wallet, cid, oid, endpoint=new_nodes[0].get_rpc_endpoint(), shell=self.shell
)
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
with allure.step("Return all hosts"):
return_stopped_hosts(self.shell, self.cluster)
with allure.step("Check object data is not corrupted"):
new_nodes = wait_object_replication(
cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes
)
got_file_path = get_object(
wallet, cid, oid, shell=self.shell, endpoint=new_nodes[0].get_rpc_endpoint()
)
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
@allure.title("Panic storage node's host")
@pytest.mark.parametrize("sequence", [True, False])
@pytest.mark.failover_panic
def test_panic_storage_node_host(
self, default_wallet, require_multiple_hosts, sequence: bool, simple_object_size
):
wallet = default_wallet
placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
source_file_path = generate_file(simple_object_size)
cid = create_container(
wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=placement_rule,
basic_acl=PUBLIC_ACL,
)
oid = put_object_to_random_node(
wallet, source_file_path, cid, shell=self.shell, cluster=self.cluster
)
nodes = wait_object_replication(
cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes
)
allure.attach(
"\n".join([str(node) for node in nodes]),
"Current nodes with object",
allure.attachment_type.TEXT,
)
new_nodes: list[StorageNode] = []
for node in nodes:
with allure.step(f"Hard reboot host {node} via magic SysRq option"):
panic_reboot_host(node.host)
if sequence:
try:
new_nodes = wait_object_replication(
cid,
oid,
2,
shell=self.shell,
nodes=list(set(self.cluster.storage_nodes) - {node}),
)
except AssertionError:
new_nodes = wait_object_replication(
cid,
oid,
2,
shell=self.shell,
nodes=self.cluster.storage_nodes,
)
allure.attach(
"\n".join([str(new_node) for new_node in new_nodes]),
f"Nodes with object after {node} fail",
allure.attachment_type.TEXT,
)
if not sequence:
new_nodes = wait_object_replication(
cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes
)
allure.attach(
"\n".join([str(new_node) for new_node in new_nodes]),
"Nodes with object after nodes fail",
allure.attachment_type.TEXT,
)
got_file_path = get_object(
wallet, cid, oid, shell=self.shell, endpoint=new_nodes[0].get_rpc_endpoint()
)
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
@allure.title("Do not ignore unhealthy tree endpoints")
def test_unhealthy_tree(
self,
s3_client: S3ClientWrapper,
simple_object_size: int,
cluster_state_controller: ClusterStateController,
after_run_return_all_stopped_s3,
after_run_return_all_stopped_services,
):
default_node = self.cluster.cluster_nodes[0]
default_s3gate = self.cluster.s3_gates[0]
with allure.step("Turn S3 GW off on default node"):
default_s3gate.stop_service()
with allure.step("Turn off storage on default node"):
cluster_state_controller.stop_storage_service(default_node)
with allure.step("Turn on S3 GW on default node"):
default_s3gate.start_service()
with allure.step("Turn on storage on default node"):
cluster_state_controller.start_stopped_storage_services()
with allure.step("Create bucket with REP 1 SELECT 1 policy"):
bucket = s3_client.create_bucket(
location_constraint="load-1-1",
)
file_path = generate_file(simple_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
with allure.step("Put object into bucket"):
put_object = s3_client.put_object(bucket, file_path)
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
with allure.step("Turn off all storage nodes except default"):
for node in self.cluster.cluster_nodes[1:]:
with allure.step(f"Stop storage service on node: {node}"):
cluster_state_controller.stop_storage_service(node)
with allure.step("Check that object is available"):
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
def pytest_generate_tests(metafunc: pytest.Metafunc):
if "s3_client" in metafunc.fixturenames:
metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
@pytest.mark.failover
@pytest.mark.failover_empty_map
class TestEmptyMap(ClusterTestBase):
"""
A set of tests that make the network map empty and verify that objects can still be read afterwards
"""
@allure.step("Teardown after EmptyMap offline test")
@pytest.fixture()
def empty_map_offline_teardown(self):
yield
with allure.step("Return all storage nodes to network map"):
for node in list(stopped_nodes):
include_node_to_network_map(node, node, shell=self.shell, cluster=self.cluster)
stopped_nodes.remove(node)
@test_case.title("Test makes network map empty (offline all storage nodes)")
@test_case.priority(test_case.TestCasePriority.HIGH)
@test_case.suite_name("failovers")
@test_case.suite_section("test_failover_storage")
@pytest.mark.failover_empty_map_offlne
@allure.title("Test makes network map empty (offline all storage nodes)")
def test_offline_all_storage_nodes(
self,
s3_client: S3ClientWrapper,
bucket: str,
simple_object_size: int,
empty_map_offline_teardown,
):
"""
The test makes the network map empty (sets offline status on all storage nodes), then returns all nodes to the map and checks that the object can be read through S3.
Steps:
1. Check that bucket is empty
2: PUT object into bucket
3: Check that object exists in bucket
4: Exclude all storage nodes from network map (set status OFFLINE)
5: Return all storage nodes to network map
6: Check that we can read object from #2
Args:
bucket: bucket which contains tested object
simple_object_size: size of object
"""
file_path = generate_file(simple_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
bucket_objects = [file_name]
objects_list = s3_client.list_objects(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with allure.step("Put object into bucket"):
s3_client.put_object(bucket, file_path)
with allure.step("Check that object exists in bucket"):
s3_helper.check_objects_in_bucket(s3_client, bucket, bucket_objects)
storage_nodes = self.cluster.storage_nodes
with allure.step("Exclude all storage nodes from network map"):
for node in storage_nodes:
exclude_node_from_network_map(node, node, shell=self.shell, cluster=self.cluster)
stopped_nodes.append(node)
with allure.step("Return all storage nodes to network map"):
for node in storage_nodes:
include_node_to_network_map(node, node, shell=self.shell, cluster=self.cluster)
stopped_nodes.remove(node)
with allure.step("Check that we can read object"):
s3_helper.check_objects_in_bucket(s3_client, bucket, bucket_objects)
@allure.step("Teardown after EmptyMap stop service test")
@pytest.fixture()
def empty_map_stop_service_teardown(self):
yield
with allure.step("Return all storage nodes to network map"):
for node in list(stopped_nodes):
with allure.step(f"Start node {node}"):
node.start_service()
with allure.step(f"Waiting status ready for node {node}"):
wait_for_node_to_be_ready(node)
sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
self.tick_epochs(1)
check_node_in_map(node, shell=self.shell, alive_node=node)
stopped_nodes.remove(node)
@test_case.title("Test makes network map empty (stop storage service on all nodes)")
@test_case.priority(test_case.TestCasePriority.HIGH)
@test_case.suite_name("failovers")
@test_case.suite_section("test_failover_storage")
@pytest.mark.failover_empty_map_stop_service
@allure.title("Test makes network map empty (stop storage service on all nodes)")
def test_stop_all_storage_nodes(
self,
s3_client: S3ClientWrapper,
bucket: str,
simple_object_size: int,
empty_map_stop_service_teardown,
):
"""
The test makes the network map empty (stops the storage service on all nodes,
then uses 'frostfs-adm morph delete-nodes' to delete the nodes from the map),
then starts all services and checks that the object can be read through S3.
Steps:
1. Check that bucket is empty
2: PUT object into bucket
3: Check that object exists in bucket
4: Exclude all storage nodes from the network map (stop the storage service
and manually exclude them from the map)
5: Return all storage nodes to network map
6: Check that we can read object from #2
Args:
bucket: bucket which contains tested object
simple_object_size: size of object
"""
file_path = generate_file(simple_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
bucket_objects = [file_name]
objects_list = s3_client.list_objects(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with allure.step("Put object into bucket"):
s3_client.put_object(bucket, file_path)
with allure.step("Check that object exists in bucket"):
s3_helper.check_objects_in_bucket(s3_client, bucket, bucket_objects)
with allure.step("Stop all storage nodes"):
for node in self.cluster.storage_nodes:
with allure.step(f"Stop storage service on node: {node}"):
node.stop_service()
stopped_nodes.append(node)
with allure.step("Remove all nodes from network map"):
remove_nodes_from_map_morph(
shell=self.shell, cluster=self.cluster, remove_nodes=stopped_nodes
)
with allure.step("Return all storage nodes to network map"):
self.return_nodes_after_stop_with_check_empty_map(stopped_nodes)
with allure.step("Check that object exists in bucket"):
s3_helper.check_objects_in_bucket(s3_client, bucket, bucket_objects)
@allure.step("Return all nodes to cluster with check empty map first")
def return_nodes_after_stop_with_check_empty_map(self, return_nodes=None) -> None:
first_node = True
for node in list(return_nodes):
with allure.step(f"Start node {node}"):
node.start_service()
with allure.step(f"Waiting status ready for node {node}"):
wait_for_node_to_be_ready(node)
with allure.step("Make sure that network map is empty"):
if first_node:
for check_node in list(return_nodes):
check_node_not_in_map(check_node, shell=self.shell, alive_node=node)
first_node = False
sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
self.tick_epochs(1)
check_node_in_map(node, shell=self.shell, alive_node=node)
stopped_nodes.remove(node)
@allure.title("Test S3 Object loss from fstree/blobovnicza, versioning is enabled")
def test_s3_fstree_blobovnicza_loss_versioning_on(
self,
s3_client: S3ClientWrapper,
simple_object_size: int,
cluster_state_controller: ClusterStateController,
):
bucket = s3_client.create_bucket()
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
file_path = generate_file(simple_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
object_versions = []
with allure.step("Put object into one bucket"):
put_object = s3_client.put_object(bucket, file_path)
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
object_versions.append(put_object)
with allure.step("Stop all storage nodes"):
for node in self.cluster.cluster_nodes:
with allure.step(f"Stop storage service on node: {node}"):
cluster_state_controller.stop_storage_service(node)
with allure.step("Delete blobovnicza and fstree from all nodes"):
for node in self.cluster.storage_nodes:
node.delete_blobovnicza()
node.delete_fstree()
with allure.step("Start all storage nodes"):
cluster_state_controller.start_stopped_storage_services()
# need to get Delete Marker first
with allure.step("Delete the object from the bucket"):
delete_object = s3_client.delete_object(bucket, file_name)
object_versions.append(delete_object["VersionId"])
# and now delete all versions of object (including Delete Markers)
with allure.step("Delete all versions of the object from the bucket"):
for version in object_versions:
delete_object = s3_client.delete_object(bucket, file_name, version_id=version)
with allure.step("Delete bucket"):
s3_client.delete_bucket(bucket)
@allure.title("Test S3 Object loss from fstree/blobovnicza, versioning is disabled")
def test_s3_fstree_blobovnicza_loss_versioning_off(
self,
s3_client: S3ClientWrapper,
simple_object_size: int,
cluster_state_controller: ClusterStateController,
):
bucket = s3_client.create_bucket()
file_path = generate_file(simple_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
with allure.step("Put object into one bucket"):
s3_client.put_object(bucket, file_path)
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
with allure.step("Stop all storage nodes"):
for node in self.cluster.cluster_nodes:
with allure.step(f"Stop storage service on node: {node}"):
cluster_state_controller.stop_storage_service(node)
with allure.step("Delete blobovnicza and fstree from all nodes"):
for node in self.cluster.storage_nodes:
node.delete_blobovnicza()
node.delete_fstree()
with allure.step("Start all storage nodes"):
cluster_state_controller.start_stopped_storage_services()
with allure.step("Delete the object from the bucket"):
s3_client.delete_object(bucket, file_name)
with allure.step("Delete bucket"):
s3_client.delete_bucket(bucket)
@pytest.mark.skip(reason="Need to increase cache lifetime")
@pytest.mark.parametrize(
# versioning should NOT be VersioningStatus.SUSPENDED, it needs to be undefined
"versioning_status",
[VersioningStatus.ENABLED, None],
)
@allure.title(
"After Pilorama.db loss on all nodes list objects should return nothing in second listing"
)
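# pilorama.db backs the tree service data that the S3 gateway uses for object listings. After the
# piloramas are deleted on every node, the first listing below is still expected to return the
# object (presumably served from a cache, see the skip reason above), while the second listing
# should come back empty.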
def test_s3_pilorama_loss(
self,
s3_client: S3ClientWrapper,
simple_object_size: int,
versioning_status: VersioningStatus,
cluster_state_controller: ClusterStateController,
):
bucket = s3_client.create_bucket()
if versioning_status:
s3_helper.set_bucket_versioning(s3_client, bucket, versioning_status)
file_path = generate_file(simple_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
with allure.step("Put object into one bucket"):
s3_client.put_object(bucket, file_path)
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
with allure.step("Stop all storage nodes"):
for node in self.cluster.cluster_nodes:
with allure.step(f"Stop storage service on node: {node}"):
cluster_state_controller.stop_storage_service(node)
with allure.step("Delete pilorama.db from all nodes"):
for node in self.cluster.storage_nodes:
node.delete_pilorama()
with allure.step("Start all storage nodes"):
cluster_state_controller.start_stopped_storage_services()
with allure.step("Check list objects first time"):
objects_list = s3_client.list_objects(bucket)
assert objects_list, f"Expected not empty bucket"
with allure.step("Check list objects second time"):
objects_list = s3_client.list_objects(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with allure.step("Delete bucket"):
s3_client.delete_bucket(bucket)
@pytest.mark.failover
@pytest.mark.failover_data_loss
class TestStorageDataLoss(ClusterTestBase):
@allure.step("Get list of all piloramas on node")
def get_piloramas_list(self, node, cluster_state_controller) -> list:
data_directory_path = node.get_data_directory()
cmd = f"sudo ls -1 {data_directory_path}/meta*/pilorama*"
shell = node.host.get_shell()
stdout = shell.exec(cmd).stdout
piloramas = stdout.split("\n")
return piloramas
@allure.title(
"After metabase loss on all nodes operations on objects and buckets should be still available via S3"
)
@pytest.mark.metabase_loss
def test_metabase_loss(
self,
s3_client: S3ClientWrapper,
simple_object_size: int,
complex_object_size: int,
cluster_state_controller: ClusterStateController,
after_run_return_all_stopped_services: str,
file_keeper: FileKeeper,
):
allure.dynamic.description(after_run_return_all_stopped_services)
with allure.step("Create bucket"):
bucket = s3_client.create_bucket()
with allure.step("Put objects into bucket"):
simple_object_path = generate_file(simple_object_size)
simple_object_key = s3_helper.object_key_from_file_path(simple_object_path)
complex_object_path = generate_file(complex_object_size)
complex_object_key = s3_helper.object_key_from_file_path(complex_object_path)
s3_client.put_object(bucket, simple_object_path)
s3_client.put_object(bucket, complex_object_path)
with allure.step("Check objects are in bucket"):
s3_helper.check_objects_in_bucket(
s3_client, bucket, expected_objects=[simple_object_key, complex_object_key]
)
with allure.step("Stop storage services on all nodes"):
cluster_state_controller.stop_all_storage_services()
with allure.step("Delete metabase from all nodes"):
for node in cluster_state_controller.cluster.storage_nodes:
node.delete_metabase()
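# resync_metabase is expected to make each shard rebuild its metabase from the data kept in the
# blobstor on the next start, so the objects become addressable again even though the metabase
# files were just removed.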
with allure.step("Enable resync_metabase option for storage services"):
for storage_node in cluster_state_controller.cluster.storage_nodes:
with allure.step(f"Enable resync_metabase option for {storage_node}"):
config_file_path, config = storage_node.get_config()
if not config["storage"]["shard"]["default"]["resync_metabase"]:
file_keeper.add(storage_node, config_file_path)
config["storage"]["shard"]["default"]["resync_metabase"] = True
storage_node.save_config(config)
with allure.step("Start storage services on all nodes"):
cluster_state_controller.start_stopped_storage_services()
with allure.step("Delete objects from bucket"):
with allure.step("Delete simple object from bucket"):
with expect_not_raises():
s3_client.delete_object(bucket, simple_object_key)
with allure.step("Delete complex object from bucket"):
with expect_not_raises():
s3_client.delete_object(bucket, complex_object_key)
with allure.step("Delete bucket"):
with expect_not_raises():
s3_client.delete_bucket(bucket)
@allure.title(
"Write cache loss on one node should not affect shards and should not produce errors in log"
)
@pytest.mark.write_cache_loss
def test_write_cache_loss_on_one_node(
self,
node_under_test: ClusterNode,
simple_object_size: int,
cluster_state_controller: ClusterStateController,
shards_watcher: ShardsWatcher,
default_wallet: str,
test_start_time: datetime,
after_run_return_all_stopped_services: str,
):
exception_messages = []
allure.dynamic.description(after_run_return_all_stopped_services)
with allure.step(f"Create container on node {node_under_test}"):
locode = node_under_test.storage_node.get_un_locode()
placement_rule = f"""REP 1 IN X
CBF 1
SELECT 1 FROM C AS X
FILTER 'UN-LOCODE' EQ '{locode}' AS C"""
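# REP 1 combined with the UN-LOCODE filter pins the single replica of every object to the node
# under test, so deleting that node's write cache is the only event that can affect the data.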
cid = create_container(
default_wallet,
self.shell,
node_under_test.storage_node.get_rpc_endpoint(),
rule=placement_rule,
)
container = StorageContainer(
StorageContainerInfo(cid, WalletInfo(default_wallet)),
self.shell,
cluster_state_controller.cluster,
)
with allure.step(f"Put couple objects to container on node {node_under_test}"):
storage_objects: list[StorageObjectInfo] = []
for _ in range(5):
storage_object = container.generate_object(
simple_object_size, endpoint=node_under_test.storage_node.get_rpc_endpoint()
)
storage_objects.append(storage_object)
with allure.step("Take shards snapshot"):
shards_watcher.take_shards_snapshot()
with allure.step(f"Stop storage service on node {node_under_test}"):
cluster_state_controller.stop_storage_service(node_under_test)
with allure.step(f"Delete write cache from node {node_under_test}"):
node_under_test.storage_node.delete_write_cache()
with allure.step(f"Start storage service on node {node_under_test}"):
cluster_state_controller.start_storage_service(node_under_test)
with allure.step("Objects should be available"):
for storage_object in storage_objects:
get_object(
storage_object.wallet_file_path,
container.get_id(),
storage_object.oid,
self.shell,
node_under_test.storage_node.get_rpc_endpoint(),
)
with allure.step("No shards should have new errors"):
shards_watcher.take_shards_snapshot()
shards_with_errors = shards_watcher.get_shards_with_new_errors()
if shards_with_errors:
exception_messages.append(f"Shards have new errors: {shards_with_errors}")
with allure.step("No shards should have degraded status"):
snapshot = shards_watcher.get_shards_snapshot()
for shard in snapshot:
status = snapshot[shard]["mode"]
if status != "read-write":
exception_messages.append(f"Shard {shard} changed status to {status}")
with allure.step("No related errors should be in log"):
if node_under_test.host.is_message_in_logs(
message_regex=r"\Wno such file or directory\W", since=test_start_time
):
exception_messages.append(f"Node {node_under_test} have shard errors in logs")
with allure.step("Pass test if no errors found"):
assert not exception_messages, "\n".join(exception_messages)
@allure.title(
"Test S3 Loss of one node should trigger use of tree and storage service in another node"
)
def test_s3_one_endpoint_loss(
self,
bucket,
s3_client: S3ClientWrapper,
simple_object_size: int,
after_run_return_all_stopped_services,
cluster_state_controller: ClusterStateController,
):
# TODO: need to check that s3 gate is connected to localhost (such metric will be supported in 1.3)
with allure.step(
"Stop one node and wait for the S3 gate connection to rebalance to another storage service"
):
current_node = self.cluster.cluster_nodes[0]
cluster_state_controller.stop_storage_service(current_node)
# waiting for rebalance connection of s3 gate to storage service
sleep(60)
file_path = generate_file(simple_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
with allure.step("Put object into one bucket"):
put_object = s3_client.put_object(bucket, file_path)
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
@allure.title("After Pilorama.db loss on one node object are retrievable")
def test_s3_one_pilorama_loss(
self,
s3_client: S3ClientWrapper,
simple_object_size: int,
cluster_state_controller: ClusterStateController,
):
bucket = s3_client.create_bucket(
location_constraint="load-1-4",
grant_read="uri=http://acs.amazonaws.com/groups/global/AllUsers",
)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with allure.step("Check bucket versioning"):
bucket_versioning = s3_client.get_bucket_versioning_status(bucket)
assert bucket_versioning == "Enabled", "Bucket should have enabled versioning"
file_path = generate_file(simple_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
object_versions = []
with allure.step("Put object into one bucket"):
put_object = s3_client.put_object(bucket, file_path)
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
object_versions.append(put_object)
node_to_check = self.cluster.storage_nodes[0]
piloramas_list_before_removing = {}
with allure.step("Get list of all pilorama.db"):
piloramas_list_before_removing = self.get_piloramas_list(
node_to_check, cluster_state_controller
)
with allure.step("Stop all storage nodes"):
for node in self.cluster.cluster_nodes:
with allure.step(f"Stop storage service on node: {node}"):
cluster_state_controller.stop_storage_service(node)
with allure.step("Delete pilorama.db from one node"):
node_to_check.delete_pilorama()
with allure.step("Start all storage nodes"):
cluster_state_controller.start_stopped_storage_services()
with allure.step("Tick epoch to trigger sync and then wait for 1 minute"):
self.tick_epochs(1)
sleep(120)
piloramas_list_after_removing = {}
with allure.step("Get list of all pilorama.db after sync"):
piloramas_list_after_removing = self.get_piloramas_list(
node_to_check, cluster_state_controller
)
assert (
piloramas_list_after_removing == piloramas_list_before_removing
), "List of pilorama.db files differs after sync"
with allure.step("Check bucket versioning"):
bucket_versioning = s3_client.get_bucket_versioning_status(bucket)
assert bucket_versioning == "Enabled", "Bucket should have enabled versioning"
with allure.step("Check list objects"):
objects_list = s3_client.list_objects(bucket)
assert objects_list, f"Expected not empty bucket"
with allure.step("Delete the object from the bucket"):
delete_object = s3_client.delete_object(bucket, file_name)
assert "DeleteMarker" in delete_object.keys(), "Delete markers not found"
with allure.step("Check list objects"):
objects_list = s3_client.list_objects_versions(bucket)
assert objects_list, f"Expected not empty bucket"
object_versions.append(delete_object["VersionId"])
# and now delete all versions of object (including Delete Markers)
with allure.step("Delete all versions of the object from the bucket"):
for version in object_versions:
delete_object = s3_client.delete_object(bucket, file_name, version_id=version)
with allure.step("Check list objects"):
objects_list = s3_client.list_objects_versions(bucket)
assert not objects_list, f"Expected empty bucket"
with allure.step("Delete bucket"):
s3_client.delete_bucket(bucket)


@ -1,513 +0,0 @@
import logging
import random
from time import sleep
from typing import Optional, Tuple
import allure
import pytest
from frostfs_testlib.resources.common import FROSTFS_CONTRACT_CACHE_TIMEOUT, MORPH_BLOCK_TIME
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.cli.container import create_container, get_container
from frostfs_testlib.steps.cli.object import (
delete_object,
get_object,
get_object_from_random_node,
head_object,
put_object,
put_object_to_random_node,
)
from frostfs_testlib.steps.epoch import tick_epoch
from frostfs_testlib.steps.node_management import (
check_node_in_map,
delete_node_data,
drop_object,
exclude_node_from_network_map,
get_locode_from_random_node,
include_node_to_network_map,
node_shard_list,
node_shard_set_mode,
storage_node_healthcheck,
storage_node_set_status,
wait_for_node_to_be_ready,
)
from frostfs_testlib.steps.storage_policy import get_nodes_with_object, get_simple_object_copies
from frostfs_testlib.storage.cluster import StorageNode
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils import datetime_utils, string_utils
from frostfs_testlib.utils.failover_utils import wait_object_replication
from frostfs_testlib.utils.file_utils import generate_file
from pytest_tests.helpers.utility import (
placement_policy_from_container,
wait_for_gc_pass_on_storage_nodes,
)
logger = logging.getLogger("NeoLogger")
check_nodes: list[StorageNode] = []
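# Shared list of nodes that the tests below take offline; return_nodes() walks it on teardown
# (via the after_run_start_all_nodes / return_nodes_after_test_run fixtures) and brings every
# listed node back online and into the network map.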
@allure.title("Add one node to cluster")
@pytest.mark.add_nodes
@pytest.mark.node_mgmt
class TestNodeManagement(ClusterTestBase):
@pytest.fixture
@allure.title("Create container and pick the node with data")
def create_container_and_pick_node(
self, default_wallet: str, simple_object_size
) -> Tuple[str, StorageNode]:
file_path = generate_file(simple_object_size)
placement_rule = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
endpoint = self.cluster.default_rpc_endpoint
cid = create_container(
default_wallet,
shell=self.shell,
endpoint=endpoint,
rule=placement_rule,
basic_acl=PUBLIC_ACL,
)
oid = put_object_to_random_node(default_wallet, file_path, cid, self.shell, self.cluster)
nodes = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
assert len(nodes) == 1
node = nodes[0]
yield cid, node
shards = node_shard_list(node)
assert shards
for shard in shards:
node_shard_set_mode(node, shard, "read-write")
node_shard_list(node)
@allure.step("Tick epoch with retries")
def tick_epoch_with_retries(self, attempts: int = 3, timeout: int = 3):
for attempt in range(attempts):
try:
self.tick_epoch()
except RuntimeError:
sleep(timeout)
if attempt >= attempts - 1:
raise
continue
return
@pytest.fixture
def after_run_start_all_nodes(self):
yield
self.return_nodes()
@pytest.fixture
def return_nodes_after_test_run(self):
yield
self.return_nodes()
@allure.step("Return node to cluster")
def return_nodes(self, alive_node: Optional[StorageNode] = None) -> None:
for node in list(check_nodes):
with allure.step(f"Start node {node}"):
node.start_service()
with allure.step(f"Waiting status ready for node {node}"):
wait_for_node_to_be_ready(node)
# We need to wait for the node to establish notifications from the morph chain.
# Otherwise it will hang when we try to set its status.
sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
with allure.step(f"Move node {node} to online state"):
storage_node_set_status(node, status="online", retries=2)
check_nodes.remove(node)
sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
self.tick_epoch_with_retries(3)
check_node_in_map(node, shell=self.shell, alive_node=alive_node)
@allure.title("Add one node to cluster")
@pytest.mark.add_nodes
def test_add_nodes(
self,
default_wallet,
return_nodes_after_test_run,
simple_object_size,
):
"""
This test removes one node from the cluster and then adds it back. It uses basic control operations with storage nodes (healthcheck, netmap-snapshot, set-status).
"""
wallet = default_wallet
placement_rule_3 = "REP 3 IN X CBF 1 SELECT 3 FROM * AS X"
placement_rule_4 = "REP 4 IN X CBF 1 SELECT 4 FROM * AS X"
source_file_path = generate_file(simple_object_size)
storage_nodes = self.cluster.storage_nodes
random_node = random.choice(storage_nodes[1:])
alive_node = random.choice(
[storage_node for storage_node in storage_nodes if storage_node.id != random_node.id]
)
check_node_in_map(random_node, shell=self.shell, alive_node=alive_node)
# Add node to recovery list before messing with it
check_nodes.append(random_node)
exclude_node_from_network_map(
random_node, alive_node, shell=self.shell, cluster=self.cluster
)
delete_node_data(random_node)
cid = create_container(
wallet,
rule=placement_rule_3,
basic_acl=PUBLIC_ACL,
shell=self.shell,
endpoint=alive_node.get_rpc_endpoint(),
)
oid = put_object(
wallet,
source_file_path,
cid,
shell=self.shell,
endpoint=alive_node.get_rpc_endpoint(),
)
wait_object_replication(cid, oid, 3, shell=self.shell, nodes=storage_nodes)
self.return_nodes(alive_node)
with allure.step("Check data could be replicated to new node"):
random_node = random.choice(list(set(storage_nodes) - {random_node, alive_node}))
# Add node to recovery list before messing with it
check_nodes.append(random_node)
exclude_node_from_network_map(
random_node, alive_node, shell=self.shell, cluster=self.cluster
)
wait_object_replication(
cid,
oid,
3,
shell=self.shell,
nodes=list(set(storage_nodes) - {random_node}),
)
include_node_to_network_map(
random_node, alive_node, shell=self.shell, cluster=self.cluster
)
wait_object_replication(cid, oid, 3, shell=self.shell, nodes=storage_nodes)
with allure.step("Check container could be created with new node"):
cid = create_container(
wallet,
rule=placement_rule_4,
basic_acl=PUBLIC_ACL,
shell=self.shell,
endpoint=alive_node.get_rpc_endpoint(),
)
oid = put_object(
wallet,
source_file_path,
cid,
shell=self.shell,
endpoint=alive_node.get_rpc_endpoint(),
)
wait_object_replication(cid, oid, 4, shell=self.shell, nodes=storage_nodes)
@pytest.mark.parametrize(
"placement_rule,expected_copies",
[
("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", 2),
("REP 2 IN X CBF 1 SELECT 2 FROM * AS X", 2),
("REP 3 IN X CBF 1 SELECT 3 FROM * AS X", 3),
("REP 1 IN X CBF 1 SELECT 1 FROM * AS X", 1),
("REP 1 IN X CBF 2 SELECT 1 FROM * AS X", 1),
("REP 4 IN X CBF 1 SELECT 4 FROM * AS X", 4),
("REP 2 IN X CBF 1 SELECT 4 FROM * AS X", 2),
],
)
@pytest.mark.node_mgmt
@allure.title("Test object copies based on placement policy")
def test_placement_policy(
self, default_wallet, placement_rule, expected_copies, simple_object_size
):
"""
This test checks the number of object copies based on the container's placement policy.
"""
wallet = default_wallet
file_path = generate_file(simple_object_size)
self.validate_object_copies(wallet, placement_rule, file_path, expected_copies)
@pytest.mark.parametrize(
"placement_rule,expected_copies,expected_nodes_id",
[
("REP 4 IN X CBF 1 SELECT 4 FROM * AS X", 4, {1, 2, 3, 4}),
(
"REP 1 IN LOC_PLACE CBF 1 SELECT 1 FROM LOC_SW AS LOC_PLACE FILTER Country EQ Sweden AS LOC_SW",
1,
{3},
),
(
"REP 1 CBF 1 SELECT 1 FROM LOC_SPB FILTER 'UN-LOCODE' EQ 'RU LED' AS LOC_SPB",
1,
{2},
),
(
"REP 1 IN LOC_SPB_PLACE REP 1 IN LOC_MSK_PLACE CBF 1 SELECT 1 FROM LOC_SPB AS LOC_SPB_PLACE "
"SELECT 1 FROM LOC_MSK AS LOC_MSK_PLACE "
"FILTER 'UN-LOCODE' EQ 'RU LED' AS LOC_SPB FILTER 'UN-LOCODE' EQ 'RU MOW' AS LOC_MSK",
2,
{1, 2},
),
(
"REP 4 CBF 1 SELECT 4 FROM LOC_EU FILTER Continent EQ Europe AS LOC_EU",
4,
{1, 2, 3, 4},
),
(
"REP 1 CBF 1 SELECT 1 FROM LOC_SPB "
"FILTER 'UN-LOCODE' NE 'RU MOW' AND 'UN-LOCODE' NE 'SE STO' AND 'UN-LOCODE' NE 'FI HEL' AS LOC_SPB",
1,
{2},
),
(
"REP 2 CBF 1 SELECT 2 FROM LOC_RU FILTER SubDivCode NE 'AB' AND SubDivCode NE '18' AS LOC_RU",
2,
{1, 2},
),
(
"REP 2 CBF 1 SELECT 2 FROM LOC_RU FILTER Country EQ 'Russia' AS LOC_RU",
2,
{1, 2},
),
(
"REP 2 CBF 1 SELECT 2 FROM LOC_EU FILTER Country NE 'Russia' AS LOC_EU",
2,
{3, 4},
),
],
)
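# expected_nodes_id follows the node numbering implied by the filters in this table:
# 1 - 'RU MOW', 2 - 'RU LED', 3 - 'SE STO' (Sweden), 4 - 'FI HEL'.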
@pytest.mark.node_mgmt
@allure.title("Test object copies and storage nodes based on placement policy")
def test_placement_policy_with_nodes(
self,
default_wallet,
placement_rule,
expected_copies,
expected_nodes_id: set[int],
simple_object_size,
):
"""
Based on the container's placement policy, check that storage nodes are picked correctly and the object has the expected number of copies.
"""
wallet = default_wallet
file_path = generate_file(simple_object_size)
cid, oid, found_nodes = self.validate_object_copies(
wallet, placement_rule, file_path, expected_copies
)
assert (
found_nodes == expected_nodes_id
), f"Expected nodes {expected_nodes_id}, got {found_nodes}"
@pytest.mark.parametrize(
"placement_rule,expected_copies",
[
("REP 2 IN X CBF 2 SELECT 6 FROM * AS X", 2),
],
)
@pytest.mark.node_mgmt
@allure.title("Negative cases for placement policy")
def test_placement_policy_negative(
self, default_wallet, placement_rule, expected_copies, simple_object_size
):
"""
Negative test for placement policy.
"""
wallet = default_wallet
file_path = generate_file(simple_object_size)
with pytest.raises(RuntimeError, match=".*not enough nodes to SELECT from.*"):
self.validate_object_copies(wallet, placement_rule, file_path, expected_copies)
@pytest.mark.node_mgmt
@allure.title("FrostFS object could be dropped using control command")
def test_drop_object(self, default_wallet, complex_object_size: int, simple_object_size: int):
"""
Test checks object could be dropped using `frostfs-cli control drop-objects` command.
"""
wallet = default_wallet
endpoint = self.cluster.default_rpc_endpoint
file_path_simple, file_path_complex = generate_file(simple_object_size), generate_file(
complex_object_size
)
locode = get_locode_from_random_node(self.cluster)
rule = f"REP 1 CBF 1 SELECT 1 FROM * FILTER 'UN-LOCODE' EQ '{locode}' AS LOC"
cid = create_container(wallet, rule=rule, shell=self.shell, endpoint=endpoint)
oid_simple = put_object_to_random_node(
wallet, file_path_simple, cid, shell=self.shell, cluster=self.cluster
)
oid_complex = put_object_to_random_node(
wallet, file_path_complex, cid, shell=self.shell, cluster=self.cluster
)
for oid in (oid_simple, oid_complex):
get_object_from_random_node(wallet, cid, oid, shell=self.shell, cluster=self.cluster)
head_object(wallet, cid, oid, shell=self.shell, endpoint=endpoint)
nodes_with_object = get_nodes_with_object(
cid, oid_simple, shell=self.shell, nodes=self.cluster.storage_nodes
)
random_node = random.choice(nodes_with_object)
for oid in (oid_simple, oid_complex):
with allure.step(f"Drop object {oid}"):
get_object_from_random_node(
wallet, cid, oid, shell=self.shell, cluster=self.cluster
)
head_object(wallet, cid, oid, shell=self.shell, endpoint=endpoint)
drop_object(random_node, cid, oid)
self.wait_for_obj_dropped(wallet, cid, oid, endpoint, get_object)
self.wait_for_obj_dropped(wallet, cid, oid, endpoint, head_object)
@pytest.mark.node_mgmt
@pytest.mark.skip(reason="Need to clarify scenario")
@allure.title("Control Operations with storage nodes")
def test_shards(
self,
default_wallet,
create_container_and_pick_node,
simple_object_size,
):
wallet = default_wallet
file_path = generate_file(simple_object_size)
cid, node = create_container_and_pick_node
original_oid = put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster)
# for mode in ('read-only', 'degraded'):
for mode in ("degraded",):
shards = node_shard_list(node)
assert shards
for shard in shards:
node_shard_set_mode(node, shard, mode)
shards = node_shard_list(node)
assert shards
with pytest.raises(RuntimeError):
put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster)
with pytest.raises(RuntimeError):
delete_object(
wallet, cid, original_oid, self.shell, self.cluster.default_rpc_endpoint
)
get_object_from_random_node(wallet, cid, original_oid, self.shell, self.cluster)
for shard in shards:
node_shard_set_mode(node, shard, "read-write")
shards = node_shard_list(node)
assert shards
oid = put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster)
delete_object(wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
@pytest.mark.node_mgmt
@allure.title("Put object with stopped node")
def test_stop_node(self, default_wallet, return_nodes_after_test_run, simple_object_size: int):
wallet = default_wallet
placement_rule = "REP 3 SELECT 4 FROM * AS X"
source_file_path = generate_file(simple_object_size)
storage_nodes = self.cluster.storage_nodes
random_node = random.choice(storage_nodes[1:])
alive_node = random.choice(
[storage_node for storage_node in storage_nodes if storage_node.id != random_node.id]
)
cid = create_container(
wallet,
rule=placement_rule,
basic_acl=PUBLIC_ACL,
shell=self.shell,
endpoint=random_node.get_rpc_endpoint(),
)
with allure.step("Stop the random node"):
check_nodes.append(random_node)
random_node.stop_service()
with allure.step("Try to put an object and expect success"):
put_object(
wallet,
source_file_path,
cid,
shell=self.shell,
endpoint=alive_node.get_rpc_endpoint(),
)
self.return_nodes(alive_node)
@allure.step("Validate object has {expected_copies} copies")
def validate_object_copies(
self, wallet: str, placement_rule: str, file_path: str, expected_copies: int
) -> tuple[str, str, set[int]]:
endpoint = self.cluster.default_rpc_endpoint
cid = create_container(
wallet, rule=placement_rule, basic_acl=PUBLIC_ACL, shell=self.shell, endpoint=endpoint
)
got_policy = placement_policy_from_container(
get_container(wallet, cid, json_mode=False, shell=self.shell, endpoint=endpoint)
)
assert got_policy == placement_rule.replace(
"'", ""
), f"Expected placement policy\n{placement_rule}\nbut got\n{got_policy}"
oid = put_object_to_random_node(
wallet, file_path, cid, shell=self.shell, cluster=self.cluster
)
nodes = get_nodes_with_object(cid, oid, shell=self.shell, nodes=self.cluster.storage_nodes)
nodes_id = {node.id for node in nodes}
assert len(nodes) == expected_copies, f"Expected {expected_copies} copies, got {len(nodes)}"
return cid, oid, nodes_id
@allure.step("Wait for node {node} goes online")
def wait_for_node_go_online(self, node: StorageNode) -> None:
timeout, attempts = 5, 20
for _ in range(attempts):
try:
health_check = storage_node_healthcheck(node)
assert (
health_check.health_status == "READY"
and health_check.network_status == "ONLINE"
)
return
except Exception as err:
logger.warning(f"Node {node} is not online:\n{err}")
sleep(timeout)
raise AssertionError(
f"Node {node} hasn't gone to the READY and ONLINE state after {timeout * attempts} second"
)
@allure.step("Wait for {expected_copies} object copies in the wallet")
def wait_for_expected_object_copies(
self, wallet: str, cid: str, oid: str, expected_copies: int = 2
) -> None:
nodes = self.cluster.storage_nodes
for _ in range(2):
copies = get_simple_object_copies(wallet, cid, oid, self.shell, nodes)
if copies == expected_copies:
break
tick_epoch(self.shell, self.cluster)
sleep(datetime_utils.parse_time(FROSTFS_CONTRACT_CACHE_TIMEOUT))
else:
raise AssertionError(f"There are no {expected_copies} copies during time")
@allure.step("Wait for object to be dropped")
def wait_for_obj_dropped(self, wallet: str, cid: str, oid: str, endpoint: str, checker) -> None:
for _ in range(3):
try:
checker(wallet, cid, oid, shell=self.shell, endpoint=endpoint)
wait_for_gc_pass_on_storage_nodes()
except Exception as err:
if string_utils.is_str_match_pattern(err, OBJECT_NOT_FOUND):
return
raise AssertionError(f'Expected "{OBJECT_NOT_FOUND}" error, got\n{err}')
raise AssertionError(f"Object {oid} was not dropped from node")

pytest_tests/testsuites/object/test_object_api.py Executable file → Normal file

@ -1,535 +1,95 @@
import logging
import random
import sys
from time import sleep
import allure
import pytest
from frostfs_testlib.resources.error_patterns import (
INVALID_LENGTH_SPECIFIER,
INVALID_OFFSET_SPECIFIER,
INVALID_RANGE_OVERFLOW,
INVALID_RANGE_ZERO_LENGTH,
OUT_OF_RANGE,
)
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import (
get_object_from_random_node,
get_range,
get_range_hash,
head_object,
put_object_to_random_node,
search_object,
)
from frostfs_testlib.steps.complex_object_actions import get_complex_object_split_ranges
from frostfs_testlib.steps.storage_object import delete_objects
from frostfs_testlib.steps.storage_policy import get_complex_object_copies, get_simple_object_copies
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file, get_file_content, get_file_hash
from pytest import FixtureRequest
from container import create_container
from epoch import tick_epoch
from tombstone import verify_head_tombstone
from python_keywords.neofs_verbs import (delete_object, get_object, get_range,
get_range_hash, head_object,
put_object, search_object)
from python_keywords.storage_policy import get_simple_object_copies
from python_keywords.utility_keywords import generate_file, get_file_hash
logger = logging.getLogger("NeoLogger")
logger = logging.getLogger('NeoLogger')
CLEANUP_TIMEOUT = 10
COMMON_ATTRIBUTE = {"common_key": "common_value"}
# Will upload object for each attribute set
OBJECT_ATTRIBUTES = [
None,
{"key1": 1, "key2": "abc", "common_key": "common_value"},
{"key1": 2, "common_key": "common_value"},
]
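# Three objects are uploaded per payload: one without user attributes and two with overlapping
# ones, so the search tests can filter both by the distinct `key1` values and by the shared
# `common_key`.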
# Config for Range tests
RANGES_COUNT = 4 # by quarters
RANGE_MIN_LEN = 10
RANGE_MAX_LEN = 500
# Used for static ranges found with issues
STATIC_RANGES = {}
def generate_ranges(
storage_object: StorageObjectInfo, max_object_size: int, shell: Shell, cluster: Cluster
) -> list[(int, int)]:
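# Builds the list of (offset, length) ranges to read: the payload is split into RANGES_COUNT
# quarters; complex objects additionally get ranges that cross child-object boundaries; edge
# ranges at the very start and end of the payload plus one random range per quarter are appended.
# For example, a 1000-byte simple object yields (0, 250), (250, 250), (500, 250), (750, 250),
# the edge ranges (0, 10) and (990, 10), and four random ranges.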
file_range_step = storage_object.size / RANGES_COUNT
file_ranges = []
file_ranges_to_test = []
for i in range(0, RANGES_COUNT):
file_ranges.append((int(file_range_step * i), int(file_range_step)))
# For simple object we can read all file ranges without too much time for testing
if storage_object.size < max_object_size:
file_ranges_to_test.extend(file_ranges)
# For complex object we need to fetch multiple child objects from different nodes.
else:
assert (
storage_object.size >= RANGE_MAX_LEN + max_object_size
), f"Complex object size should be at least {max_object_size + RANGE_MAX_LEN}. Current: {storage_object.size}"
file_ranges_to_test.append((RANGE_MAX_LEN, max_object_size - RANGE_MAX_LEN))
file_ranges_to_test.extend(get_complex_object_split_ranges(storage_object, shell, cluster))
# Special cases to read some bytes from start and some bytes from end of object
file_ranges_to_test.append((0, RANGE_MIN_LEN))
file_ranges_to_test.append((storage_object.size - RANGE_MIN_LEN, RANGE_MIN_LEN))
for offset, length in file_ranges:
range_length = random.randint(RANGE_MIN_LEN, RANGE_MAX_LEN)
range_start = random.randint(offset, offset + length)
file_ranges_to_test.append(
(range_start, min(range_length, storage_object.size - range_start))
)
file_ranges_to_test.extend(STATIC_RANGES.get(storage_object.size, []))
return file_ranges_to_test
@pytest.fixture(
params=[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
# Module scope so that each file set is uploaded/deleted only once
scope="module",
)
def storage_objects(
default_wallet: str, client_shell: Shell, cluster: Cluster, request: FixtureRequest
) -> list[StorageObjectInfo]:
wallet = default_wallet
# Separate containers for complex/simple objects to avoid side-effects
cid = create_container(wallet, shell=client_shell, endpoint=cluster.default_rpc_endpoint)
file_path = generate_file(request.param)
file_hash = get_file_hash(file_path)
storage_objects = []
with allure.step("Put objects"):
# We need to upload objects multiple times with different attributes
for attributes in OBJECT_ATTRIBUTES:
storage_object_id = put_object_to_random_node(
wallet=wallet,
path=file_path,
cid=cid,
shell=client_shell,
cluster=cluster,
attributes=attributes,
)
storage_object = StorageObjectInfo(cid, storage_object_id)
storage_object.size = request.param
storage_object.wallet_file_path = wallet
storage_object.file_path = file_path
storage_object.file_hash = file_hash
storage_object.attributes = attributes
storage_objects.append(storage_object)
yield storage_objects
# Teardown after all tests are done with the current param
delete_objects(storage_objects, client_shell, cluster)
@allure.title('Test native object API')
@pytest.mark.sanity
@pytest.mark.grpc_api
class TestObjectApi(ClusterTestBase):
@allure.title("Validate object storage policy by native API")
def test_object_storage_policies(
self, request: FixtureRequest, storage_objects: list[StorageObjectInfo], simple_object_size
):
"""
Validate object storage policy
"""
allure.dynamic.title(
f"Validate object storage policy by native API for {request.node.callspec.id}"
)
def test_object_api(prepare_wallet_and_deposit):
wallet = prepare_wallet_and_deposit
cid = create_container(wallet)
wallet_cid = {'wallet': wallet, 'cid': cid}
file_usr_header = {'key1': 1, 'key2': 'abc'}
file_usr_header_oth = {'key1': 2}
range_cut = '0:10'
oids = []
with allure.step("Validate storage policy for objects"):
for storage_object in storage_objects:
if storage_object.size == simple_object_size:
copies = get_simple_object_copies(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
)
else:
copies = get_complex_object_copies(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
)
assert copies == 2, "Expected 2 copies"
file_path = generate_file()
file_hash = get_file_hash(file_path)
@allure.title("Validate get object native API")
def test_get_object_api(
self, request: FixtureRequest, storage_objects: list[StorageObjectInfo]
):
"""
Validate get object native API
"""
allure.dynamic.title(f"Validate get object native API for {request.node.callspec.id}")
search_object(**wallet_cid, expected_objects_list=oids)
with allure.step("Get objects and compare hashes"):
for storage_object in storage_objects:
file_path = get_object_from_random_node(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
self.shell,
cluster=self.cluster,
)
file_hash = get_file_hash(file_path)
assert storage_object.file_hash == file_hash
with allure.step('Put objects'):
oids.append(put_object(wallet=wallet, path=file_path, cid=cid))
oids.append(put_object(wallet=wallet, path=file_path, cid=cid, user_headers=file_usr_header))
oids.append(put_object(wallet=wallet, path=file_path, cid=cid, user_headers=file_usr_header_oth))
@allure.title("Validate head object native API")
def test_head_object_api(
self, request: FixtureRequest, storage_objects: list[StorageObjectInfo]
):
"""
Validate head object native API
"""
allure.dynamic.title(f"Validate head object by native API for {request.node.callspec.id}")
with allure.step('Validate storage policy for objects'):
for oid_to_check in oids:
assert get_simple_object_copies(wallet=wallet, cid=cid, oid=oid_to_check) == 2, 'Expected 2 copies'
storage_object_1 = storage_objects[0]
storage_object_2 = storage_objects[1]
with allure.step('Get objects and compare hashes'):
for oid_to_check in oids:
got_file_path = get_object(wallet=wallet, cid=cid, oid=oid_to_check)
got_file_hash = get_file_hash(got_file_path)
assert file_hash == got_file_hash
with allure.step("Head object and validate"):
head_object(
storage_object_1.wallet_file_path,
storage_object_1.cid,
storage_object_1.oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
head_info = head_object(
storage_object_2.wallet_file_path,
storage_object_2.cid,
storage_object_2.oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
self.check_header_is_presented(head_info, storage_object_2.attributes)
with allure.step('Get range/range hash'):
get_range_hash(**wallet_cid, oid=oids[0], bearer_token='', range_cut=range_cut)
get_range_hash(**wallet_cid, oid=oids[1], bearer_token='', range_cut=range_cut)
get_range(**wallet_cid, oid=oids[1], bearer='', range_cut=range_cut)
@allure.title("Validate object search by native API")
def test_search_object_api(
self, request: FixtureRequest, storage_objects: list[StorageObjectInfo]
):
"""
Validate object search by native API
"""
allure.dynamic.title(f"Validate object search by native API for {request.node.callspec.id}")
with allure.step('Search objects'):
search_object(**wallet_cid, expected_objects_list=oids)
search_object(**wallet_cid, filters=file_usr_header, expected_objects_list=oids[1:2])
search_object(**wallet_cid, filters=file_usr_header_oth, expected_objects_list=oids[2:3])
oids = [storage_object.oid for storage_object in storage_objects]
wallet = storage_objects[0].wallet_file_path
cid = storage_objects[0].cid
with allure.step('Head object and validate'):
head_object(**wallet_cid, oid=oids[0])
head_info = head_object(**wallet_cid, oid=oids[1])
check_header_is_presented(head_info, file_usr_header)
test_table = [
(OBJECT_ATTRIBUTES[1], oids[1:2]),
(OBJECT_ATTRIBUTES[2], oids[2:3]),
(COMMON_ATTRIBUTE, oids[1:3]),
]
with allure.step('Delete objects'):
tombstone_s = delete_object(**wallet_cid, oid=oids[0])
tombstone_h = delete_object(**wallet_cid, oid=oids[1])
with allure.step("Search objects"):
# Search with no attributes
result = search_object(
wallet,
cid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
expected_objects_list=oids,
root=True,
)
assert sorted(oids) == sorted(result)
verify_head_tombstone(wallet_path=wallet, cid=cid, oid_ts=tombstone_s, oid=oids[0])
verify_head_tombstone(wallet_path=wallet, cid=cid, oid_ts=tombstone_h, oid=oids[1])
# search by test table
for filter, expected_oids in test_table:
result = search_object(
wallet,
cid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
filters=filter,
expected_objects_list=expected_oids,
root=True,
)
assert sorted(expected_oids) == sorted(result)
tick_epoch()
sleep(CLEANUP_TIMEOUT)
@allure.title("Validate object search with removed items")
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
def test_object_search_should_return_tombstone_items(
self, default_wallet: str, request: FixtureRequest, object_size: int
):
"""
Validate object search with removed items
"""
allure.dynamic.title(
f"Validate object search with removed items for {request.node.callspec.id}"
)
with allure.step('Get objects and check errors'):
get_object_and_check_error(**wallet_cid, oid=oids[0], err_msg='object already removed')
get_object_and_check_error(**wallet_cid, oid=oids[1], err_msg='object already removed')
wallet = default_wallet
cid = create_container(wallet, self.shell, self.cluster.default_rpc_endpoint)
with allure.step("Upload file"):
file_path = generate_file(object_size)
file_hash = get_file_hash(file_path)
def get_object_and_check_error(wallet: str, cid: str, oid: str, err_msg: str):
try:
get_object(wallet=wallet, cid=cid, oid=oid)
raise AssertionError(f'Expected object {oid} removed, but it is not')
except Exception as err:
logger.info(f'Error is {err}')
assert err_msg in str(err), f'Expected message {err_msg} in error: {err}'
storage_object = StorageObjectInfo(
cid=cid,
oid=put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster),
size=object_size,
wallet_file_path=wallet,
file_path=file_path,
file_hash=file_hash,
)
with allure.step("Search object"):
# Root Search object should return root object oid
result = search_object(
wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, root=True
)
assert result == [storage_object.oid]
with allure.step("Delete file"):
delete_objects([storage_object], self.shell, self.cluster)
with allure.step("Search deleted object with --root"):
# Root Search object should return nothing
result = search_object(
wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, root=True
)
assert len(result) == 0
with allure.step("Search deleted object with --phy should return only tombstones"):
# Physical Search object should return only tombstones
result = search_object(
wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, phy=True
)
assert (
storage_object.tombstone in result
), "Search result should contain tombstone of removed object"
assert (
storage_object.oid not in result
), "Search result should not contain ObjectId of removed object"
for tombstone_oid in result:
header = head_object(
wallet,
cid,
tombstone_oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)["header"]
object_type = header["objectType"]
assert (
object_type == "TOMBSTONE"
), f"Object wasn't deleted properly. Found object {tombstone_oid} with type {object_type}"
@allure.title("Validate native object API get_range_hash")
@pytest.mark.sanity
@pytest.mark.grpc_api
def test_object_get_range_hash(
self, request: FixtureRequest, storage_objects: list[StorageObjectInfo], max_object_size
):
"""
Validate get_range_hash for object by native gRPC API
"""
allure.dynamic.title(
f"Validate native get_range_hash object API for {request.node.callspec.id}"
)
wallet = storage_objects[0].wallet_file_path
cid = storage_objects[0].cid
oids = [storage_object.oid for storage_object in storage_objects[:2]]
file_path = storage_objects[0].file_path
file_ranges_to_test = generate_ranges(
storage_objects[0], max_object_size, self.shell, self.cluster
)
logging.info(f"Ranges used in test {file_ranges_to_test}")
for range_start, range_len in file_ranges_to_test:
range_cut = f"{range_start}:{range_len}"
with allure.step(f"Get range hash ({range_cut})"):
for oid in oids:
range_hash = get_range_hash(
wallet,
cid,
oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
range_cut=range_cut,
)
assert (
get_file_hash(file_path, range_len, range_start) == range_hash
), f"Expected range hash to match {range_cut} slice of file payload"
@allure.title("Validate native object API get_range")
@pytest.mark.sanity
@pytest.mark.grpc_api
def test_object_get_range(
self, request: FixtureRequest, storage_objects: list[StorageObjectInfo], max_object_size
):
"""
Validate get_range for object by native gRPC API
"""
allure.dynamic.title(f"Validate native get_range object API for {request.node.callspec.id}")
wallet = storage_objects[0].wallet_file_path
cid = storage_objects[0].cid
oids = [storage_object.oid for storage_object in storage_objects[:2]]
file_path = storage_objects[0].file_path
file_ranges_to_test = generate_ranges(
storage_objects[0], max_object_size, self.shell, self.cluster
)
logging.info(f"Ranges used in test {file_ranges_to_test}")
for range_start, range_len in file_ranges_to_test:
range_cut = f"{range_start}:{range_len}"
with allure.step(f"Get range ({range_cut})"):
for oid in oids:
_, range_content = get_range(
wallet,
cid,
oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
range_cut=range_cut,
)
assert (
get_file_content(
file_path, content_len=range_len, mode="rb", offset=range_start
)
== range_content
), f"Expected range content to match {range_cut} slice of file payload"
@allure.title("Validate native object API get_range negative cases")
@pytest.mark.sanity
@pytest.mark.grpc_api
def test_object_get_range_negatives(
self,
request: FixtureRequest,
storage_objects: list[StorageObjectInfo],
):
"""
Validate get_range negative for object by native gRPC API
"""
allure.dynamic.title(
f"Validate native get_range negative object API for {request.node.callspec.id}"
)
wallet = storage_objects[0].wallet_file_path
cid = storage_objects[0].cid
oids = [storage_object.oid for storage_object in storage_objects[:2]]
file_size = storage_objects[0].size
assert (
RANGE_MIN_LEN < file_size
), f"Incorrect test setup. File size ({file_size}) is less than RANGE_MIN_LEN ({RANGE_MIN_LEN})"
file_ranges_to_test: list[tuple(int, int, str)] = [
# Offset is bigger than the file size, the length is small.
(file_size + 1, RANGE_MIN_LEN, OUT_OF_RANGE),
# Offset is ok, but offset+length is too big.
(file_size - RANGE_MIN_LEN, RANGE_MIN_LEN * 2, OUT_OF_RANGE),
# Offset is ok, and length is very-very big (e.g. MaxUint64) so that offset+length is wrapped and still "valid".
(RANGE_MIN_LEN, sys.maxsize * 2 + 1, INVALID_RANGE_OVERFLOW),
# Length is zero
(10, 0, INVALID_RANGE_ZERO_LENGTH),
# Negative values
(-1, 1, INVALID_OFFSET_SPECIFIER),
(10, -5, INVALID_LENGTH_SPECIFIER),
]
for range_start, range_len, expected_error in file_ranges_to_test:
range_cut = f"{range_start}:{range_len}"
expected_error = (
expected_error.format(range=range_cut)
if "{range}" in expected_error
else expected_error
)
with allure.step(f"Get range ({range_cut})"):
for oid in oids:
with pytest.raises(Exception, match=expected_error):
get_range(
wallet,
cid,
oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
range_cut=range_cut,
)
@allure.title("Validate native object API get_range_hash negative cases")
def test_object_get_range_hash_negatives(
self,
request: FixtureRequest,
storage_objects: list[StorageObjectInfo],
):
"""
Validate get_range_hash negative for object by native gRPC API
"""
allure.dynamic.title(
f"Validate native get_range_hash negative object API for {request.node.callspec.id}"
)
wallet = storage_objects[0].wallet_file_path
cid = storage_objects[0].cid
oids = [storage_object.oid for storage_object in storage_objects[:2]]
file_size = storage_objects[0].size
assert (
RANGE_MIN_LEN < file_size
), f"Incorrect test setup. File size ({file_size}) is less than RANGE_MIN_LEN ({RANGE_MIN_LEN})"
file_ranges_to_test: list[tuple(int, int, str)] = [
# Offset is bigger than the file size, the length is small.
(file_size + 1, RANGE_MIN_LEN, OUT_OF_RANGE),
# Offset is ok, but offset+length is too big.
(file_size - RANGE_MIN_LEN, RANGE_MIN_LEN * 2, OUT_OF_RANGE),
# Offset is ok, and length is very-very big (e.g. MaxUint64) so that offset+length is wrapped and still "valid".
(RANGE_MIN_LEN, sys.maxsize * 2 + 1, INVALID_RANGE_OVERFLOW),
# Length is zero
(10, 0, INVALID_RANGE_ZERO_LENGTH),
# Negative values
(-1, 1, INVALID_OFFSET_SPECIFIER),
(10, -5, INVALID_LENGTH_SPECIFIER),
]
for range_start, range_len, expected_error in file_ranges_to_test:
range_cut = f"{range_start}:{range_len}"
expected_error = (
expected_error.format(range=range_cut)
if "{range}" in expected_error
else expected_error
)
with allure.step(f"Get range hash ({range_cut})"):
for oid in oids:
with pytest.raises(Exception, match=expected_error):
get_range_hash(
wallet,
cid,
oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
range_cut=range_cut,
)
def check_header_is_presented(self, head_info: dict, object_header: dict) -> None:
for key_to_check, val_to_check in object_header.items():
assert (
key_to_check in head_info["header"]["attributes"]
), f"Key {key_to_check} is not found in the object header attributes"
assert head_info["header"]["attributes"].get(key_to_check) == str(
val_to_check
), f"Attribute {key_to_check} value does not match expected {val_to_check}"
def check_header_is_presented(head_info: dict, object_header: dict):
for key_to_check, val_to_check in object_header.items():
assert key_to_check in head_info['header']['attributes'], f'Key {key_to_check} is found in {head_object}'
assert head_info['header']['attributes'].get(key_to_check) == str(
val_to_check), f'Value {val_to_check} is equal'


@ -1,163 +0,0 @@
import allure
import pytest
from frostfs_testlib.resources.wellknown_acl import EACL_PUBLIC_READ_WRITE
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.acl import form_bearertoken_file
from frostfs_testlib.steps.cli.container import (
REP_2_FOR_3_NODES_PLACEMENT_RULE,
SINGLE_PLACEMENT_RULE,
StorageContainer,
StorageContainerInfo,
create_container,
)
from frostfs_testlib.steps.cli.object import delete_object, get_object
from frostfs_testlib.steps.epoch import get_epoch
from frostfs_testlib.steps.storage_object import StorageObjectInfo
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.acl import EACLAccess, EACLOperation, EACLRole, EACLRule
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises
from pytest import FixtureRequest
@pytest.fixture(scope="module")
@allure.title("Create bearer token for OTHERS with all operations allowed for all containers")
def bearer_token_file_all_allow(default_wallet: str, client_shell: Shell, cluster: Cluster) -> str:
bearer = form_bearertoken_file(
default_wallet,
"",
[
EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.OTHERS)
for op in EACLOperation
],
shell=client_shell,
endpoint=cluster.default_rpc_endpoint,
)
return bearer
@pytest.fixture(scope="module")
@allure.title("Create user container for bearer token usage")
def user_container(
default_wallet: str, client_shell: Shell, cluster: Cluster, request: FixtureRequest
) -> StorageContainer:
container_id = create_container(
default_wallet,
shell=client_shell,
rule=request.param,
basic_acl=EACL_PUBLIC_READ_WRITE,
endpoint=cluster.default_rpc_endpoint,
)
# Deliberately using s3gate wallet here to test bearer token
s3gate = cluster.s3_gates[0]
return StorageContainer(
StorageContainerInfo(container_id, WalletInfo.from_node(s3gate)),
client_shell,
cluster,
)
@pytest.fixture()
def storage_objects(
user_container: StorageContainer,
bearer_token_file_all_allow: str,
request: FixtureRequest,
client_shell: Shell,
cluster: Cluster,
) -> list[StorageObjectInfo]:
epoch = get_epoch(client_shell, cluster)
storage_objects: list[StorageObjectInfo] = []
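# Put one object per storage node so the bearer token can later be exercised against every
# endpoint in the cluster.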
for node in cluster.storage_nodes:
storage_objects.append(
user_container.generate_object(
request.param,
epoch + 3,
bearer_token=bearer_token_file_all_allow,
endpoint=node.get_rpc_endpoint(),
)
)
return storage_objects
@pytest.mark.smoke
@pytest.mark.bearer
class TestObjectApiWithBearerToken(ClusterTestBase):
@pytest.mark.parametrize(
"user_container",
[SINGLE_PLACEMENT_RULE],
ids=["single replica for all nodes placement rule"],
indirect=True,
)
@pytest.mark.parametrize(
"storage_objects",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
indirect=True,
)
def test_delete_object_with_s3_wallet_bearer(
self,
storage_objects: list[StorageObjectInfo],
bearer_token_file_all_allow: str,
request: FixtureRequest,
):
allure.dynamic.title(
f"Object can be deleted from any node using s3gate wallet with bearer token for {request.node.callspec.id}"
)
s3_gate_wallet = self.cluster.s3_gates[0]
with allure.step("Try to delete each object from first storage node"):
for storage_object in storage_objects:
with expect_not_raises():
delete_object(
s3_gate_wallet.get_wallet_path(),
storage_object.cid,
storage_object.oid,
self.shell,
endpoint=self.cluster.default_rpc_endpoint,
bearer=bearer_token_file_all_allow,
wallet_config=s3_gate_wallet.get_wallet_config_path(),
)
@pytest.mark.parametrize(
"user_container",
[REP_2_FOR_3_NODES_PLACEMENT_RULE],
ids=["2 replicas for 3 nodes placement rule"],
indirect=True,
)
@pytest.mark.parametrize(
"file_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
def test_get_object_with_s3_wallet_bearer_from_all_nodes(
self,
user_container: StorageContainer,
file_size: int,
bearer_token_file_all_allow: str,
request: FixtureRequest,
):
allure.dynamic.title(
f"Object can be fetched from any node using s3gate wallet with bearer token for {request.node.callspec.id}"
)
s3_gate_wallet = self.cluster.s3_gates[0]
with allure.step("Put one object to container"):
epoch = self.get_epoch()
storage_object = user_container.generate_object(
file_size, epoch + 3, bearer_token=bearer_token_file_all_allow
)
with allure.step("Try to fetch object from each storage node"):
for node in self.cluster.storage_nodes:
with expect_not_raises():
get_object(
s3_gate_wallet.get_wallet_path(),
storage_object.cid,
storage_object.oid,
self.shell,
endpoint=node.get_rpc_endpoint(),
bearer=bearer_token_file_all_allow,
wallet_config=s3_gate_wallet.get_wallet_config_path(),
)


@ -1,72 +0,0 @@
import logging
import allure
import pytest
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import get_object_from_random_node, put_object_to_random_node, head_object
from frostfs_testlib.steps.epoch import get_epoch
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
from pytest import FixtureRequest
from pytest_tests.helpers.utility import wait_for_gc_pass_on_storage_nodes
logger = logging.getLogger("NeoLogger")
@pytest.mark.sanity
@pytest.mark.grpc_api
class TestObjectApiLifetime(ClusterTestBase):
@allure.title("Test object life time")
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
def test_object_api_lifetime(
self, default_wallet: str, request: FixtureRequest, object_size: int
):
"""
Test object deleted after expiration epoch.
"""
allure.dynamic.title(f"Test object life time for {request.node.callspec.id}")
wallet = default_wallet
endpoint = self.cluster.default_rpc_endpoint
cid = create_container(wallet, self.shell, endpoint)
file_path = generate_file(object_size)
file_hash = get_file_hash(file_path)
epoch = get_epoch(self.shell, self.cluster)
oid = put_object_to_random_node(
wallet, file_path, cid, self.shell, self.cluster, expire_at=epoch + 1
)
got_file = get_object_from_random_node(wallet, cid, oid, self.shell, self.cluster)
assert get_file_hash(got_file) == file_hash
with allure.step("Tick two epochs"):
for _ in range(2):
self.tick_epoch()
# Wait for GC, because object with expiration is counted as alive until GC removes it
wait_for_gc_pass_on_storage_nodes()
with allure.step("Check object deleted because it expires on epoch"):
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
head_object(wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
get_object_from_random_node(wallet, cid, oid, self.shell, self.cluster)
with allure.step("Tick additional epoch"):
self.tick_epoch()
wait_for_gc_pass_on_storage_nodes()
with allure.step("Check object deleted because it expires on previous epoch"):
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
head_object(wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
get_object_from_random_node(wallet, cid, oid, self.shell, self.cluster)

View file

@ -1,665 +0,0 @@
import logging
import re
import allure
import pytest
from frostfs_testlib.resources.common import STORAGE_GC_TIME
from frostfs_testlib.resources.error_patterns import (
LIFETIME_REQUIRED,
LOCK_NON_REGULAR_OBJECT,
LOCK_OBJECT_EXPIRATION,
LOCK_OBJECT_REMOVAL,
OBJECT_ALREADY_REMOVED,
OBJECT_IS_LOCKED,
OBJECT_NOT_FOUND,
)
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import (
StorageContainer,
StorageContainerInfo,
create_container,
)
from frostfs_testlib.steps.cli.object import delete_object, head_object, lock_object
from frostfs_testlib.steps.complex_object_actions import get_link_object, get_storage_object_chunks
from frostfs_testlib.steps.epoch import ensure_fresh_epoch, get_epoch, tick_epoch
from frostfs_testlib.steps.node_management import drop_object
from frostfs_testlib.steps.storage_object import delete_objects
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.storage_object_info import (
LockObjectInfo,
StorageObjectInfo,
)
from frostfs_testlib.storage.dataclasses.wallet import WalletFactory, WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises, wait_for_success
from frostfs_testlib.utils import datetime_utils
from pytest import FixtureRequest
from pytest_tests.helpers.utility import wait_for_gc_pass_on_storage_nodes
logger = logging.getLogger("NeoLogger")
FIXTURE_LOCK_LIFETIME = 5
FIXTURE_OBJECT_LIFETIME = 10
@pytest.fixture(
scope="module",
)
def user_wallet(wallet_factory: WalletFactory):
with allure.step("Create user wallet with container"):
wallet_file = wallet_factory.create_wallet()
return wallet_file
@pytest.fixture(
scope="module",
)
def user_container(user_wallet: WalletInfo, client_shell: Shell, cluster: Cluster):
container_id = create_container(
user_wallet.path, shell=client_shell, endpoint=cluster.default_rpc_endpoint
)
return StorageContainer(StorageContainerInfo(container_id, user_wallet), client_shell, cluster)
@pytest.fixture(
scope="module",
)
def locked_storage_object(
user_container: StorageContainer,
client_shell: Shell,
cluster: Cluster,
request: FixtureRequest,
):
"""
This fixture provides a storage object which is NOT expected to be deleted during the test act phase.
"""
with allure.step("Creating locked object"):
current_epoch = ensure_fresh_epoch(client_shell, cluster)
expiration_epoch = current_epoch + FIXTURE_LOCK_LIFETIME
storage_object = user_container.generate_object(
request.param, expire_at=current_epoch + FIXTURE_OBJECT_LIFETIME
)
lock_object_id = lock_object(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
client_shell,
cluster.default_rpc_endpoint,
lifetime=FIXTURE_LOCK_LIFETIME,
)
storage_object.locks = [
LockObjectInfo(
storage_object.cid, lock_object_id, FIXTURE_LOCK_LIFETIME, expiration_epoch
)
]
yield storage_object
with allure.step("Delete created locked object"):
current_epoch = get_epoch(client_shell, cluster)
epoch_diff = expiration_epoch - current_epoch + 1
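# The lock must expire before its target can be removed, so tick enough epochs to pass the lock expiration epoch before deleting.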
if epoch_diff > 0:
with allure.step(f"Tick {epoch_diff} epochs"):
for _ in range(epoch_diff):
tick_epoch(client_shell, cluster)
try:
delete_object(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
client_shell,
cluster.default_rpc_endpoint,
)
except Exception as ex:
ex_message = str(ex)
# It's okay if object already removed
if not re.search(OBJECT_NOT_FOUND, ex_message) and not re.search(
OBJECT_ALREADY_REMOVED, ex_message
):
raise ex
logger.debug(ex_message)
@pytest.mark.sanity
@pytest.mark.grpc_object_lock
class TestObjectLockWithGrpc(ClusterTestBase):
@pytest.fixture()
def new_locked_storage_object(
self, user_container: StorageContainer, request: FixtureRequest
) -> StorageObjectInfo:
"""
This fixture provides a new storage object for tests which may delete or corrupt the object or its complementary objects,
so a fresh one is created each time it is requested.
"""
with allure.step("Creating locked object"):
current_epoch = self.get_epoch()
storage_object = user_container.generate_object(
request.param, expire_at=current_epoch + FIXTURE_OBJECT_LIFETIME
)
lock_object(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
lifetime=FIXTURE_LOCK_LIFETIME,
)
return storage_object
@allure.title("Locked object should be protected from deletion")
@pytest.mark.parametrize(
"locked_storage_object",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
indirect=True,
)
def test_locked_object_cannot_be_deleted(
self,
request: FixtureRequest,
locked_storage_object: StorageObjectInfo,
):
"""
Locked object should be protected from deletion
"""
allure.dynamic.title(
f"Locked object should be protected from deletion for {request.node.callspec.id}"
)
with pytest.raises(Exception, match=OBJECT_IS_LOCKED):
delete_object(
locked_storage_object.wallet_file_path,
locked_storage_object.cid,
locked_storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
)
@allure.title("Lock object itself should be protected from deletion")
# We operate only with the lock object here, so no complex object is needed in this test
@pytest.mark.parametrize(
"locked_storage_object", [pytest.lazy_fixture("simple_object_size")], indirect=True
)
def test_lock_object_itself_cannot_be_deleted(
self,
locked_storage_object: StorageObjectInfo,
):
"""
Lock object itself should be protected from deletion
"""
lock_object = locked_storage_object.locks[0]
wallet_path = locked_storage_object.wallet_file_path
with pytest.raises(Exception, match=LOCK_OBJECT_REMOVAL):
delete_object(
wallet_path,
lock_object.cid,
lock_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
)
@allure.title("Lock object itself cannot be locked")
# We operate only with the lock object here, so no complex object is needed in this test
@pytest.mark.parametrize(
"locked_storage_object", [pytest.lazy_fixture("simple_object_size")], indirect=True
)
def test_lock_object_cannot_be_locked(
self,
locked_storage_object: StorageObjectInfo,
):
"""
Lock object itself cannot be locked
"""
lock_object_info = locked_storage_object.locks[0]
wallet_path = locked_storage_object.wallet_file_path
with pytest.raises(Exception, match=LOCK_NON_REGULAR_OBJECT):
lock_object(
wallet_path,
lock_object_info.cid,
lock_object_info.oid,
self.shell,
self.cluster.default_rpc_endpoint,
1,
)
@allure.title("Cannot lock object without lifetime and expire_at fields")
# We operate only with the lock object here, so no complex object is needed in this test
@pytest.mark.parametrize(
"locked_storage_object", [pytest.lazy_fixture("simple_object_size")], indirect=True
)
@pytest.mark.parametrize(
"wrong_lifetime,wrong_expire_at,expected_error",
[
(None, None, LIFETIME_REQUIRED),
(0, 0, LIFETIME_REQUIRED),
(0, None, LIFETIME_REQUIRED),
(None, 0, LIFETIME_REQUIRED),
(-1, None, 'invalid argument "-1" for "--lifetime" flag'),
(None, -1, 'invalid argument "-1" for "-e, --expire-at" flag'),
],
)
def test_cannot_lock_object_without_lifetime(
self,
locked_storage_object: StorageObjectInfo,
wrong_lifetime: int,
wrong_expire_at: int,
expected_error: str,
):
"""
Cannot lock object without lifetime and expire_at fields
"""
allure.dynamic.title(
f"Cannot lock object without lifetime and expire_at fields: (lifetime={wrong_lifetime}, expire-at={wrong_expire_at})"
)
lock_object_info = locked_storage_object.locks[0]
wallet_path = locked_storage_object.wallet_file_path
with pytest.raises(Exception, match=expected_error):
lock_object(
wallet_path,
lock_object_info.cid,
lock_object_info.oid,
self.shell,
self.cluster.default_rpc_endpoint,
lifetime=wrong_lifetime,
expire_at=wrong_expire_at,
)
@allure.title("Expired object should be deleted after locks are expired")
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
def test_expired_object_should_be_deleted_after_locks_are_expired(
self,
request: FixtureRequest,
user_container: StorageContainer,
object_size: int,
):
"""
Expired object should be deleted after locks are expired
"""
allure.dynamic.title(
f"Expired object should be deleted after locks are expired for {request.node.callspec.id}"
)
current_epoch = self.ensure_fresh_epoch()
storage_object = user_container.generate_object(object_size, expire_at=current_epoch + 1)
with allure.step("Lock object for couple epochs"):
lock_object(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
lifetime=2,
)
lock_object(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
expire_at=current_epoch + 2,
)
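# Both locks (one set via lifetime, one via expire_at) outlive the object's own expiration epoch, so the object must survive its expiration while the locks are active.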
with allure.step("Check object is not deleted at expiration time"):
self.tick_epochs(2)
# Must wait to ensure object is not deleted
wait_for_gc_pass_on_storage_nodes()
with expect_not_raises():
head_object(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
)
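# GC removes the expired object some time after the locks expire, so the check below retries for up to one GC cycle.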
@wait_for_success(datetime_utils.parse_time(STORAGE_GC_TIME))
def check_object_not_found():
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
head_object(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
)
with allure.step("Wait for object to be deleted after third epoch"):
self.tick_epoch()
check_object_not_found()
@allure.title("Should be possible to lock multiple objects at once")
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
def test_should_be_possible_to_lock_multiple_objects_at_once(
self,
request: FixtureRequest,
user_container: StorageContainer,
object_size: int,
):
"""
Should be possible to lock multiple objects at once
"""
allure.dynamic.title(
f"Should be possible to lock multiple objects at once for {request.node.callspec.id}"
)
current_epoch = ensure_fresh_epoch(self.shell, self.cluster)
storage_objects: list[StorageObjectInfo] = []
with allure.step("Generate three objects"):
for _ in range(3):
storage_objects.append(
user_container.generate_object(object_size, expire_at=current_epoch + 5)
)
lock_object(
storage_objects[0].wallet_file_path,
storage_objects[0].cid,
",".join([storage_object.oid for storage_object in storage_objects]),
self.shell,
self.cluster.default_rpc_endpoint,
expire_at=current_epoch + 1,
)
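# A single lock request can cover several objects: the comma-separated OID list locks all three generated objects at once.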
for storage_object in storage_objects:
with allure.step(f"Try to delete object {storage_object.oid}"):
with pytest.raises(Exception, match=OBJECT_IS_LOCKED):
delete_object(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
)
with allure.step("Tick two epochs"):
self.tick_epoch()
self.tick_epoch()
with expect_not_raises():
delete_objects(storage_objects, self.shell, self.cluster)
@allure.title("Already outdated lock should not be applied")
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
def test_already_outdated_lock_should_not_be_applied(
self,
request: FixtureRequest,
user_container: StorageContainer,
object_size: int,
):
"""
Already outdated lock should not be applied
"""
allure.dynamic.title(
f"Already outdated lock should not be applied for {request.node.callspec.id}"
)
current_epoch = self.ensure_fresh_epoch()
storage_object = user_container.generate_object(object_size, expire_at=current_epoch + 1)
expiration_epoch = current_epoch - 1
with pytest.raises(
Exception,
match=LOCK_OBJECT_EXPIRATION.format(
expiration_epoch=expiration_epoch, current_epoch=current_epoch
),
):
lock_object(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
expire_at=expiration_epoch,
)
@allure.title("After lock expiration with lifetime user should be able to delete object")
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
@expect_not_raises()
def test_after_lock_expiration_with_lifetime_user_should_be_able_to_delete_object(
self,
request: FixtureRequest,
user_container: StorageContainer,
object_size: int,
):
"""
After lock expiration with lifetime user should be able to delete object
"""
allure.dynamic.title(
f"After lock expiration with lifetime user should be able to delete object for {request.node.callspec.id}"
)
current_epoch = self.ensure_fresh_epoch()
storage_object = user_container.generate_object(object_size, expire_at=current_epoch + 5)
lock_object(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
lifetime=1,
)
self.tick_epochs(2)
with expect_not_raises():
delete_object(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
)
@allure.title("After lock expiration with expire_at user should be able to delete object")
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
@expect_not_raises()
def test_after_lock_expiration_with_expire_at_user_should_be_able_to_delete_object(
self,
request: FixtureRequest,
user_container: StorageContainer,
object_size: int,
):
"""
After lock expiration with expire_at user should be able to delete object
"""
allure.dynamic.title(
f"After lock expiration with expire_at user should be able to delete object for {request.node.callspec.id}"
)
current_epoch = self.ensure_fresh_epoch()
storage_object = user_container.generate_object(object_size, expire_at=current_epoch + 5)
lock_object(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
self.shell,
endpoint=self.cluster.default_rpc_endpoint,
expire_at=current_epoch + 1,
)
self.tick_epochs(2)
with expect_not_raises():
delete_object(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
)
@allure.title("Complex object chunks should also be protected from deletion")
@pytest.mark.parametrize(
# Only complex objects are required for this test
"locked_storage_object",
[pytest.lazy_fixture("complex_object_size")],
indirect=True,
)
def test_complex_object_chunks_should_also_be_protected_from_deletion(
self,
locked_storage_object: StorageObjectInfo,
):
"""
Complex object chunks should also be protected from deletion
"""
chunk_object_ids = get_storage_object_chunks(
locked_storage_object, self.shell, self.cluster
)
for chunk_object_id in chunk_object_ids:
with allure.step(f"Try to delete chunk object {chunk_object_id}"):
with pytest.raises(Exception, match=OBJECT_IS_LOCKED):
delete_object(
locked_storage_object.wallet_file_path,
locked_storage_object.cid,
chunk_object_id,
self.shell,
self.cluster.default_rpc_endpoint,
)
@allure.title("Link object of locked complex object can be dropped")
@pytest.mark.grpc_control
@pytest.mark.parametrize(
"new_locked_storage_object",
# Only complex object is required
[pytest.lazy_fixture("complex_object_size")],
indirect=True,
)
def test_link_object_of_locked_complex_object_can_be_dropped(
self, new_locked_storage_object: StorageObjectInfo
):
link_object_id = get_link_object(
new_locked_storage_object.wallet_file_path,
new_locked_storage_object.cid,
new_locked_storage_object.oid,
self.shell,
self.cluster.storage_nodes,
)
with allure.step(f"Drop link object with id {link_object_id} from nodes"):
nodes_with_object = get_nodes_with_object(
new_locked_storage_object.cid,
link_object_id,
shell=self.shell,
nodes=self.cluster.storage_nodes,
)
for node in nodes_with_object:
with expect_not_raises():
drop_object(node, new_locked_storage_object.cid, link_object_id)
@allure.title("Chunks of locked complex object can be dropped")
@pytest.mark.grpc_control
@pytest.mark.parametrize(
"new_locked_storage_object",
# Only complex object is required
[pytest.lazy_fixture("complex_object_size")],
indirect=True,
)
def test_chunks_of_locked_complex_object_can_be_dropped(
self, new_locked_storage_object: StorageObjectInfo
):
chunk_objects = get_storage_object_chunks(
new_locked_storage_object, self.shell, self.cluster
)
for chunk_object_id in chunk_objects:
with allure.step(f"Drop chunk object with id {chunk_object_id} from nodes"):
nodes_with_object = get_nodes_with_object(
new_locked_storage_object.cid,
chunk_object_id,
shell=self.shell,
nodes=self.cluster.storage_nodes,
)
for node in nodes_with_object:
with expect_not_raises():
drop_object(node, new_locked_storage_object.cid, chunk_object_id)
@pytest.mark.grpc_control
@pytest.mark.parametrize(
"new_locked_storage_object",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
indirect=True,
)
def test_locked_object_can_be_dropped(
self, new_locked_storage_object: StorageObjectInfo, request: FixtureRequest
):
allure.dynamic.title(f"Locked {request.node.callspec.id} can be dropped")
nodes_with_object = get_nodes_with_object(
new_locked_storage_object.cid,
new_locked_storage_object.oid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
)
for node in nodes_with_object:
with expect_not_raises():
drop_object(node, new_locked_storage_object.cid, new_locked_storage_object.oid)
@allure.title("Link object of complex object should also be protected from deletion")
@pytest.mark.parametrize(
# Only complex objects are required for this test
"locked_storage_object",
[pytest.lazy_fixture("complex_object_size")],
indirect=True,
)
def test_link_object_of_complex_object_should_also_be_protected_from_deletion(
self,
locked_storage_object: StorageObjectInfo,
):
"""
Link object of complex object should also be protected from deletion
"""
link_object_id = get_link_object(
locked_storage_object.wallet_file_path,
locked_storage_object.cid,
locked_storage_object.oid,
self.shell,
self.cluster.storage_nodes,
is_direct=False,
)
with allure.step(f"Try to delete link object {link_object_id}"):
with pytest.raises(Exception, match=OBJECT_IS_LOCKED):
delete_object(
locked_storage_object.wallet_file_path,
locked_storage_object.cid,
link_object_id,
self.shell,
self.cluster.default_rpc_endpoint,
)

View file

@ -1,129 +0,0 @@
import logging
import allure
import pytest
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.acl import (
bearer_token_base64_from_file,
create_eacl,
form_bearertoken_file,
set_eacl,
sign_bearer,
wait_for_cache_expired,
)
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.http.http_gate import upload_via_http_gate_curl, verify_object_hash
from frostfs_testlib.storage.dataclasses.acl import EACLAccess, EACLOperation, EACLRole, EACLRule
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
logger = logging.getLogger("NeoLogger")
@pytest.mark.skip("Temporarly skip for v0.37")
@pytest.mark.sanity
@pytest.mark.http_gate
class Test_http_bearer(ClusterTestBase):
PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
@pytest.fixture(scope="class", autouse=True)
@allure.title("[Class/Autouse]: Prepare wallet and deposit")
def prepare_wallet(self, default_wallet):
Test_http_bearer.wallet = default_wallet
@pytest.fixture(scope="class")
def user_container(self) -> str:
return create_container(
wallet=self.wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=self.PLACEMENT_RULE,
basic_acl=PUBLIC_ACL,
)
@pytest.fixture(scope="class")
def eacl_deny_for_others(self, user_container: str) -> None:
with allure.step(f"Set deny all operations for {EACLRole.OTHERS} via eACL"):
eacl = EACLRule(
access=EACLAccess.DENY, role=EACLRole.OTHERS, operation=EACLOperation.PUT
)
set_eacl(
self.wallet,
user_container,
create_eacl(user_container, eacl, shell=self.shell),
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
wait_for_cache_expired()
@pytest.fixture(scope="class")
def bearer_token_no_limit_for_others(self, user_container: str) -> str:
with allure.step(f"Create bearer token for {EACLRole.OTHERS} with all operations allowed"):
bearer = form_bearertoken_file(
self.wallet,
user_container,
[
EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.OTHERS)
for op in EACLOperation
],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
sign=False,
)
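# The token is produced unsigned (sign=False), then signed with the wallet and base64-encoded so it can be passed in the HTTP Authorization header.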
bearer_signed = f"{bearer}_signed"
sign_bearer(
shell=self.shell,
wallet_path=self.wallet,
eacl_rules_file_from=bearer,
eacl_rules_file_to=bearer_signed,
json=False,
)
return bearer_token_base64_from_file(bearer_signed)
@allure.title(f"[negative] Put object without bearer token for {EACLRole.OTHERS}")
def test_unable_put_without_bearer_token(
self, simple_object_size: int, user_container: str, eacl_deny_for_others
):
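# The eacl_deny_for_others fixture is requested only for its side effect: a deny eACL for OTHERS is already applied to the container.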
eacl_deny_for_others
upload_via_http_gate_curl(
cid=user_container,
filepath=generate_file(simple_object_size),
endpoint=self.cluster.default_http_gate_endpoint,
error_pattern="access to object operation denied",
)
@pytest.mark.skip("Temp disable for v0.37")
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
def test_put_with_bearer_when_eacl_restrict(
self,
object_size: int,
user_container: str,
eacl_deny_for_others,
bearer_token_no_limit_for_others: str,
):
eacl_deny_for_others
bearer = bearer_token_no_limit_for_others
file_path = generate_file(object_size)
with allure.step(
f"Put object with bearer token for {EACLRole.OTHERS}, then get and verify hashes"
):
headers = [f" -H 'Authorization: Bearer {bearer}'"]
oid = upload_via_http_gate_curl(
cid=user_container,
filepath=file_path,
endpoint=self.cluster.default_http_gate_endpoint,
headers=headers,
)
verify_object_hash(
oid=oid,
file_name=file_path,
wallet=self.wallet,
cid=user_container,
shell=self.shell,
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
)

View file

@ -1,401 +0,0 @@
import os
import allure
import pytest
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.steps.epoch import get_epoch
from frostfs_testlib.steps.http.http_gate import (
attr_into_header,
get_object_by_attr_and_verify_hashes,
get_via_http_curl,
get_via_http_gate,
get_via_zip_http_gate,
try_to_get_object_and_expect_error,
upload_via_http_gate,
upload_via_http_gate_curl,
verify_object_hash,
)
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
from pytest_tests.helpers.utility import wait_for_gc_pass_on_storage_nodes
OBJECT_NOT_FOUND_ERROR = "not found"
@allure.link(
"https://github.com/TrueCloudLab/frostfs-http-gw#frostfs-http-gateway",
name="frostfs-http-gateway",
)
@allure.link("https://github.com/TrueCloudLab/frostfs-http-gw#uploading", name="uploading")
@allure.link("https://github.com/TrueCloudLab/frostfs-http-gw#downloading", name="downloading")
@pytest.mark.sanity
@pytest.mark.http_gate
class TestHttpGate(ClusterTestBase):
PLACEMENT_RULE_1 = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
PLACEMENT_RULE_2 = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
@pytest.fixture(scope="class", autouse=True)
@allure.title("[Class/Autouse]: Prepare wallet and deposit")
def prepare_wallet(self, default_wallet):
TestHttpGate.wallet = default_wallet
@allure.title("Test Put over gRPC, Get over HTTP")
def test_put_grpc_get_http(self, complex_object_size: int, simple_object_size: int):
"""
Test that object can be put using gRPC interface and get using HTTP.
Steps:
1. Create simple and large objects.
2. Put objects using gRPC (frostfs-cli).
3. Download objects using HTTP gate (https://github.com/TrueCloudLab/frostfs-http-gw#downloading).
4. Get objects using gRPC (frostfs-cli).
5. Compare hashes for got objects.
6. Compare hashes for got and original objects.
Expected result:
Hashes must be the same.
"""
cid = create_container(
self.wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=self.PLACEMENT_RULE_1,
basic_acl=PUBLIC_ACL,
)
file_path_simple, file_path_large = generate_file(simple_object_size), generate_file(
complex_object_size
)
with allure.step("Put objects using gRPC"):
oid_simple = put_object_to_random_node(
wallet=self.wallet,
path=file_path_simple,
cid=cid,
shell=self.shell,
cluster=self.cluster,
)
oid_large = put_object_to_random_node(
wallet=self.wallet,
path=file_path_large,
cid=cid,
shell=self.shell,
cluster=self.cluster,
)
for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)):
verify_object_hash(
oid=oid,
file_name=file_path,
wallet=self.wallet,
cid=cid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname,
)
@allure.link("https://github.com/TrueCloudLab/frostfs-http-gw#uploading", name="uploading")
@allure.link("https://github.com/TrueCloudLab/frostfs-http-gw#downloading", name="downloading")
@allure.title("Test Put over HTTP, Get over HTTP")
@pytest.mark.skip("Skipped due to deprecated PUT via http")
@pytest.mark.smoke
def test_put_http_get_http(self, complex_object_size: int, simple_object_size: int):
"""
Test that object can be put and get using HTTP interface.
Steps:
1. Create simple and large objects.
2. Upload objects using HTTP (https://github.com/TrueCloudLab/frostfs-http-gw#uploading).
3. Download objects using HTTP gate (https://github.com/TrueCloudLab/frostfs-http-gw#downloading).
4. Compare hashes for got and original objects.
Expected result:
Hashes must be the same.
"""
cid = create_container(
self.wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=self.PLACEMENT_RULE_2,
basic_acl=PUBLIC_ACL,
)
file_path_simple, file_path_large = generate_file(simple_object_size), generate_file(
complex_object_size
)
with allure.step("Put objects using HTTP"):
oid_simple = upload_via_http_gate(
cid=cid, path=file_path_simple, endpoint=self.cluster.default_http_gate_endpoint
)
oid_large = upload_via_http_gate(
cid=cid, path=file_path_large, endpoint=self.cluster.default_http_gate_endpoint
)
for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)):
verify_object_hash(
oid=oid,
file_name=file_path,
wallet=self.wallet,
cid=cid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
hostname=self.cluster.default_http_hostname,
)
@allure.link(
"https://github.com/TrueCloudLab/frostfs-http-gw#by-attributes",
name="download by attributes",
)
@pytest.mark.skip("Skipped due to deprecated PUT via http")
@allure.title("Test Put over HTTP, Get over HTTP with headers")
@pytest.mark.parametrize(
"attributes",
[
{"fileName": "simple_obj_filename"},
{"file-Name": "simple obj filename"},
{"cat%jpeg": "cat%jpeg"},
],
ids=["simple", "hyphen", "percent"],
)
def test_put_http_get_http_with_headers(self, attributes: dict, simple_object_size: int):
"""
Test that object can be downloaded using different attributes in HTTP header.
Steps:
1. Create simple and large objects.
2. Upload objects using HTTP with particular attributes in the header.
3. Download objects by attributes using HTTP gate (https://github.com/TrueCloudLab/frostfs-http-gw#by-attributes).
4. Compare hashes for got and original objects.
Expected result:
Hashes must be the same.
"""
cid = create_container(
self.wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=self.PLACEMENT_RULE_2,
basic_acl=PUBLIC_ACL,
)
file_path = generate_file(simple_object_size)
with allure.step("Put objects using HTTP with attribute"):
headers = attr_into_header(attributes)
oid = upload_via_http_gate(
cid=cid,
path=file_path,
headers=headers,
endpoint=self.cluster.default_http_gate_endpoint,
)
get_object_by_attr_and_verify_hashes(
oid=oid,
file_name=file_path,
cid=cid,
attrs=attributes,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname,
)
@pytest.mark.skip("Skipped due to deprecated PUT via http")
@allure.title("Test Expiration-Epoch in HTTP header")
@pytest.mark.parametrize("epoch_gap", [0, 1])
def test_expiration_epoch_in_http(self, simple_object_size: int, epoch_gap):
endpoint = self.cluster.default_rpc_endpoint
http_endpoint = self.cluster.default_http_gate_endpoint
min_valid_epoch = get_epoch(self.shell, self.cluster) + epoch_gap
cid = create_container(
self.wallet,
shell=self.shell,
endpoint=endpoint,
rule=self.PLACEMENT_RULE_2,
basic_acl=PUBLIC_ACL,
)
file_path = generate_file(simple_object_size)
oids_to_be_expired = []
oids_to_be_valid = []
for gap_until in (0, 1, 2, 100):
valid_until = min_valid_epoch + gap_until
headers = {"X-Attribute-System-Expiration-Epoch": str(valid_until)}
with allure.step("Put objects using HTTP with attribute Expiration-Epoch"):
oid = upload_via_http_gate(
cid=cid,
path=file_path,
headers=headers,
endpoint=http_endpoint,
http_hostname=self.cluster.default_http_hostname,
)
if get_epoch(self.shell, self.cluster) + 1 <= valid_until:
oids_to_be_valid.append(oid)
else:
oids_to_be_expired.append(oid)
with allure.step("This object can be got"):
get_via_http_gate(
cid=cid,
oid=oid,
endpoint=http_endpoint,
http_hostname=self.cluster.default_http_hostname,
)
self.tick_epoch()
# Wait for GC, because object with expiration is counted as alive until GC removes it
wait_for_gc_pass_on_storage_nodes()
for oid in oids_to_be_expired:
with allure.step(f"{oid} shall be expired and cannot be got"):
try_to_get_object_and_expect_error(
cid=cid,
oid=oid,
error_pattern=OBJECT_NOT_FOUND_ERROR,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname,
)
for oid in oids_to_be_valid:
with allure.step(f"{oid} shall be valid and can be got"):
get_via_http_gate(
cid=cid,
oid=oid,
endpoint=http_endpoint,
http_hostname=self.cluster.default_http_hostname,
)
@pytest.mark.skip("Skipped due to deprecated PUT via http")
@allure.title("Test Zip in HTTP header")
def test_zip_in_http(self, complex_object_size: int, simple_object_size: int):
cid = create_container(
self.wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=self.PLACEMENT_RULE_2,
basic_acl=PUBLIC_ACL,
)
file_path_simple, file_path_large = generate_file(simple_object_size), generate_file(
complex_object_size
)
common_prefix = "my_files"
headers1 = {"X-Attribute-FilePath": f"{common_prefix}/file1"}
headers2 = {"X-Attribute-FilePath": f"{common_prefix}/file2"}
upload_via_http_gate(
cid=cid,
path=file_path_simple,
headers=headers1,
endpoint=self.cluster.default_http_gate_endpoint,
)
upload_via_http_gate(
cid=cid,
path=file_path_large,
headers=headers2,
endpoint=self.cluster.default_http_gate_endpoint,
)
dir_path = get_via_zip_http_gate(
cid=cid,
prefix=common_prefix,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname,
)
with allure.step("Verify hashes"):
assert get_file_hash(f"{dir_path}/file1") == get_file_hash(file_path_simple)
assert get_file_hash(f"{dir_path}/file2") == get_file_hash(file_path_large)
@pytest.mark.long
@pytest.mark.skip("Skipped due to deprecated PUT via http")
@allure.title("Test Put over HTTP/Curl, Get over HTTP/Curl for large object")
def test_put_http_get_http_large_file(self, complex_object_size: int):
"""
This test checks upload and download using curl with a 'large' object.
A 'large' object here means an object up to 20 MB in size.
"""
cid = create_container(
self.wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=self.PLACEMENT_RULE_2,
basic_acl=PUBLIC_ACL,
)
obj_size = int(os.getenv("BIG_OBJ_SIZE", complex_object_size))
file_path = generate_file(obj_size)
with allure.step("Put objects using HTTP"):
oid_gate = upload_via_http_gate(
cid=cid, path=file_path, endpoint=self.cluster.default_http_gate_endpoint
)
oid_curl = upload_via_http_gate_curl(
cid=cid,
filepath=file_path,
endpoint=self.cluster.default_http_gate_endpoint,
)
verify_object_hash(
oid=oid_gate,
file_name=file_path,
wallet=self.wallet,
cid=cid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
hostname=self.cluster.default_http_hostname,
)
verify_object_hash(
oid=oid_curl,
file_name=file_path,
wallet=self.wallet,
cid=cid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname,
object_getter=get_via_http_curl,
)
@pytest.mark.skip("Skipped due to deprecated PUT via http")
@allure.title("Test Put/Get over HTTP using Curl utility")
def test_put_http_get_http_curl(self, complex_object_size: int, simple_object_size: int):
"""
Test checks upload and download over HTTP using curl utility.
"""
cid = create_container(
self.wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=self.PLACEMENT_RULE_2,
basic_acl=PUBLIC_ACL,
)
file_path_simple, file_path_large = generate_file(simple_object_size), generate_file(
complex_object_size
)
with allure.step("Put objects using curl utility"):
oid_simple = upload_via_http_gate_curl(
cid=cid, filepath=file_path_simple, endpoint=self.cluster.default_http_gate_endpoint
)
oid_large = upload_via_http_gate_curl(
cid=cid,
filepath=file_path_large,
endpoint=self.cluster.default_http_gate_endpoint,
)
for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)):
verify_object_hash(
oid=oid,
file_name=file_path,
wallet=self.wallet,
cid=cid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname,
object_getter=get_via_http_curl,
)

View file

@ -1,236 +0,0 @@
import logging
import os
import allure
import pytest
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.cli.container import (
create_container,
delete_container,
list_containers,
wait_for_container_deletion,
)
from frostfs_testlib.steps.cli.object import delete_object
from frostfs_testlib.steps.http.http_gate import (
attr_into_str_header_curl,
get_object_by_attr_and_verify_hashes,
try_to_get_object_and_expect_error,
try_to_get_object_via_passed_request_and_expect_error,
upload_via_http_gate_curl,
)
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
from pytest import FixtureRequest
OBJECT_ALREADY_REMOVED_ERROR = "object already removed"
logger = logging.getLogger("NeoLogger")
@pytest.mark.sanity
@pytest.mark.http_gate
class Test_http_headers(ClusterTestBase):
PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
obj1_keys = ["Writer", "Chapter1", "Chapter2"]
obj2_keys = ["Writer", "Ch@pter1", "chapter2"]
values = ["Leo Tolstoy", "peace", "w@r"]
OBJECT_ATTRIBUTES = [
{obj1_keys[0]: values[0], obj1_keys[1]: values[1], obj1_keys[2]: values[2]},
{obj2_keys[0]: values[0], obj2_keys[1]: values[1], obj2_keys[2]: values[2]},
]
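# Both objects share the same Writer value but use differently spelled chapter keys; the by-attribute download and deletion tests below rely on this.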
@pytest.fixture(scope="class", autouse=True)
@allure.title("[Class/Autouse]: Prepare wallet and deposit")
def prepare_wallet(self, default_wallet):
Test_http_headers.wallet = default_wallet
@pytest.fixture(
params=[
pytest.lazy_fixture("simple_object_size"),
# TODO: Temp disable for v0.37
# pytest.lazy_fixture("complex_object_size"),
],
# TODO: Temp disable for v0.37
# ids=["simple object", "complex object"],
ids=["simple object"],
scope="class",
)
def storage_objects_with_attributes(self, request: FixtureRequest) -> list[StorageObjectInfo]:
storage_objects = []
wallet = self.wallet
cid = create_container(
wallet=self.wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=self.PLACEMENT_RULE,
basic_acl=PUBLIC_ACL,
)
file_path = generate_file(request.param)
for attributes in self.OBJECT_ATTRIBUTES:
storage_object_id = upload_via_http_gate_curl(
cid=cid,
filepath=file_path,
endpoint=self.cluster.default_http_gate_endpoint,
headers=attr_into_str_header_curl(attributes),
)
storage_object = StorageObjectInfo(cid, storage_object_id)
storage_object.size = os.path.getsize(file_path)
storage_object.wallet_file_path = wallet
storage_object.file_path = file_path
storage_object.attributes = attributes
storage_objects.append(storage_object)
yield storage_objects
@allure.title("Get object1 by attribute")
def test_object1_can_be_get_by_attr(
self, storage_objects_with_attributes: list[StorageObjectInfo]
):
"""
Test to get object#1 by attribute and compare hashes
Steps:
1. Download object#1 with attributes [Chapter2=w@r] and compare hashes
"""
storage_object_1 = storage_objects_with_attributes[0]
with allure.step(
f'Download object#1 via wget with attributes Chapter2: {storage_object_1.attributes["Chapter2"]} and compare hashes'
):
get_object_by_attr_and_verify_hashes(
oid=storage_object_1.oid,
file_name=storage_object_1.file_path,
cid=storage_object_1.cid,
attrs={"Chapter2": storage_object_1.attributes["Chapter2"]},
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname,
)
@allure.title("Test get object2 with different attributes, then delete object2 and get object1")
def test_object2_can_be_get_by_attr(
self, storage_objects_with_attributes: list[StorageObjectInfo]
):
"""
Test to get object#2 by different attributes, then delete object#2 and get object#1 using the 1st attribute. Note: obj1 and obj2 share the same attribute#1,
so once obj2 is deleted, obj1 can still be fetched by that attribute
Steps:
1. Download object#2 with attributes [chapter2=w@r] and compare hashes
2. Download object#2 with attributes [Ch@pter1=peace] and compare hashes
3. Delete object#2
4. Download object#1 with attributes [Writer=Leo Tolstoy] and compare hashes
"""
storage_object_1 = storage_objects_with_attributes[0]
storage_object_2 = storage_objects_with_attributes[1]
with allure.step(
f'Download object#2 via wget with attributes [chapter2={storage_object_2.attributes["chapter2"]}] / [Ch@pter1={storage_object_2.attributes["Ch@pter1"]}] and compare hashes'
):
selected_attributes_object2 = [
{"chapter2": storage_object_2.attributes["chapter2"]},
{"Ch@pter1": storage_object_2.attributes["Ch@pter1"]},
]
for attributes in selected_attributes_object2:
get_object_by_attr_and_verify_hashes(
oid=storage_object_2.oid,
file_name=storage_object_2.file_path,
cid=storage_object_2.cid,
attrs=attributes,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname,
)
with allure.step("Delete object#2 and verify is the container deleted"):
delete_object(
wallet=self.wallet,
cid=storage_object_2.cid,
oid=storage_object_2.oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
try_to_get_object_and_expect_error(
cid=storage_object_2.cid,
oid=storage_object_2.oid,
error_pattern=OBJECT_ALREADY_REMOVED_ERROR,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname,
)
storage_objects_with_attributes.remove(storage_object_2)
with allure.step(
f'Download object#1 with attributes [Writer={storage_object_1.attributes["Writer"]}] and compare hashes'
):
key_value_pair = {"Writer": storage_object_1.attributes["Writer"]}
get_object_by_attr_and_verify_hashes(
oid=storage_object_1.oid,
file_name=storage_object_1.file_path,
cid=storage_object_1.cid,
attrs=key_value_pair,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname,
)
@allure.title("[Negative] Try to put object and get right after container is deleted")
@pytest.mark.skip("Skipped due to deprecated PUT via http")
def test_negative_put_and_get_object3(
self, storage_objects_with_attributes: list[StorageObjectInfo]
):
"""
Test to attempt to put object and try to download it right after the container has been deleted
Steps:
1. [Negative] Allocate and attempt to put object#3 via http with attributes: [Writer=Leo Tolstoy, Writer=peace, peace=peace]
Expected: "Error duplication of attributes detected"
2. Delete container
3. [Negative] Try to download object with attributes [peace=peace]
Expected: "HTTP request sent, awaiting response... 404 Not Found"
"""
storage_object_1 = storage_objects_with_attributes[0]
with allure.step(
"[Negative] Allocate and attemt to put object#3 via http with attributes: [Writer=Leo Tolstoy, Writer=peace, peace=peace]"
):
file_path_3 = generate_file(storage_object_1.size)
attrs_obj3 = {"Writer": "Leo Tolstoy", "peace": "peace"}
headers = attr_into_str_header_curl(attrs_obj3)
headers.append(" ".join(attr_into_str_header_curl({"Writer": "peace"})))
error_pattern = f"key duplication error: X-Attribute-Writer"
upload_via_http_gate_curl(
cid=storage_object_1.cid,
filepath=file_path_3,
endpoint=self.cluster.default_http_gate_endpoint,
headers=headers,
error_pattern=error_pattern,
)
with allure.step("Delete container and verify container deletion"):
delete_container(
wallet=self.wallet,
cid=storage_object_1.cid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
self.tick_epoch()
wait_for_container_deletion(
self.wallet,
storage_object_1.cid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
assert storage_object_1.cid not in list_containers(
self.wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
with allure.step(
"[Negative] Try to download (wget) object via wget with attributes [peace=peace]"
):
request = f"/get/{storage_object_1.cid}/peace/peace"
error_pattern = "404 Not Found"
try_to_get_object_via_passed_request_and_expect_error(
cid=storage_object_1.cid,
oid="",
error_pattern=error_pattern,
attrs=attrs_obj3,
http_request_path=request,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname,
)

View file

@ -1,130 +0,0 @@
import logging
import allure
import pytest
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.steps.http.http_gate import (
get_object_by_attr_and_verify_hashes,
try_to_get_object_via_passed_request_and_expect_error,
verify_object_hash,
)
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
logger = logging.getLogger("NeoLogger")
@pytest.mark.sanity
@pytest.mark.http_gate
class Test_http_object(ClusterTestBase):
PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
@pytest.fixture(scope="class", autouse=True)
@allure.title("[Class/Autouse]: Prepare wallet and deposit")
def prepare_wallet(self, default_wallet):
Test_http_object.wallet = default_wallet
@allure.title("Test Put over gRPC, Get over HTTP")
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
def test_object_put_get_attributes(self, object_size: int):
"""
Test that object can be put using gRPC interface and get using HTTP.
Steps:
1. Create object;
2. Put objects using gRPC (frostfs-cli) with attributes [--attributes chapter1=peace,chapter2=war];
3. Download object using HTTP gate (https://github.com/TrueCloudLab/frostfs-http-gw#downloading);
4. Compare hashes between original and downloaded object;
5. [Negative] Try to get the object with specified attributes via a `get` request: [get/$CID/chapter1/peace];
6. Download the object with specified attributes and `get_by_attribute` request: [get_by_attribute/$CID/chapter1/peace];
7. Compare hashes between original and downloaded object;
8. [Negative] Try to get the object via a `get_by_attribute` request: [get_by_attribute/$CID/$OID];
Expected result:
Hashes must be the same.
"""
with allure.step("Create public container"):
cid = create_container(
self.wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=self.PLACEMENT_RULE,
basic_acl=PUBLIC_ACL,
)
# Generate file
file_path = generate_file(object_size)
# List of Key=Value attributes
obj_key1 = "chapter1"
obj_value1 = "peace"
obj_key2 = "chapter2"
obj_value2 = "war"
# Prepare for grpc PUT request
key_value1 = obj_key1 + "=" + obj_value1
key_value2 = obj_key2 + "=" + obj_value2
with allure.step("Put objects using gRPC [--attributes chapter1=peace,chapter2=war]"):
oid = put_object_to_random_node(
wallet=self.wallet,
path=file_path,
cid=cid,
shell=self.shell,
cluster=self.cluster,
attributes=f"{key_value1},{key_value2}",
)
with allure.step("Get object and verify hashes [ get/$CID/$OID ]"):
verify_object_hash(
oid=oid,
file_name=file_path,
wallet=self.wallet,
cid=cid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname,
)
with allure.step("[Negative] try to get object: [get/$CID/chapter1/peace]"):
attrs = {obj_key1: obj_value1, obj_key2: obj_value2}
request = f"/get/{cid}/{obj_key1}/{obj_value1}"
expected_err_msg = "Failed to get object via HTTP gate:"
try_to_get_object_via_passed_request_and_expect_error(
cid=cid,
oid=oid,
error_pattern=expected_err_msg,
http_request_path=request,
attrs=attrs,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname,
)
with allure.step(
"Download the object with attribute [get_by_attribute/$CID/chapter1/peace]"
):
get_object_by_attr_and_verify_hashes(
oid=oid,
file_name=file_path,
cid=cid,
attrs=attrs,
endpoint=self.cluster.default_http_gate_endpoint,
hostname=self.cluster.default_http_hostname,
)
with allure.step("[Negative] try to get object: get_by_attribute/$CID/$OID"):
request = f"/get_by_attribute/{cid}/{oid}"
try_to_get_object_via_passed_request_and_expect_error(
cid=cid,
oid=oid,
error_pattern=expected_err_msg,
http_request_path=request,
endpoint=self.cluster.default_http_gate_endpoint,
hostname=self.cluster.default_http_hostname,
)

View file

@ -1,71 +0,0 @@
import logging
import allure
import pytest
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.http.http_gate import upload_via_http_gate_curl, verify_object_hash
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
logger = logging.getLogger("NeoLogger")
@pytest.mark.sanity
@pytest.mark.http_gate
class Test_http_streaming(ClusterTestBase):
PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
@pytest.fixture(scope="class", autouse=True)
@allure.title("[Class/Autouse]: Prepare wallet and deposit")
def prepare_wallet(self, default_wallet):
Test_http_streaming.wallet = default_wallet
@allure.title("Test Put via pipe (steaming), Get over HTTP and verify hashes")
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("complex_object_size")],
ids=["complex object"],
)
@pytest.mark.skip("Skipped due to deprecated PUT via http")
def test_object_can_be_put_get_by_streaming(self, object_size: int):
"""
Test that object can be put using gRPC interface and get using HTTP.
Steps:
1. Create big object;
2. Put object using curl with pipe (streaming);
3. Download object using HTTP gate (https://github.com/TrueCloudLab/frostfs-http-gw#downloading);
4. Compare hashes between original and downloaded object;
Expected result:
Hashes must be the same.
"""
with allure.step("Create public container and verify container creation"):
cid = create_container(
self.wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=self.PLACEMENT_RULE,
basic_acl=PUBLIC_ACL,
)
with allure.step("Allocate big object"):
# Generate file
file_path = generate_file(object_size)
with allure.step(
"Put objects using curl utility and Get object and verify hashes [ get/$CID/$OID ]"
):
oid = upload_via_http_gate_curl(
cid=cid, filepath=file_path, endpoint=self.cluster.default_http_gate_endpoint
)
verify_object_hash(
oid=oid,
file_name=file_path,
wallet=self.wallet,
cid=cid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
hostname=self.cluster.default_http_hostname,
)

View file

@ -1,420 +0,0 @@
import calendar
import datetime
import logging
from typing import Optional
import allure
import pytest
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import (
get_netmap_netinfo,
get_object_from_random_node,
head_object,
)
from frostfs_testlib.steps.epoch import get_epoch, wait_for_epochs_align
from frostfs_testlib.steps.http.http_gate import (
attr_into_str_header_curl,
try_to_get_object_and_expect_error,
upload_via_http_gate_curl,
verify_object_hash,
)
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
logger = logging.getLogger("NeoLogger")
EXPIRATION_TIMESTAMP_HEADER = "__SYSTEM__EXPIRATION_TIMESTAMP"
EXPIRATION_EPOCH_HEADER = "__SYSTEM__EXPIRATION_EPOCH"
EXPIRATION_DURATION_HEADER = "__SYSTEM__EXPIRATION_DURATION"
EXPIRATION_EXPIRATION_RFC = "__SYSTEM__EXPIRATION_RFC3339"
SYSTEM_EXPIRATION_EPOCH = "System-Expiration-Epoch"
SYSTEM_EXPIRATION_DURATION = "System-Expiration-Duration"
SYSTEM_EXPIRATION_TIMESTAMP = "System-Expiration-Timestamp"
SYSTEM_EXPIRATION_RFC3339 = "System-Expiration-RFC3339"
@pytest.mark.sanity
@pytest.mark.http_gate
class Test_http_system_header(ClusterTestBase):
PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
@pytest.fixture(scope="class", autouse=True)
@allure.title("[Class/Autouse]: Prepare wallet and deposit")
def prepare_wallet(self, default_wallet):
Test_http_system_header.wallet = default_wallet
@pytest.fixture(scope="class")
@allure.title("Create container")
def user_container(self):
return create_container(
wallet=self.wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=self.PLACEMENT_RULE,
basic_acl=PUBLIC_ACL,
)
@pytest.fixture(scope="class")
@allure.title("epoch_duration in seconds")
def epoch_duration(self) -> int:
net_info = get_netmap_netinfo(
wallet=self.wallet,
endpoint=self.cluster.default_rpc_endpoint,
shell=self.shell,
)
epoch_duration_in_blocks = net_info["epoch_duration"]
time_per_block = net_info["time_per_block"]
return int(epoch_duration_in_blocks * time_per_block)
@allure.title("Return N-epoch count in minutes")
def epoch_count_into_mins(self, epoch_duration: int, epoch: int) -> str:
mins = epoch_duration * epoch / 60
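# Example: with epoch_duration=240 seconds and epoch=2 this returns "8.0m" for the System-Expiration-Duration attribute.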
return f"{mins}m"
@allure.title("Return future timestamp after N epochs are passed")
def epoch_count_into_timestamp(
self, epoch_duration: int, epoch: int, rfc3339: Optional[bool] = False
) -> str:
current_datetime = datetime.datetime.utcnow()
epoch_count_in_seconds = epoch_duration * epoch
future_datetime = current_datetime + datetime.timedelta(seconds=epoch_count_in_seconds)
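# With rfc3339=True this yields an ISO 8601 string with a trailing "Z" (e.g. "2021-11-22T09:55:49.000000Z"); otherwise a Unix timestamp string is returned.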
if rfc3339:
return future_datetime.isoformat("T") + "Z"
else:
return str(calendar.timegm(future_datetime.timetuple()))
@allure.title("Check is (header_output) Key=Value exists and equal in passed (header_to_find)")
def check_key_value_presented_header(self, header_output: dict, header_to_find: dict) -> bool:
header_att = header_output["header"]["attributes"]
for key_to_check, val_to_check in header_to_find.items():
if key_to_check not in header_att or val_to_check != header_att[key_to_check]:
logger.info(f"Unable to find {key_to_check}: '{val_to_check}' in {header_att}")
return False
return True
@allure.title(
f"Validate that only {EXPIRATION_EPOCH_HEADER} exists in header and other headers are abesent"
)
def validation_for_http_header_attr(self, head_info: dict, expected_epoch: int) -> None:
# check that __SYSTEM__EXPIRATION_EPOCH attribute has corresponding epoch
assert self.check_key_value_presented_header(
head_info, {EXPIRATION_EPOCH_HEADER: str(expected_epoch)}
), f'Expected to find {EXPIRATION_EPOCH_HEADER}: {expected_epoch} in: {head_info["header"]["attributes"]}'
# check that {EXPIRATION_DURATION_HEADER} is absent from the header output
assert not (
self.check_key_value_presented_header(head_info, {EXPIRATION_DURATION_HEADER: ""})
), f"Only {EXPIRATION_EPOCH_HEADER} can be displayed in header attributes"
# check that {EXPIRATION_TIMESTAMP_HEADER} is absent from the header output
assert not (
self.check_key_value_presented_header(head_info, {EXPIRATION_TIMESTAMP_HEADER: ""})
), f"Only {EXPIRATION_TIMESTAMP_HEADER} can be displayed in header attributes"
# check that {EXPIRATION_EXPIRATION_RFC} is absent from the header output
assert not (
self.check_key_value_presented_header(head_info, {EXPIRATION_EXPIRATION_RFC: ""})
), f"Only {EXPIRATION_EXPIRATION_RFC} can be displayed in header attributes"
@allure.title("Put / get / verify object and return head command result to invoker")
def oid_header_info_for_object(self, file_path: str, attributes: dict, user_container: str):
oid = upload_via_http_gate_curl(
cid=user_container,
filepath=file_path,
endpoint=self.cluster.default_http_gate_endpoint,
headers=attr_into_str_header_curl(attributes),
)
verify_object_hash(
oid=oid,
file_name=file_path,
wallet=self.wallet,
cid=user_container,
shell=self.shell,
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
hostname=self.cluster.default_http_hostname,
)
head = head_object(
wallet=self.wallet,
cid=user_container,
oid=oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
return oid, head
@allure.title("[negative] attempt to put object with expired epoch")
def test_unable_put_expired_epoch(self, user_container: str, simple_object_size: int):
headers = attr_into_str_header_curl(
{"System-Expiration-Epoch": str(get_epoch(self.shell, self.cluster) - 1)}
)
file_path = generate_file(simple_object_size)
with allure.step(
"Put object using HTTP with attribute Expiration-Epoch where epoch is expired"
):
upload_via_http_gate_curl(
cid=user_container,
filepath=file_path,
endpoint=self.cluster.default_http_gate_endpoint,
headers=headers,
error_pattern="must be greater than current epoch",
)
@allure.title("[negative] attempt to put object with negative System-Expiration-Duration")
def test_unable_put_negative_duration(self, user_container: str, simple_object_size: int):
headers = attr_into_str_header_curl({"System-Expiration-Duration": "-1h"})
file_path = generate_file(simple_object_size)
with allure.step(
"Put object using HTTP with attribute System-Expiration-Duration where duration is negative"
):
upload_via_http_gate_curl(
cid=user_container,
filepath=file_path,
endpoint=self.cluster.default_http_gate_endpoint,
headers=headers,
error_pattern=f"{EXPIRATION_DURATION_HEADER} must be positive",
)
@allure.title(
"[negative] attempt to put object with System-Expiration-Timestamp value in the past"
)
def test_unable_put_expired_timestamp(self, user_container: str, simple_object_size: int):
headers = attr_into_str_header_curl({"System-Expiration-Timestamp": "1635075727"})
file_path = generate_file(simple_object_size)
with allure.step(
"Put object using HTTP with attribute System-Expiration-Timestamp where duration is in the past"
):
upload_via_http_gate_curl(
cid=user_container,
filepath=file_path,
endpoint=self.cluster.default_http_gate_endpoint,
headers=headers,
error_pattern=f"{EXPIRATION_TIMESTAMP_HEADER} must be in the future",
)
@allure.title(
"[negative] Put object using HTTP with attribute System-Expiration-RFC3339 where duration is in the past"
)
def test_unable_put_expired_rfc(self, user_container: str, simple_object_size: int):
headers = attr_into_str_header_curl({"System-Expiration-RFC3339": "2021-11-22T09:55:49Z"})
file_path = generate_file(simple_object_size)
upload_via_http_gate_curl(
cid=user_container,
filepath=file_path,
endpoint=self.cluster.default_http_gate_endpoint,
headers=headers,
error_pattern=f"{EXPIRATION_EXPIRATION_RFC} must be in the future",
)
@allure.title("priority of attributes epoch>duration")
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
@pytest.mark.skip("Temp disable for v0.37")
def test_http_attr_priority_epoch_duration(
self, user_container: str, object_size: int, epoch_duration: int
):
self.tick_epoch()
epoch_count = 1
expected_epoch = get_epoch(self.shell, self.cluster) + epoch_count
logger.info(
f"epoch duration={epoch_duration}, current_epoch= {get_epoch(self.shell, self.cluster)} expected_epoch {expected_epoch}"
)
attributes = {SYSTEM_EXPIRATION_EPOCH: expected_epoch, SYSTEM_EXPIRATION_DURATION: "1m"}
file_path = generate_file(object_size)
with allure.step(
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
):
oid, head_info = self.oid_header_info_for_object(
file_path=file_path, attributes=attributes, user_container=user_container
)
self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch)
with allure.step("Check that object becomes unavailable when epoch is expired"):
for _ in range(0, epoch_count + 1):
self.tick_epoch()
assert (
get_epoch(self.shell, self.cluster) == expected_epoch + 1
), f"Epochs should be equal: {get_epoch(self.shell, self.cluster)} != {expected_epoch + 1}"
with allure.step("Check object deleted because it expires-on epoch"):
wait_for_epochs_align(self.shell, self.cluster)
try_to_get_object_and_expect_error(
cid=user_container,
oid=oid,
error_pattern="404 Not Found",
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname,
)
# check that object is not available via grpc
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
get_object_from_random_node(
self.wallet, user_container, oid, self.shell, self.cluster
)
@allure.title(
f"priority of attributes duration>timestamp, duration time has higher priority and should be converted {EXPIRATION_EPOCH_HEADER}"
)
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
@pytest.mark.skip("Temp disable for v0.37")
def test_http_attr_priority_dur_timestamp(
self, user_container: str, object_size: int, epoch_duration: int
):
self.tick_epoch()
epoch_count = 2
expected_epoch = get_epoch(self.shell, self.cluster) + epoch_count
logger.info(
f"epoch duration={epoch_duration}, current_epoch= {get_epoch(self.shell, self.cluster)} expected_epoch {expected_epoch}"
)
attributes = {
SYSTEM_EXPIRATION_DURATION: self.epoch_count_into_mins(
epoch_duration=epoch_duration, epoch=2
),
SYSTEM_EXPIRATION_TIMESTAMP: self.epoch_count_into_timestamp(
epoch_duration=epoch_duration, epoch=1
),
}
file_path = generate_file(object_size)
with allure.step(
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
):
oid, head_info = self.oid_header_info_for_object(
file_path=file_path, attributes=attributes, user_container=user_container
)
self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch)
with allure.step("Check that object becomes unavailable when epoch is expired"):
for _ in range(0, epoch_count + 1):
self.tick_epoch()
assert (
get_epoch(self.shell, self.cluster) == expected_epoch + 1
), f"Epochs should be equal: {get_epoch(self.shell, self.cluster)} != {expected_epoch + 1}"
with allure.step("Check object deleted because it expires-on epoch"):
wait_for_epochs_align(self.shell, self.cluster)
try_to_get_object_and_expect_error(
cid=user_container,
oid=oid,
error_pattern="404 Not Found",
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname,
)
# check that object is not available via grpc
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
get_object_from_random_node(
self.wallet, user_container, oid, self.shell, self.cluster
)
@allure.title(
f"priority of attributes timestamp>Expiration-RFC, timestamp has higher priority and should be converted {EXPIRATION_EPOCH_HEADER}"
)
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
@pytest.mark.skip("Temp disable for v0.37")
def test_http_attr_priority_timestamp_rfc(
self, user_container: str, object_size: int, epoch_duration: int
):
self.tick_epoch()
epoch_count = 2
expected_epoch = get_epoch(self.shell, self.cluster) + epoch_count
logger.info(
f"epoch duration={epoch_duration}, current_epoch= {get_epoch(self.shell, self.cluster)} expected_epoch {expected_epoch}"
)
attributes = {
SYSTEM_EXPIRATION_TIMESTAMP: self.epoch_count_into_timestamp(
epoch_duration=epoch_duration, epoch=2
),
SYSTEM_EXPIRATION_RFC3339: self.epoch_count_into_timestamp(
epoch_duration=epoch_duration, epoch=1, rfc3339=True
),
}
file_path = generate_file(object_size)
with allure.step(
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
):
oid, head_info = self.oid_header_info_for_object(
file_path=file_path, attributes=attributes, user_container=user_container
)
self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch)
with allure.step("Check that object becomes unavailable when epoch is expired"):
for _ in range(0, epoch_count + 1):
self.tick_epoch()
assert (
get_epoch(self.shell, self.cluster) == expected_epoch + 1
), f"Epochs should be equal: {get_epoch(self.shell, self.cluster)} != {expected_epoch + 1}"
with allure.step("Check object deleted because it expires-on epoch"):
wait_for_epochs_align(self.shell, self.cluster)
try_to_get_object_and_expect_error(
cid=user_container,
oid=oid,
error_pattern="404 Not Found",
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname,
)
# check that object is not available via grpc
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
get_object_from_random_node(
self.wallet, user_container, oid, self.shell, self.cluster
)
@allure.title("Test that object is automatically delete when expiration passed")
@pytest.mark.parametrize(
"object_size",
# TODO: Temp disabled for v0.37
# [pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
[pytest.lazy_fixture("simple_object_size")],
# ids=["simple object", "complex object"],
ids=["simple object"],
)
def test_http_rfc_object_unavailable_after_expir(
self, user_container: str, object_size: int, epoch_duration: int
):
self.tick_epoch()
epoch_count = 2
expected_epoch = get_epoch(self.shell, self.cluster) + epoch_count
logger.info(
f"epoch duration={epoch_duration}, current_epoch= {get_epoch(self.shell, self.cluster)} expected_epoch {expected_epoch}"
)
attributes = {
SYSTEM_EXPIRATION_RFC3339: self.epoch_count_into_timestamp(
epoch_duration=epoch_duration, epoch=2, rfc3339=True
)
}
file_path = generate_file(object_size)
with allure.step(
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
):
oid, head_info = self.oid_header_info_for_object(
file_path=file_path,
attributes=attributes,
user_container=user_container,
)
self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch)
with allure.step("Check that object becomes unavailable when epoch is expired"):
for _ in range(0, epoch_count + 1):
self.tick_epoch()
# check that the {EXPIRATION_EXPIRATION_RFC} attribute is absent from the header output
assert (
get_epoch(self.shell, self.cluster) == expected_epoch + 1
), f"Epochs should be equal: {get_epoch(self.shell, self.cluster)} != {expected_epoch + 1}"
with allure.step("Check object deleted because it expires-on epoch"):
wait_for_epochs_align(self.shell, self.cluster)
try_to_get_object_and_expect_error(
cid=user_container,
oid=oid,
error_pattern="404 Not Found",
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname,
)
# check that object is not available via grpc
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
get_object_from_random_node(
self.wallet, user_container, oid, self.shell, self.cluster
)
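# Illustrative sketch, not part of the original suite: one way the epoch-based
# helpers used above (epoch_count_into_mins / epoch_count_into_timestamp) could
# derive System-Expiration-* attribute values. It assumes epoch_duration is
# given in seconds; the real helpers may differ.
def sketch_expiration_attributes(epoch_duration: int, epochs: int) -> dict:
    from datetime import datetime, timedelta, timezone
    expires_at = datetime.now(timezone.utc) + timedelta(seconds=epoch_duration * epochs)
    return {
        "System-Expiration-Duration": f"{(epoch_duration * epochs) // 60}m",
        "System-Expiration-Timestamp": str(int(expires_at.timestamp())),
        "System-Expiration-RFC3339": expires_at.strftime("%Y-%m-%dT%H:%M:%SZ"),
    }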

View file

@ -1,67 +0,0 @@
import allure
import pytest
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.utils.file_utils import generate_file
@pytest.mark.sanity
@pytest.mark.acl
@pytest.mark.s3_gate
class TestS3GateACL:
@allure.title("Test S3: Object ACL")
@pytest.mark.parametrize("s3_client", [AwsCliClient], indirect=True)
def test_s3_object_ACL(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int):
file_path = generate_file(simple_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
with allure.step("Put object into bucket, Check ACL is empty"):
s3_client.put_object(bucket, file_path)
obj_acl = s3_client.get_object_acl(bucket, file_name)
assert obj_acl == [], f"Expected ACL is empty, got {obj_acl}"
with allure.step("Put object ACL = public-read"):
s3_client.put_object_acl(bucket, file_name, "public-read")
obj_acl = s3_client.get_object_acl(bucket, file_name)
s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")
with allure.step("Put object ACL = private"):
s3_client.put_object_acl(bucket, file_name, "private")
obj_acl = s3_client.get_object_acl(bucket, file_name)
s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="CanonicalUser")
with allure.step(
"Put object with grant-read uri=http://acs.amazonaws.com/groups/global/AllUsers"
):
s3_client.put_object_acl(
bucket,
file_name,
grant_read="uri=http://acs.amazonaws.com/groups/global/AllUsers",
)
obj_acl = s3_client.get_object_acl(bucket, file_name)
s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")
@allure.title("Test S3: Bucket ACL")
@pytest.mark.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
def test_s3_bucket_ACL(self, s3_client: S3ClientWrapper):
with allure.step("Create bucket with ACL = public-read-write"):
bucket = s3_client.create_bucket(
object_lock_enabled_for_bucket=True, acl="public-read-write"
)
bucket_acl = s3_client.get_bucket_acl(bucket)
s3_helper.assert_s3_acl(acl_grants=bucket_acl, permitted_users="AllUsers")
with allure.step("Change bucket ACL to private"):
s3_client.put_bucket_acl(bucket, acl="private")
bucket_acl = s3_client.get_bucket_acl(bucket)
s3_helper.assert_s3_acl(acl_grants=bucket_acl, permitted_users="CanonicalUser")
with allure.step(
"Change bucket acl to --grant-write uri=http://acs.amazonaws.com/groups/global/AllUsers"
):
s3_client.put_bucket_acl(
bucket,
grant_write="uri=http://acs.amazonaws.com/groups/global/AllUsers",
)
bucket_acl = s3_client.get_bucket_acl(bucket)
s3_helper.assert_s3_acl(acl_grants=bucket_acl, permitted_users="AllUsers")
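# Illustrative sketch, not part of the original suite: the grant structure these
# ACL assertions operate on, following the AWS S3 Grants format. How
# s3_helper.assert_s3_acl matches grantees internally is an assumption here.
def sketch_has_all_users_grant(acl_grants: list) -> bool:
    # A "public" grant is a Group grantee pointing at the global AllUsers URI.
    return any(
        grant.get("Grantee", {}).get("Type") == "Group"
        and grant.get("Grantee", {}).get("URI", "").endswith("/groups/global/AllUsers")
        for grant in acl_grants
    )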

View file

@ -1,133 +0,0 @@
from datetime import datetime, timedelta
import allure
import pytest
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.utils.file_utils import generate_file
def pytest_generate_tests(metafunc: pytest.Metafunc):
if "s3_client" in metafunc.fixturenames:
metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
@pytest.mark.sanity
@pytest.mark.s3_gate
@pytest.mark.s3_gate_bucket
class TestS3GateBucket:
@allure.title("Test S3: Create Bucket with different ACL")
def test_s3_create_bucket_with_ACL(self, s3_client: S3ClientWrapper):
with allure.step("Create bucket with ACL private"):
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="private")
bucket_acl = s3_client.get_bucket_acl(bucket)
s3_helper.assert_s3_acl(acl_grants=bucket_acl, permitted_users="CanonicalUser")
with allure.step("Create bucket with ACL = public-read"):
bucket_1 = s3_client.create_bucket(
object_lock_enabled_for_bucket=True, acl="public-read"
)
bucket_acl_1 = s3_client.get_bucket_acl(bucket_1)
s3_helper.assert_s3_acl(acl_grants=bucket_acl_1, permitted_users="AllUsers")
with allure.step("Create bucket with ACL public-read-write"):
bucket_2 = s3_client.create_bucket(
object_lock_enabled_for_bucket=True, acl="public-read-write"
)
bucket_acl_2 = s3_client.get_bucket_acl(bucket_2)
s3_helper.assert_s3_acl(acl_grants=bucket_acl_2, permitted_users="AllUsers")
with allure.step("Create bucket with ACL = authenticated-read"):
bucket_3 = s3_client.create_bucket(
object_lock_enabled_for_bucket=True, acl="authenticated-read"
)
bucket_acl_3 = s3_client.get_bucket_acl(bucket_3)
s3_helper.assert_s3_acl(acl_grants=bucket_acl_3, permitted_users="AllUsers")
@allure.title("Test S3: Create Bucket with different ACL by grand")
def test_s3_create_bucket_with_grands(self, s3_client: S3ClientWrapper):
with allure.step("Create bucket with --grant-read"):
bucket = s3_client.create_bucket(
object_lock_enabled_for_bucket=True,
grant_read="uri=http://acs.amazonaws.com/groups/global/AllUsers",
)
bucket_acl = s3_client.get_bucket_acl(bucket)
s3_helper.assert_s3_acl(acl_grants=bucket_acl, permitted_users="AllUsers")
with allure.step("Create bucket with --grant-wtite"):
bucket_1 = s3_client.create_bucket(
object_lock_enabled_for_bucket=True,
grant_write="uri=http://acs.amazonaws.com/groups/global/AllUsers",
)
bucket_acl_1 = s3_client.get_bucket_acl(bucket_1)
s3_helper.assert_s3_acl(acl_grants=bucket_acl_1, permitted_users="AllUsers")
with allure.step("Create bucket with --grant-full-control"):
bucket_2 = s3_client.create_bucket(
object_lock_enabled_for_bucket=True,
grant_full_control="uri=http://acs.amazonaws.com/groups/global/AllUsers",
)
bucket_acl_2 = s3_client.get_bucket_acl(bucket_2)
s3_helper.assert_s3_acl(acl_grants=bucket_acl_2, permitted_users="AllUsers")
@allure.title("Test S3: create bucket with object lock")
def test_s3_bucket_object_lock(self, s3_client: S3ClientWrapper, simple_object_size: int):
file_path = generate_file(simple_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
with allure.step("Create bucket with --no-object-lock-enabled-for-bucket"):
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=False)
date_obj = datetime.utcnow() + timedelta(days=1)
with pytest.raises(
Exception, match=r".*Object Lock configuration does not exist for this bucket.*"
):
# An error occurred (ObjectLockConfigurationNotFoundError) when calling the PutObject operation (reached max retries: 0):
# Object Lock configuration does not exist for this bucket
s3_client.put_object(
bucket,
file_path,
object_lock_mode="COMPLIANCE",
object_lock_retain_until_date=date_obj.strftime("%Y-%m-%dT%H:%M:%S"),
)
with allure.step("Create bucket with --object-lock-enabled-for-bucket"):
bucket_1 = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
date_obj_1 = datetime.utcnow() + timedelta(days=1)
s3_client.put_object(
bucket_1,
file_path,
object_lock_mode="COMPLIANCE",
object_lock_retain_until_date=date_obj_1.strftime("%Y-%m-%dT%H:%M:%S"),
object_lock_legal_hold_status="ON",
)
s3_helper.assert_object_lock_mode(
s3_client, bucket_1, file_name, "COMPLIANCE", date_obj_1, "ON"
)
@allure.title("Test S3: delete bucket")
def test_s3_delete_bucket(self, s3_client: S3ClientWrapper, simple_object_size: int):
file_path_1 = generate_file(simple_object_size)
file_name_1 = s3_helper.object_key_from_file_path(file_path_1)
file_path_2 = generate_file(simple_object_size)
file_name_2 = s3_helper.object_key_from_file_path(file_path_2)
bucket = s3_client.create_bucket()
with allure.step("Put two objects into bucket"):
s3_client.put_object(bucket, file_path_1)
s3_client.put_object(bucket, file_path_2)
s3_helper.check_objects_in_bucket(s3_client, bucket, [file_name_1, file_name_2])
with allure.step("Try to delete not empty bucket and get error"):
with pytest.raises(Exception, match=r".*The bucket you tried to delete is not empty.*"):
s3_client.delete_bucket(bucket)
with allure.step("Delete object in bucket"):
s3_client.delete_object(bucket, file_name_1)
s3_client.delete_object(bucket, file_name_2)
s3_helper.check_objects_in_bucket(s3_client, bucket, [])
with allure.step("Delete empty bucket"):
s3_client.delete_bucket(bucket)
with pytest.raises(Exception, match=r".*Not Found.*"):
s3_client.head_bucket(bucket)
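# Illustrative sketch, not part of the original suite: building the object-lock
# arguments used in the tests above; the "%Y-%m-%dT%H:%M:%S" format matches the
# retain-until date strings passed to put_object here.
def sketch_object_lock_args(days: int = 1) -> dict:
    from datetime import datetime, timedelta
    retain_until = datetime.utcnow() + timedelta(days=days)
    return {
        "object_lock_mode": "COMPLIANCE",
        "object_lock_retain_until_date": retain_until.strftime("%Y-%m-%dT%H:%M:%S"),
        "object_lock_legal_hold_status": "ON",
    }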

View file

@ -1,576 +0,0 @@
import logging
import os
from random import choice, choices
import allure
import pytest
from frostfs_testlib.resources.common import ASSETS_DIR
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.epoch import tick_epoch
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.utils.file_utils import (
generate_file,
generate_file_with_content,
get_file_content,
get_file_hash,
split_file,
)
logger = logging.getLogger("NeoLogger")
def pytest_generate_tests(metafunc: pytest.Metafunc):
if "s3_client" in metafunc.fixturenames:
metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
@allure.link(
"https://github.com/TrueCloudLab/frostfs-s3-gw#frostfs-s3-gw", name="frostfs-s3-gateway"
)
@pytest.mark.sanity
@pytest.mark.s3_gate
@pytest.mark.s3_gate_base
class TestS3Gate:
@allure.title("Test S3 Bucket API")
def test_s3_buckets(
self,
s3_client: S3ClientWrapper,
client_shell: Shell,
cluster: Cluster,
simple_object_size: int,
):
"""
Test base S3 Bucket API (Create/List/Head/Delete).
"""
file_path = generate_file(simple_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
with allure.step("Create buckets"):
bucket_1 = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
s3_helper.set_bucket_versioning(s3_client, bucket_1, VersioningStatus.ENABLED)
bucket_2 = s3_client.create_bucket()
with allure.step("Check buckets are presented in the system"):
buckets = s3_client.list_buckets()
assert bucket_1 in buckets, f"Expected bucket {bucket_1} is in the list"
assert bucket_2 in buckets, f"Expected bucket {bucket_2} is in the list"
with allure.step("Bucket must be empty"):
for bucket in (bucket_1, bucket_2):
objects_list = s3_client.list_objects(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with allure.step("Check buckets are visible with S3 head command"):
s3_client.head_bucket(bucket_1)
s3_client.head_bucket(bucket_2)
with allure.step("Check we can put/list object with S3 commands"):
version_id = s3_client.put_object(bucket_1, file_path)
s3_client.head_object(bucket_1, file_name)
bucket_objects = s3_client.list_objects(bucket_1)
assert (
file_name in bucket_objects
), f"Expected file {file_name} in objects list {bucket_objects}"
with allure.step("Try to delete not empty bucket and get error"):
with pytest.raises(Exception, match=r".*The bucket you tried to delete is not empty.*"):
s3_client.delete_bucket(bucket_1)
s3_client.head_bucket(bucket_1)
with allure.step(f"Delete empty bucket {bucket_2}"):
s3_client.delete_bucket(bucket_2)
tick_epoch(client_shell, cluster)
with allure.step(f"Check bucket {bucket_2} deleted"):
with pytest.raises(Exception, match=r".*Not Found.*"):
s3_client.head_bucket(bucket_2)
buckets = s3_client.list_buckets()
assert bucket_1 in buckets, f"Expected bucket {bucket_1} is in the list"
assert bucket_2 not in buckets, f"Expected bucket {bucket_2} is not in the list"
with allure.step(f"Delete object from {bucket_1}"):
s3_client.delete_object(bucket_1, file_name, version_id)
s3_helper.check_objects_in_bucket(s3_client, bucket_1, expected_objects=[])
with allure.step(f"Delete bucket {bucket_1}"):
s3_client.delete_bucket(bucket_1)
tick_epoch(client_shell, cluster)
with allure.step(f"Check bucket {bucket_1} deleted"):
with pytest.raises(Exception, match=r".*Not Found.*"):
s3_client.head_bucket(bucket_1)
@allure.title("Test S3 Object API")
@pytest.mark.parametrize(
"file_type", ["simple", "large"], ids=["Simple object", "Large object"]
)
def test_s3_api_object(
self,
s3_client: S3ClientWrapper,
file_type: str,
two_buckets: tuple[str, str],
simple_object_size: int,
complex_object_size: int,
):
"""
Test base S3 Object API (Put/Head/List) for simple and large objects.
"""
file_path = generate_file(
simple_object_size if file_type == "simple" else complex_object_size
)
file_name = s3_helper.object_key_from_file_path(file_path)
bucket_1, bucket_2 = two_buckets
for bucket in (bucket_1, bucket_2):
with allure.step("Bucket must be empty"):
objects_list = s3_client.list_objects(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
s3_client.put_object(bucket, file_path)
s3_client.head_object(bucket, file_name)
bucket_objects = s3_client.list_objects(bucket)
assert (
file_name in bucket_objects
), f"Expected file {file_name} in objects list {bucket_objects}"
with allure.step("Check object's attributes"):
for attrs in (["ETag"], ["ObjectSize", "StorageClass"]):
s3_client.get_object_attributes(bucket, file_name, attrs)
@allure.title("Test S3 Sync directory")
def test_s3_sync_dir(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int):
"""
Test checks sync directory with AWS CLI utility.
"""
file_path_1 = os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_1")
file_path_2 = os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_2")
key_to_path = {"test_file_1": file_path_1, "test_file_2": file_path_2}
if not isinstance(s3_client, AwsCliClient):
pytest.skip("This test is not supported with boto3 client")
generate_file_with_content(simple_object_size, file_path=file_path_1)
generate_file_with_content(simple_object_size, file_path=file_path_2)
s3_client.sync(bucket=bucket, dir_path=os.path.dirname(file_path_1))
with allure.step("Check objects are synced"):
objects = s3_client.list_objects(bucket)
with allure.step("Check these are the same objects"):
assert set(key_to_path.keys()) == set(
objects
), f"Expected all objects saved. Got {objects}"
for obj_key in objects:
got_object = s3_client.get_object(bucket, obj_key)
assert get_file_hash(got_object) == get_file_hash(
key_to_path.get(obj_key)
), "Expected hashes are the same"
@allure.title("Test S3 Object versioning")
def test_s3_api_versioning(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int
):
"""
Test checks basic versioning functionality for S3 bucket.
"""
version_1_content = "Version 1"
version_2_content = "Version 2"
file_name_simple = generate_file_with_content(simple_object_size, content=version_1_content)
obj_key = os.path.basename(file_name_simple)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_client.put_object(bucket, file_name_simple)
generate_file_with_content(
simple_object_size, file_path=file_name_simple, content=version_2_content
)
version_id_2 = s3_client.put_object(bucket, file_name_simple)
with allure.step("Check bucket shows all versions"):
versions = s3_client.list_objects_versions(bucket)
obj_versions = {
version.get("VersionId") for version in versions if version.get("Key") == obj_key
}
assert obj_versions == {
version_id_1,
version_id_2,
}, f"Expected object has versions: {version_id_1, version_id_2}"
with allure.step("Show information about particular version"):
for version_id in (version_id_1, version_id_2):
response = s3_client.head_object(bucket, obj_key, version_id=version_id)
assert "LastModified" in response, "Expected LastModified field"
assert "ETag" in response, "Expected ETag field"
assert (
response.get("VersionId") == version_id
), f"Expected VersionId is {version_id}"
assert response.get("ContentLength") != 0, "Expected ContentLength is not zero"
with allure.step("Check object's attributes"):
for version_id in (version_id_1, version_id_2):
got_attrs = s3_client.get_object_attributes(
bucket, obj_key, ["ETag"], version_id=version_id
)
if got_attrs:
assert (
got_attrs.get("VersionId") == version_id
), f"Expected VersionId is {version_id}"
with allure.step("Delete object and check it was deleted"):
response = s3_client.delete_object(bucket, obj_key)
version_id_delete = response.get("VersionId")
with pytest.raises(Exception, match=r".*Not Found.*"):
s3_client.head_object(bucket, obj_key)
with allure.step("Get content for all versions and check it is correct"):
for version, content in (
(version_id_2, version_2_content),
(version_id_1, version_1_content),
):
file_name = s3_client.get_object(bucket, obj_key, version_id=version)
got_content = get_file_content(file_name)
assert (
got_content == content
), f"Expected object content is\n{content}\nGot\n{got_content}"
with allure.step("Restore previous object version"):
s3_client.delete_object(bucket, obj_key, version_id=version_id_delete)
file_name = s3_client.get_object(bucket, obj_key)
got_content = get_file_content(file_name)
assert (
got_content == version_2_content
), f"Expected object content is\n{version_2_content}\nGot\n{got_content}"
@pytest.mark.s3_gate_multipart
@allure.title("Test S3 Object Multipart API")
def test_s3_api_multipart(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int
):
"""
Test checks S3 Multipart API (Create multipart upload/Abort multipart upload/List multipart upload/
Upload part/List parts/Complete multipart upload).
"""
parts_count = 3
file_name_large = generate_file(
simple_object_size * 1024 * 6 * parts_count
) # each part must be at least 5 MiB, the S3 minimum part size
object_key = s3_helper.object_key_from_file_path(file_name_large)
part_files = split_file(file_name_large, parts_count)
parts = []
uploads = s3_client.list_multipart_uploads(bucket)
assert not uploads, f"Expected there is no uploads in bucket {bucket}"
with allure.step("Create and abort multipart upload"):
upload_id = s3_client.create_multipart_upload(bucket, object_key)
uploads = s3_client.list_multipart_uploads(bucket)
assert uploads, f"Expected there one upload in bucket {bucket}"
assert (
uploads[0].get("Key") == object_key
), f"Expected correct key {object_key} in upload {uploads}"
assert (
uploads[0].get("UploadId") == upload_id
), f"Expected correct UploadId {upload_id} in upload {uploads}"
s3_client.abort_multipart_upload(bucket, object_key, upload_id)
uploads = s3_client.list_multipart_uploads(bucket)
assert not uploads, f"Expected there is no uploads in bucket {bucket}"
with allure.step("Create new multipart upload and upload several parts"):
upload_id = s3_client.create_multipart_upload(bucket, object_key)
for part_id, file_path in enumerate(part_files, start=1):
etag = s3_client.upload_part(bucket, object_key, upload_id, part_id, file_path)
parts.append((part_id, etag))
with allure.step("Check all parts are visible in bucket"):
got_parts = s3_client.list_parts(bucket, object_key, upload_id)
assert len(got_parts) == len(
part_files
), f"Expected {parts_count} parts, got\n{got_parts}"
s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)
uploads = s3_client.list_multipart_uploads(bucket)
assert not uploads, f"Expected there is no uploads in bucket {bucket}"
with allure.step("Check we can get whole object from bucket"):
got_object = s3_client.get_object(bucket, object_key)
assert get_file_hash(got_object) == get_file_hash(file_name_large)
self.check_object_attributes(s3_client, bucket, object_key, parts_count)
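# Illustrative sketch, not part of the original suite: the sizing arithmetic
# behind the large file above. Assuming simple_object_size is roughly 1 KiB,
# simple_object_size * 1024 * 6 gives about 6 MiB per part, above the 5 MiB
# minimum that S3 enforces for every part except the last one.
def sketch_part_size(simple_object_size: int, parts_count: int) -> int:
    min_part_size = 5 * 1024 * 1024  # 5 MiB
    part_size = (simple_object_size * 1024 * 6 * parts_count) // parts_count
    assert part_size >= min_part_size, "every part except the last must be >= 5 MiB"
    return part_size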
@allure.title("Test S3 Bucket tagging API")
def test_s3_api_bucket_tagging(self, s3_client: S3ClientWrapper, bucket: str):
"""
Test checks S3 Bucket tagging API (Put tag/Get tag).
"""
key_value_pair = [("some-key", "some-value"), ("some-key-2", "some-value-2")]
s3_client.put_bucket_tagging(bucket, key_value_pair)
s3_helper.check_tags_by_bucket(s3_client, bucket, key_value_pair)
s3_client.delete_bucket_tagging(bucket)
s3_helper.check_tags_by_bucket(s3_client, bucket, [])
@allure.title("Test S3 Object tagging API")
def test_s3_api_object_tagging(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int
):
"""
Test checks S3 Object tagging API (Put tag/Get tag/Update tag).
"""
key_value_pair_bucket = [("some-key", "some-value"), ("some-key-2", "some-value-2")]
key_value_pair_obj = [
("some-key-obj", "some-value-obj"),
("some-key--obj2", "some-value--obj2"),
]
key_value_pair_obj_new = [("some-key-obj-new", "some-value-obj-new")]
file_name_simple = generate_file(simple_object_size)
obj_key = s3_helper.object_key_from_file_path(file_name_simple)
s3_client.put_bucket_tagging(bucket, key_value_pair_bucket)
s3_client.put_object(bucket, file_name_simple)
for tags in (key_value_pair_obj, key_value_pair_obj_new):
s3_client.put_object_tagging(bucket, obj_key, tags)
s3_helper.check_tags_by_object(
s3_client,
bucket,
obj_key,
tags,
)
s3_client.delete_object_tagging(bucket, obj_key)
s3_helper.check_tags_by_object(s3_client, bucket, obj_key, [])
@allure.title("Test S3: Delete object & delete objects S3 API")
def test_s3_api_delete(
self,
s3_client: S3ClientWrapper,
two_buckets: tuple[str, str],
simple_object_size: int,
complex_object_size: int,
):
"""
Check delete_object and delete_objects S3 API operations. From the first bucket, objects are deleted one by one;
from the second bucket, several objects are deleted in a single call.
"""
max_obj_count = 20
max_delete_objects = 17
put_objects = []
file_paths = []
obj_sizes = [simple_object_size, complex_object_size]
bucket_1, bucket_2 = two_buckets
with allure.step(f"Generate {max_obj_count} files"):
for _ in range(max_obj_count):
file_paths.append(generate_file(choice(obj_sizes)))
for bucket in (bucket_1, bucket_2):
with allure.step(f"Bucket {bucket} must be empty as it just created"):
objects_list = s3_client.list_objects_v2(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
for file_path in file_paths:
s3_client.put_object(bucket, file_path)
put_objects.append(s3_helper.object_key_from_file_path(file_path))
with allure.step(f"Check all objects put in bucket {bucket} successfully"):
bucket_objects = s3_client.list_objects_v2(bucket)
assert set(put_objects) == set(
bucket_objects
), f"Expected all objects {put_objects} in objects list {bucket_objects}"
with allure.step("Delete some objects from bucket_1 one by one"):
objects_to_delete_b1 = choices(put_objects, k=max_delete_objects)
for obj in objects_to_delete_b1:
s3_client.delete_object(bucket_1, obj)
with allure.step("Check deleted objects are not visible in bucket bucket_1"):
bucket_objects = s3_client.list_objects_v2(bucket_1)
assert set(put_objects).difference(set(objects_to_delete_b1)) == set(
bucket_objects
), f"Expected all objects {put_objects} in objects list {bucket_objects}"
for object_key in objects_to_delete_b1:
with pytest.raises(Exception, match="The specified key does not exist"):
s3_client.get_object(bucket_1, object_key)
with allure.step("Delete some objects from bucket_2 at once"):
objects_to_delete_b2 = choices(put_objects, k=max_delete_objects)
s3_client.delete_objects(bucket_2, objects_to_delete_b2)
with allure.step("Check deleted objects are not visible in bucket bucket_2"):
objects_list = s3_client.list_objects_v2(bucket_2)
assert set(put_objects).difference(set(objects_to_delete_b2)) == set(
objects_list
), f"Expected all objects {put_objects} in objects list {bucket_objects}"
for object_key in objects_to_delete_b2:
with pytest.raises(Exception, match="The specified key does not exist"):
s3_client.get_object(bucket_2, object_key)
@allure.title("Test S3: Copy object to the same bucket")
def test_s3_copy_same_bucket(
self,
s3_client: S3ClientWrapper,
bucket: str,
complex_object_size: int,
simple_object_size: int,
):
"""
Test object can be copied to the same bucket.
# TODO: delete after test_s3_copy_object is merged
"""
file_path_simple, file_path_large = generate_file(simple_object_size), generate_file(
complex_object_size
)
file_name_simple = s3_helper.object_key_from_file_path(file_path_simple)
file_name_large = s3_helper.object_key_from_file_path(file_path_large)
bucket_objects = [file_name_simple, file_name_large]
with allure.step("Bucket must be empty"):
objects_list = s3_client.list_objects(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with allure.step("Put objects into bucket"):
for file_path in (file_path_simple, file_path_large):
s3_client.put_object(bucket, file_path)
with allure.step("Copy one object into the same bucket"):
copy_obj_path = s3_client.copy_object(bucket, file_name_simple)
bucket_objects.append(copy_obj_path)
s3_helper.check_objects_in_bucket(s3_client, bucket, bucket_objects)
with allure.step("Check copied object has the same content"):
got_copied_file = s3_client.get_object(bucket, copy_obj_path)
assert get_file_hash(file_path_simple) == get_file_hash(
got_copied_file
), "Hashes must be the same"
with allure.step("Delete one object from bucket"):
s3_client.delete_object(bucket, file_name_simple)
bucket_objects.remove(file_name_simple)
s3_helper.check_objects_in_bucket(
s3_client,
bucket,
expected_objects=bucket_objects,
unexpected_objects=[file_name_simple],
)
@allure.title("Test S3: Copy object to another bucket")
def test_s3_copy_to_another_bucket(
self,
s3_client: S3ClientWrapper,
two_buckets: tuple[str, str],
complex_object_size: int,
simple_object_size: int,
):
"""
Test object can be copied to another bucket.
# TODO: delete after test_s3_copy_object is merged
"""
file_path_simple, file_path_large = generate_file(simple_object_size), generate_file(
complex_object_size
)
file_name_simple = s3_helper.object_key_from_file_path(file_path_simple)
file_name_large = s3_helper.object_key_from_file_path(file_path_large)
bucket_1_objects = [file_name_simple, file_name_large]
bucket_1, bucket_2 = two_buckets
with allure.step("Buckets must be empty"):
for bucket in (bucket_1, bucket_2):
objects_list = s3_client.list_objects(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with allure.step("Put objects into one bucket"):
for file_path in (file_path_simple, file_path_large):
s3_client.put_object(bucket_1, file_path)
with allure.step("Copy object from first bucket into second"):
copy_obj_path_b2 = s3_client.copy_object(bucket_1, file_name_large, bucket=bucket_2)
s3_helper.check_objects_in_bucket(s3_client, bucket_1, expected_objects=bucket_1_objects)
s3_helper.check_objects_in_bucket(s3_client, bucket_2, expected_objects=[copy_obj_path_b2])
with allure.step("Check copied object has the same content"):
got_copied_file_b2 = s3_client.get_object(bucket_2, copy_obj_path_b2)
assert get_file_hash(file_path_large) == get_file_hash(
got_copied_file_b2
), "Hashes must be the same"
with allure.step("Delete one object from first bucket"):
s3_client.delete_object(bucket_1, file_name_simple)
bucket_1_objects.remove(file_name_simple)
s3_helper.check_objects_in_bucket(s3_client, bucket_1, expected_objects=bucket_1_objects)
s3_helper.check_objects_in_bucket(s3_client, bucket_2, expected_objects=[copy_obj_path_b2])
with allure.step("Delete one object from second bucket and check it is empty"):
s3_client.delete_object(bucket_2, copy_obj_path_b2)
s3_helper.check_objects_in_bucket(s3_client, bucket_2, expected_objects=[])
def check_object_attributes(
self, s3_client: S3ClientWrapper, bucket: str, object_key: str, parts_count: int
):
if not isinstance(s3_client, AwsCliClient):
logger.warning("Attributes check is not supported for boto3 implementation")
return
with allure.step("Check object's attributes"):
obj_parts = s3_client.get_object_attributes(
bucket, object_key, ["ObjectParts"], full_output=False
)
assert (
obj_parts.get("TotalPartsCount") == parts_count
), f"Expected TotalPartsCount is {parts_count}"
assert (
len(obj_parts.get("Parts")) == parts_count
), f"Expected Parts cunt is {parts_count}"
with allure.step("Check object's attribute max-parts"):
max_parts = 2
obj_parts = s3_client.get_object_attributes(
bucket,
object_key,
["ObjectParts"],
max_parts=max_parts,
full_output=False,
)
assert (
obj_parts.get("TotalPartsCount") == parts_count
), f"Expected TotalPartsCount is {parts_count}"
assert obj_parts.get("MaxParts") == max_parts, f"Expected MaxParts is {parts_count}"
assert (
len(obj_parts.get("Parts")) == max_parts
), f"Expected Parts count is {parts_count}"
with allure.step("Check object's attribute part-number-marker"):
part_number_marker = 3
obj_parts = s3_client.get_object_attributes(
bucket,
object_key,
["ObjectParts"],
part_number=part_number_marker,
full_output=False,
)
assert (
obj_parts.get("TotalPartsCount") == parts_count
), f"Expected TotalPartsCount is {parts_count}"
assert (
obj_parts.get("PartNumberMarker") == part_number_marker
), f"Expected PartNumberMarker is {part_number_marker}"
assert len(obj_parts.get("Parts")) == 1, f"Expected Parts count is {parts_count}"

View file

@ -1,223 +0,0 @@
import time
from datetime import datetime, timedelta
import allure
import pytest
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.utils.file_utils import generate_file, generate_file_with_content
def pytest_generate_tests(metafunc: pytest.Metafunc):
if "s3_client" in metafunc.fixturenames:
metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
@pytest.mark.sanity
@pytest.mark.s3_gate
@pytest.mark.s3_gate_locking
@pytest.mark.parametrize("version_id", [None, "second"])
class TestS3GateLocking:
@allure.title("Test S3: Checking the operation of retention period & legal lock on the object")
def test_s3_object_locking(
self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: int
):
file_path = generate_file(simple_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
retention_period = 2
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
with allure.step("Put several versions of object into bucket"):
s3_client.put_object(bucket, file_path)
file_name_1 = generate_file_with_content(simple_object_size, file_path=file_path)
version_id_2 = s3_client.put_object(bucket, file_name_1)
s3_helper.check_objects_in_bucket(s3_client, bucket, [file_name])
if version_id:
version_id = version_id_2
with allure.step(f"Put retention period {retention_period}min to object {file_name}"):
date_obj = datetime.utcnow() + timedelta(minutes=retention_period)
retention = {
"Mode": "COMPLIANCE",
"RetainUntilDate": date_obj,
}
s3_client.put_object_retention(bucket, file_name, retention, version_id)
s3_helper.assert_object_lock_mode(
s3_client, bucket, file_name, "COMPLIANCE", date_obj, "OFF"
)
with allure.step(f"Put legal hold to object {file_name}"):
s3_client.put_object_legal_hold(bucket, file_name, "ON", version_id)
s3_helper.assert_object_lock_mode(
s3_client, bucket, file_name, "COMPLIANCE", date_obj, "ON"
)
with allure.step("Fail with deleting object with legal hold and retention period"):
if version_id:
with pytest.raises(Exception):
# An error occurred (AccessDenied) when calling the DeleteObject operation (reached max retries: 0): Access Denied.
s3_client.delete_object(bucket, file_name, version_id)
with allure.step("Check retention period is no longer set on the uploaded object"):
time.sleep((retention_period + 1) * 60)
s3_helper.assert_object_lock_mode(
s3_client, bucket, file_name, "COMPLIANCE", date_obj, "ON"
)
with allure.step("Fail with deleting object with legal hold and retention period"):
if version_id:
with pytest.raises(Exception):
# An error occurred (AccessDenied) when calling the DeleteObject operation (reached max retries: 0): Access Denied.
s3_client.delete_object(bucket, file_name, version_id)
else:
s3_client.delete_object(bucket, file_name, version_id)
@allure.title("Test S3: Checking the impossibility to change the retention mode COMPLIANCE")
def test_s3_mode_compliance(
self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: int
):
file_path = generate_file(simple_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
retention_period = 2
retention_period_1 = 1
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
with allure.step("Put object into bucket"):
obj_version = s3_client.put_object(bucket, file_path)
if version_id:
version_id = obj_version
s3_helper.check_objects_in_bucket(s3_client, bucket, [file_name])
with allure.step(f"Put retention period {retention_period}min to object {file_name}"):
date_obj = datetime.utcnow() + timedelta(minutes=retention_period)
retention = {
"Mode": "COMPLIANCE",
"RetainUntilDate": date_obj,
}
s3_client.put_object_retention(bucket, file_name, retention, version_id)
s3_helper.assert_object_lock_mode(
s3_client, bucket, file_name, "COMPLIANCE", date_obj, "OFF"
)
with allure.step(
f"Try to change retention period {retention_period_1}min to object {file_name}"
):
date_obj = datetime.utcnow() + timedelta(minutes=retention_period_1)
retention = {
"Mode": "COMPLIANCE",
"RetainUntilDate": date_obj,
}
with pytest.raises(Exception):
s3_client.put_object_retention(bucket, file_name, retention, version_id)
@allure.title("Test S3: Checking the ability to change retention mode GOVERNANCE")
def test_s3_mode_governance(
self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: int
):
file_path = generate_file(simple_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
retention_period = 3
retention_period_1 = 2
retention_period_2 = 5
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
with allure.step("Put object into bucket"):
obj_version = s3_client.put_object(bucket, file_path)
if version_id:
version_id = obj_version
s3_helper.check_objects_in_bucket(s3_client, bucket, [file_name])
with allure.step(f"Put retention period {retention_period}min to object {file_name}"):
date_obj = datetime.utcnow() + timedelta(minutes=retention_period)
retention = {
"Mode": "GOVERNANCE",
"RetainUntilDate": date_obj,
}
s3_client.put_object_retention(bucket, file_name, retention, version_id)
s3_helper.assert_object_lock_mode(
s3_client, bucket, file_name, "GOVERNANCE", date_obj, "OFF"
)
with allure.step(
f"Try to change retention period {retention_period_1}min to object {file_name}"
):
date_obj = datetime.utcnow() + timedelta(minutes=retention_period_1)
retention = {
"Mode": "GOVERNANCE",
"RetainUntilDate": date_obj,
}
with pytest.raises(Exception):
s3_client.put_object_retention(bucket, file_name, retention, version_id)
with allure.step(f"Put new retention period {retention_period_2}min to object {file_name}"):
date_obj = datetime.utcnow() + timedelta(minutes=retention_period_2)
retention = {
"Mode": "GOVERNANCE",
"RetainUntilDate": date_obj,
}
s3_client.put_object_retention(bucket, file_name, retention, version_id, True)
s3_helper.assert_object_lock_mode(
s3_client, bucket, file_name, "GOVERNANCE", date_obj, "OFF"
)
@allure.title("Test S3: Checking if an Object Cannot Be Locked")
def test_s3_legal_hold(
self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: int
):
file_path = generate_file(simple_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=False)
with allure.step("Put object into bucket"):
obj_version = s3_client.put_object(bucket, file_path)
if version_id:
version_id = obj_version
s3_helper.check_objects_in_bucket(s3_client, bucket, [file_name])
with allure.step(f"Put legal hold to object {file_name}"):
with pytest.raises(Exception):
s3_client.put_object_legal_hold(bucket, file_name, "ON", version_id)
@pytest.mark.s3_gate
class TestS3GateLockingBucket:
@allure.title("Test S3: Bucket Lock")
def test_s3_bucket_lock(self, s3_client: S3ClientWrapper, simple_object_size: int):
file_path = generate_file(simple_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
configuration = {"Rule": {"DefaultRetention": {"Mode": "COMPLIANCE", "Days": 1}}}
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
with allure.step("PutObjectLockConfiguration with ObjectLockEnabled=False"):
s3_client.put_object_lock_configuration(bucket, configuration)
with allure.step("PutObjectLockConfiguration with ObjectLockEnabled=True"):
configuration["ObjectLockEnabled"] = "Enabled"
s3_client.put_object_lock_configuration(bucket, configuration)
with allure.step("GetObjectLockConfiguration"):
config = s3_client.get_object_lock_configuration(bucket)
configuration["Rule"]["DefaultRetention"]["Years"] = 0
assert config == configuration, f"Configurations must be equal {configuration}"
with allure.step("Put object into bucket"):
s3_client.put_object(bucket, file_path)
s3_helper.assert_object_lock_mode(
s3_client, bucket, file_name, "COMPLIANCE", None, "OFF", 1
)
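# Illustrative sketch, not part of the original suite: the two dictionaries the
# locking tests above revolve around -- a bucket-level default retention rule
# and a per-object retention setting. Field names follow the AWS S3 API.
def sketch_lock_payloads(retention_minutes: int = 2) -> tuple:
    from datetime import datetime, timedelta
    bucket_configuration = {
        "ObjectLockEnabled": "Enabled",
        "Rule": {"DefaultRetention": {"Mode": "COMPLIANCE", "Days": 1}},
    }
    object_retention = {
        "Mode": "GOVERNANCE",
        "RetainUntilDate": datetime.utcnow() + timedelta(minutes=retention_minutes),
    }
    return bucket_configuration, object_retention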

View file

@ -1,152 +0,0 @@
import allure
import pytest
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
from frostfs_testlib.steps.cli.container import list_objects, search_container_by_name
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash, split_file
PART_SIZE = 5 * 1024 * 1024
def pytest_generate_tests(metafunc: pytest.Metafunc):
if "s3_client" in metafunc.fixturenames:
metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
@pytest.mark.sanity
@pytest.mark.s3_gate
@pytest.mark.s3_gate_multipart
class TestS3GateMultipart(ClusterTestBase):
NO_SUCH_UPLOAD = (
"The upload ID may be invalid, or the upload may have been aborted or completed."
)
@allure.title("Test S3 Object Multipart API")
@pytest.mark.parametrize("bucket", [VersioningStatus.ENABLED], indirect=True)
def test_s3_object_multipart(self, s3_client: S3ClientWrapper, bucket: str):
parts_count = 5
file_name_large = generate_file(PART_SIZE * parts_count) # PART_SIZE is 5 MiB, the S3 minimum part size
object_key = s3_helper.object_key_from_file_path(file_name_large)
part_files = split_file(file_name_large, parts_count)
parts = []
with allure.step("Upload first part"):
upload_id = s3_client.create_multipart_upload(bucket, object_key)
uploads = s3_client.list_multipart_uploads(bucket)
etag = s3_client.upload_part(bucket, object_key, upload_id, 1, part_files[0])
parts.append((1, etag))
got_parts = s3_client.list_parts(bucket, object_key, upload_id)
assert len(got_parts) == 1, f"Expected {1} parts, got\n{got_parts}"
with allure.step("Upload last parts"):
for part_id, file_path in enumerate(part_files[1:], start=2):
etag = s3_client.upload_part(bucket, object_key, upload_id, part_id, file_path)
parts.append((part_id, etag))
got_parts = s3_client.list_parts(bucket, object_key, upload_id)
s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)
assert len(got_parts) == len(
part_files
), f"Expected {parts_count} parts, got\n{got_parts}"
with allure.step("Check upload list is empty"):
uploads = s3_client.list_multipart_uploads(bucket)
assert not uploads, f"Expected there is no uploads in bucket {bucket}"
with allure.step("Check we can get whole object from bucket"):
got_object = s3_client.get_object(bucket, object_key)
assert get_file_hash(got_object) == get_file_hash(file_name_large)
@allure.title("Test S3 Multipart abort")
@pytest.mark.parametrize("bucket", [VersioningStatus.ENABLED], indirect=True)
def test_s3_abort_multipart(
self,
s3_client: S3ClientWrapper,
default_wallet: str,
bucket: str,
simple_object_size: int,
complex_object_size: int,
):
complex_file = generate_file(complex_object_size)
simple_file = generate_file(simple_object_size)
to_upload = [complex_file, complex_file, simple_file]
files_count = len(to_upload)
upload_key = "multipart_abort"
with allure.step(f"Get related container_id for bucket '{bucket}'"):
container_id = search_container_by_name(
default_wallet, bucket, self.shell, self.cluster.default_rpc_endpoint
)
with allure.step("Create multipart upload"):
upload_id = s3_client.create_multipart_upload(bucket, upload_key)
with allure.step(f"Upload {files_count} files to multipart upload"):
for i, file in enumerate(to_upload, 1):
s3_client.upload_part(bucket, upload_key, upload_id, i, file)
with allure.step(f"Check that we have {files_count} files in bucket"):
parts = s3_client.list_parts(bucket, upload_key, upload_id)
assert len(parts) == files_count, f"Expected {files_count} parts, got\n{parts}"
with allure.step(f"Check that we have {files_count} files in container '{container_id}'"):
objects = list_objects(
default_wallet, self.shell, container_id, self.cluster.default_rpc_endpoint
)
assert (
len(objects) == files_count
), f"Expected {files_count} objects in container, got\n{objects}"
with allure.step("Abort multipart upload"):
s3_client.abort_multipart_upload(bucket, upload_key, upload_id)
uploads = s3_client.list_multipart_uploads(bucket)
assert not uploads, f"Expected no uploads in bucket {bucket}"
with allure.step("Check that we have no files in bucket since upload was aborted"):
with pytest.raises(Exception, match=self.NO_SUCH_UPLOAD):
s3_client.list_parts(bucket, upload_key, upload_id)
with allure.step("Check that we have no files in container since upload was aborted"):
objects = list_objects(
default_wallet, self.shell, container_id, self.cluster.default_rpc_endpoint
)
assert len(objects) == 0, f"Expected no objects in container, got\n{objects}"
@allure.title("Test S3 Upload Part Copy")
@pytest.mark.parametrize("bucket", [VersioningStatus.ENABLED], indirect=True)
def test_s3_multipart_copy(self, s3_client: S3ClientWrapper, bucket: str):
parts_count = 3
file_name_large = generate_file(PART_SIZE * parts_count) # PART_SIZE is 5 MiB, the S3 minimum part size
object_key = s3_helper.object_key_from_file_path(file_name_large)
part_files = split_file(file_name_large, parts_count)
parts = []
objs = []
with allure.step(f"Put {parts_count} objects in bucket"):
for part in part_files:
s3_client.put_object(bucket, part)
objs.append(s3_helper.object_key_from_file_path(part))
s3_helper.check_objects_in_bucket(s3_client, bucket, objs)
with allure.step("Create multipart upload object"):
upload_id = s3_client.create_multipart_upload(bucket, object_key)
uploads = s3_client.list_multipart_uploads(bucket)
assert uploads, f"Expected there are uploads in bucket {bucket}"
with allure.step("Upload parts to multipart upload"):
for part_id, obj_key in enumerate(objs, start=1):
etag = s3_client.upload_part_copy(
bucket, object_key, upload_id, part_id, f"{bucket}/{obj_key}"
)
parts.append((part_id, etag))
got_parts = s3_client.list_parts(bucket, object_key, upload_id)
with allure.step("Complete multipart upload"):
s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)
assert len(got_parts) == len(
part_files
), f"Expected {parts_count} parts, got\n{got_parts}"
with allure.step("Check we can get whole object from bucket"):
got_object = s3_client.get_object(bucket, object_key)
assert get_file_hash(got_object) == get_file_hash(file_name_large)
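# Illustrative sketch, not part of the original suite: the parts list that
# complete_multipart_upload expects in these tests -- ordered (part_number, etag)
# tuples, with copy sources expressed as "{bucket}/{key}" strings.
def sketch_copy_parts(s3_client, bucket: str, target_key: str, upload_id: str, source_keys: list) -> list:
    parts = []
    for part_id, source_key in enumerate(source_keys, start=1):
        etag = s3_client.upload_part_copy(bucket, target_key, upload_id, part_id, f"{bucket}/{source_key}")
        parts.append((part_id, etag))
    return parts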

View file

@ -1,997 +0,0 @@
import os
import string
import uuid
from datetime import datetime, timedelta
from random import choices, sample
from typing import Literal
import allure
import pytest
from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_PASS
from frostfs_testlib.resources.error_patterns import S3_MALFORMED_XML_REQUEST
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils import wallet_utils
from frostfs_testlib.utils.file_utils import (
concat_files,
generate_file,
generate_file_with_content,
get_file_hash,
)
def pytest_generate_tests(metafunc: pytest.Metafunc):
if "s3_client" in metafunc.fixturenames:
metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
@pytest.mark.sanity
@pytest.mark.s3_gate
@pytest.mark.s3_gate_object
class TestS3GateObject:
@allure.title("Set object size for current test")
@pytest.fixture
def object_size(self, request: pytest.FixtureRequest) -> int:
object_size = request.param
return object_size
@allure.title("Put objects in a bucket")
@pytest.fixture
def objects_in_bucket(
self,
s3_client: S3ClientWrapper,
bucket: str,
object_size: int,
request: pytest.FixtureRequest,
) -> list[str]:
objects: list[str] = []
objects_count = int(request.param)
with allure.step(
f"Put {objects_count} objects of size '{object_size}' bytes into bucket '{bucket}'"
):
for _ in range(objects_count):
file_path = generate_file(object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
objects.append(file_name)
s3_client.put_object(bucket, file_path)
return objects
@pytest.fixture
def second_wallet_public_key(self):
second_wallet = os.path.join(os.getcwd(), ASSETS_DIR, f"{str(uuid.uuid4())}.json")
wallet_utils.init_wallet(second_wallet, DEFAULT_WALLET_PASS)
public_key = wallet_utils.get_wallet_public_key(second_wallet, DEFAULT_WALLET_PASS)
yield public_key
@allure.title("Test S3: Copy object")
def test_s3_copy_object(
self, s3_client: S3ClientWrapper, two_buckets: tuple[str, str], simple_object_size: int
):
file_path = generate_file(simple_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
bucket_1_objects = [file_name]
bucket_1, bucket_2 = two_buckets
objects_list = s3_client.list_objects(bucket_1)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with allure.step("Put object into one bucket"):
s3_client.put_object(bucket_1, file_path)
with allure.step("Copy one object into the same bucket"):
copy_obj_path = s3_client.copy_object(bucket_1, file_name)
bucket_1_objects.append(copy_obj_path)
s3_helper.check_objects_in_bucket(s3_client, bucket_1, bucket_1_objects)
objects_list = s3_client.list_objects(bucket_2)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with allure.step("Copy object from first bucket into second"):
copy_obj_path_b2 = s3_client.copy_object(bucket_1, file_name, bucket=bucket_2)
s3_helper.check_objects_in_bucket(
s3_client, bucket_1, expected_objects=bucket_1_objects
)
s3_helper.check_objects_in_bucket(
s3_client, bucket_2, expected_objects=[copy_obj_path_b2]
)
with allure.step("Check copied object has the same content"):
got_copied_file_b2 = s3_client.get_object(bucket_2, copy_obj_path_b2)
assert get_file_hash(file_path) == get_file_hash(
got_copied_file_b2
), "Hashes must be the same"
with allure.step("Delete one object from first bucket"):
s3_client.delete_object(bucket_1, file_name)
bucket_1_objects.remove(file_name)
s3_helper.check_objects_in_bucket(
s3_client, bucket_1, expected_objects=bucket_1_objects
)
s3_helper.check_objects_in_bucket(
s3_client, bucket_2, expected_objects=[copy_obj_path_b2]
)
with allure.step("Copy one object into the same bucket"):
with pytest.raises(Exception):
s3_client.copy_object(bucket_1, file_name)
@allure.title("Test S3: Copy version of object")
def test_s3_copy_version_object(
self, s3_client: S3ClientWrapper, two_buckets: tuple[str, str], simple_object_size: int
):
version_1_content = "Version 1"
file_name_simple = generate_file_with_content(simple_object_size, content=version_1_content)
obj_key = os.path.basename(file_name_simple)
bucket_1, bucket_2 = two_buckets
s3_helper.set_bucket_versioning(s3_client, bucket_1, VersioningStatus.ENABLED)
with allure.step("Put object into bucket"):
s3_client.put_object(bucket_1, file_name_simple)
bucket_1_objects = [obj_key]
s3_helper.check_objects_in_bucket(s3_client, bucket_1, [obj_key])
with allure.step("Copy one object into the same bucket"):
copy_obj_path = s3_client.copy_object(bucket_1, obj_key)
bucket_1_objects.append(copy_obj_path)
s3_helper.check_objects_in_bucket(s3_client, bucket_1, bucket_1_objects)
s3_helper.set_bucket_versioning(s3_client, bucket_2, VersioningStatus.ENABLED)
with allure.step("Copy object from first bucket into second"):
copy_obj_path_b2 = s3_client.copy_object(bucket_1, obj_key, bucket=bucket_2)
s3_helper.check_objects_in_bucket(
s3_client, bucket_1, expected_objects=bucket_1_objects
)
s3_helper.check_objects_in_bucket(
s3_client, bucket_2, expected_objects=[copy_obj_path_b2]
)
with allure.step("Delete one object from first bucket and check object in bucket"):
s3_client.delete_object(bucket_1, obj_key)
bucket_1_objects.remove(obj_key)
s3_helper.check_objects_in_bucket(
s3_client, bucket_1, expected_objects=bucket_1_objects
)
with allure.step("Copy one object into the same bucket"):
with pytest.raises(Exception):
s3_client.copy_object(bucket_1, obj_key)
@allure.title("Test S3: Checking copy with acl")
def test_s3_copy_acl(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int):
version_1_content = "Version 1"
file_name_simple = generate_file_with_content(simple_object_size, content=version_1_content)
obj_key = os.path.basename(file_name_simple)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
s3_client.put_object(bucket, file_name_simple)
s3_helper.check_objects_in_bucket(s3_client, bucket, [obj_key])
with allure.step("Copy object and check acl attribute"):
copy_obj_path = s3_client.copy_object(bucket, obj_key, acl="public-read-write")
obj_acl = s3_client.get_object_acl(bucket, copy_obj_path)
s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="CanonicalUser")
@allure.title("Test S3: Copy object with metadata")
def test_s3_copy_metadata(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int
):
object_metadata = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
file_path = generate_file(simple_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
bucket_1_objects = [file_name]
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with allure.step("Put object into bucket"):
s3_client.put_object(bucket, file_path, metadata=object_metadata)
bucket_1_objects = [file_name]
s3_helper.check_objects_in_bucket(s3_client, bucket, bucket_1_objects)
with allure.step("Copy one object"):
copy_obj_path = s3_client.copy_object(bucket, file_name)
bucket_1_objects.append(copy_obj_path)
s3_helper.check_objects_in_bucket(s3_client, bucket, bucket_1_objects)
obj_head = s3_client.head_object(bucket, copy_obj_path)
assert (
obj_head.get("Metadata") == object_metadata
), f"Metadata must be {object_metadata}"
with allure.step("Copy one object with metadata"):
copy_obj_path = s3_client.copy_object(bucket, file_name, metadata_directive="COPY")
bucket_1_objects.append(copy_obj_path)
obj_head = s3_client.head_object(bucket, copy_obj_path)
assert (
obj_head.get("Metadata") == object_metadata
), f"Metadata must be {object_metadata}"
with allure.step("Copy one object with new metadata"):
object_metadata_1 = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
copy_obj_path = s3_client.copy_object(
bucket,
file_name,
metadata_directive="REPLACE",
metadata=object_metadata_1,
)
bucket_1_objects.append(copy_obj_path)
obj_head = s3_client.head_object(bucket, copy_obj_path)
assert (
obj_head.get("Metadata") == object_metadata_1
), f"Metadata must be {object_metadata_1}"
@allure.title("Test S3: Copy object with tagging")
def test_s3_copy_tagging(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int
):
object_tagging = [(f"{uuid.uuid4()}", f"{uuid.uuid4()}")]
file_path = generate_file(simple_object_size)
file_name_simple = s3_helper.object_key_from_file_path(file_path)
bucket_1_objects = [file_name_simple]
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
s3_client.put_object(bucket, file_path)
s3_client.put_object_tagging(bucket, file_name_simple, tags=object_tagging)
bucket_1_objects = [file_name_simple]
s3_helper.check_objects_in_bucket(s3_client, bucket, bucket_1_objects)
with allure.step("Copy one object without tag"):
copy_obj_path = s3_client.copy_object(bucket, file_name_simple)
got_tags = s3_client.get_object_tagging(bucket, copy_obj_path)
assert got_tags, f"Expected tags, got {got_tags}"
expected_tags = [{"Key": key, "Value": value} for key, value in object_tagging]
for tag in expected_tags:
assert tag in got_tags, f"Expected tag {tag} in {got_tags}"
with allure.step("Copy one object with tag"):
copy_obj_path_1 = s3_client.copy_object(
bucket, file_name_simple, tagging_directive="COPY"
)
got_tags = s3_client.get_object_tagging(bucket, copy_obj_path_1)
assert got_tags, f"Expected tags, got {got_tags}"
expected_tags = [{"Key": key, "Value": value} for key, value in object_tagging]
for tag in expected_tags:
assert tag in got_tags, f"Expected tag {tag} in {got_tags}"
with allure.step("Copy one object with new tag"):
tag_key = "tag1"
tag_value = uuid.uuid4()
new_tag = f"{tag_key}={tag_value}"
copy_obj_path = s3_client.copy_object(
bucket,
file_name_simple,
tagging_directive="REPLACE",
tagging=new_tag,
)
got_tags = s3_client.get_object_tagging(bucket, copy_obj_path)
assert got_tags, f"Expected tags, got {got_tags}"
expected_tags = [{"Key": tag_key, "Value": str(tag_value)}]
for tag in expected_tags:
assert tag in got_tags, f"Expected tag {tag} in {got_tags}"
@allure.title("Test S3: Delete version of object")
def test_s3_delete_versioning(
self,
s3_client: S3ClientWrapper,
bucket: str,
simple_object_size: int,
complex_object_size: int,
):
version_1_content = "Version 1"
version_2_content = "Version 2"
file_name_simple = generate_file_with_content(simple_object_size, content=version_1_content)
obj_key = os.path.basename(file_name_simple)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_client.put_object(bucket, file_name_simple)
file_name_1 = generate_file_with_content(
simple_object_size, file_path=file_name_simple, content=version_2_content
)
version_id_2 = s3_client.put_object(bucket, file_name_1)
with allure.step("Check bucket shows all versions"):
versions = s3_client.list_objects_versions(bucket)
obj_versions = {
version.get("VersionId") for version in versions if version.get("Key") == obj_key
}
assert obj_versions == {
version_id_1,
version_id_2,
}, f"Object should have versions: {version_id_1, version_id_2}"
with allure.step("Delete 1 version of object"):
delete_obj = s3_client.delete_object(bucket, obj_key, version_id=version_id_1)
versions = s3_client.list_objects_versions(bucket)
obj_versions = {
version.get("VersionId") for version in versions if version.get("Key") == obj_key
}
assert obj_versions == {version_id_2}, f"Object should have versions: {version_id_2}"
assert "DeleteMarker" not in delete_obj.keys(), "Delete markers should not be created"
with allure.step("Delete second version of object"):
delete_obj = s3_client.delete_object(bucket, obj_key, version_id=version_id_2)
versions = s3_client.list_objects_versions(bucket)
obj_versions = {
version.get("VersionId") for version in versions if version.get("Key") == obj_key
}
assert not obj_versions, "Expected object not found"
assert "DeleteMarker" not in delete_obj.keys(), "Delete markers should not be created"
with allure.step("Put new object into bucket"):
file_name_simple = generate_file(complex_object_size)
obj_key = os.path.basename(file_name_simple)
s3_client.put_object(bucket, file_name_simple)
with allure.step("Delete last object"):
delete_obj = s3_client.delete_object(bucket, obj_key)
versions = s3_client.list_objects_versions(bucket, True)
assert versions.get("DeleteMarkers", None), "Expected delete Marker"
assert "DeleteMarker" in delete_obj.keys(), "Expected delete Marker"
@allure.title("Test S3: bulk delete version of object")
def test_s3_bulk_delete_versioning(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int
):
version_1_content = "Version 1"
version_2_content = "Version 2"
version_3_content = "Version 3"
version_4_content = "Version 4"
file_name_1 = generate_file_with_content(simple_object_size, content=version_1_content)
obj_key = os.path.basename(file_name_1)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_client.put_object(bucket, file_name_1)
file_name_2 = generate_file_with_content(
simple_object_size, file_path=file_name_1, content=version_2_content
)
version_id_2 = s3_client.put_object(bucket, file_name_2)
file_name_3 = generate_file_with_content(
simple_object_size, file_path=file_name_1, content=version_3_content
)
version_id_3 = s3_client.put_object(bucket, file_name_3)
file_name_4 = generate_file_with_content(
simple_object_size, file_path=file_name_1, content=version_4_content
)
version_id_4 = s3_client.put_object(bucket, file_name_4)
version_ids = {version_id_1, version_id_2, version_id_3, version_id_4}
with allure.step("Check bucket shows all versions"):
versions = s3_client.list_objects_versions(bucket)
obj_versions = {
version.get("VersionId") for version in versions if version.get("Key") == obj_key
}
assert obj_versions == version_ids, f"Object should have versions: {version_ids}"
with allure.step("Delete two objects from bucket one by one"):
version_to_delete_b1 = sample(
[version_id_1, version_id_2, version_id_3, version_id_4], k=2
)
version_to_save = list(set(version_ids) - set(version_to_delete_b1))
for ver in version_to_delete_b1:
s3_client.delete_object(bucket, obj_key, ver)
with allure.step("Check bucket shows all versions"):
versions = s3_client.list_objects_versions(bucket)
obj_versions = [
version.get("VersionId") for version in versions if version.get("Key") == obj_key
]
# list.sort() returns None, so compare sorted copies instead of the results of in-place sorts
assert sorted(obj_versions) == sorted(version_to_save), f"Object should have versions: {version_to_save}"
@allure.title("Test S3: Get versions of object")
def test_s3_get_versioning(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int
):
version_1_content = "Version 1"
version_2_content = "Version 2"
file_name_simple = generate_file_with_content(simple_object_size, content=version_1_content)
obj_key = os.path.basename(file_name_simple)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_client.put_object(bucket, file_name_simple)
file_name_1 = generate_file_with_content(
simple_object_size, file_path=file_name_simple, content=version_2_content
)
version_id_2 = s3_client.put_object(bucket, file_name_1)
with allure.step("Get first version of object"):
object_1 = s3_client.get_object(bucket, obj_key, version_id_1, full_output=True)
assert (
object_1.get("VersionId") == version_id_1
), f"Get object with version {version_id_1}"
with allure.step("Get second version of object"):
object_2 = s3_client.get_object(bucket, obj_key, version_id_2, full_output=True)
assert (
object_2.get("VersionId") == version_id_2
), f"Get object with version {version_id_2}"
with allure.step("Get object"):
object_3 = s3_client.get_object(bucket, obj_key, full_output=True)
assert (
object_3.get("VersionId") == version_id_2
), f"Get object with version {version_id_2}"
@allure.title("Test S3: Get range")
def test_s3_get_range(
self,
s3_client: S3ClientWrapper,
bucket: str,
complex_object_size: int,
simple_object_size: int,
):
file_path = generate_file(complex_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
file_hash = get_file_hash(file_path)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_client.put_object(bucket, file_path)
file_name_1 = generate_file_with_content(simple_object_size, file_path=file_path)
version_id_2 = s3_client.put_object(bucket, file_name_1)
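# Each version is read back in three byte ranges (roughly thirds of the file) and the parts
# are concatenated to verify that ranged GETs reassemble into the original content.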
with allure.step("Get first version of object"):
object_1_part_1 = s3_client.get_object(
bucket,
file_name,
version_id_1,
object_range=[0, int(complex_object_size / 3)],
)
object_1_part_2 = s3_client.get_object(
bucket,
file_name,
version_id_1,
object_range=[int(complex_object_size / 3) + 1, 2 * int(complex_object_size / 3)],
)
object_1_part_3 = s3_client.get_object(
bucket,
file_name,
version_id_1,
object_range=[2 * int(complex_object_size / 3) + 1, complex_object_size],
)
con_file = concat_files([object_1_part_1, object_1_part_2, object_1_part_3])
assert get_file_hash(con_file) == file_hash, "Hashes must be the same"
with allure.step("Get second version of object"):
object_2_part_1 = s3_client.get_object(
bucket,
file_name,
version_id_2,
object_range=[0, int(simple_object_size / 3)],
)
object_2_part_2 = s3_client.get_object(
bucket,
file_name,
version_id_2,
object_range=[int(simple_object_size / 3) + 1, 2 * int(simple_object_size / 3)],
)
object_2_part_3 = s3_client.get_object(
bucket,
file_name,
version_id_2,
object_range=[2 * int(simple_object_size / 3) + 1, simple_object_size],
)
con_file_1 = concat_files([object_2_part_1, object_2_part_2, object_2_part_3])
assert get_file_hash(con_file_1) == get_file_hash(
file_name_1
), "Hashes must be the same"
with allure.step("Get object"):
object_3_part_1 = s3_client.get_object(
bucket, file_name, object_range=[0, int(simple_object_size / 3)]
)
object_3_part_2 = s3_client.get_object(
bucket,
file_name,
object_range=[int(simple_object_size / 3) + 1, 2 * int(simple_object_size / 3)],
)
object_3_part_3 = s3_client.get_object(
bucket,
file_name,
object_range=[2 * int(simple_object_size / 3) + 1, simple_object_size],
)
con_file = concat_files([object_3_part_1, object_3_part_2, object_3_part_3])
assert get_file_hash(con_file) == get_file_hash(file_name_1), "Hashes must be the same"
def copy_extend_list(self, original_list: list[str], n: int) -> list[str]:
"""Extend the list with own elements up to n elements"""
multiplier = n // len(original_list)
result_list = original_list.copy()
result_list = result_list * multiplier
for i in range(n - len(result_list)):
result_list.append(result_list[i])
return result_list
@allure.title("Test S3: Bulk deletion should be limited to 1000 objects")
@pytest.mark.parametrize(
"objects_in_bucket, object_size",
[(3, 10)],
indirect=True,
)
def test_s3_bulk_deletion_limit(
self, s3_client: S3ClientWrapper, bucket: str, objects_in_bucket: list[str]
):
# Extend deletion list to 1001 elements with same keys for test speed
objects_to_delete = self.copy_extend_list(objects_in_bucket, 1001)
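# S3 DeleteObjects accepts at most 1000 keys per request, so a 1001-key batch must be rejected
# while a 1000-key batch goes through.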
with allure.step("Delete 1001 objects and expect error"):
with pytest.raises(Exception, match=S3_MALFORMED_XML_REQUEST):
s3_client.delete_objects(bucket, objects_to_delete)
with allure.step("Delete 1000 objects without error"):
with expect_not_raises():
s3_client.delete_objects(bucket, objects_to_delete[:1000])
@allure.title("Test S3: Copy object with metadata")
@pytest.mark.smoke
def test_s3_head_object(
self,
s3_client: S3ClientWrapper,
bucket: str,
complex_object_size: int,
simple_object_size: int,
):
object_metadata = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
file_path = generate_file(complex_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_client.put_object(bucket, file_path, metadata=object_metadata)
file_name_1 = generate_file_with_content(simple_object_size, file_path=file_path)
version_id_2 = s3_client.put_object(bucket, file_name_1)
with allure.step("Get head of first version of object"):
response = s3_client.head_object(bucket, file_name)
assert "LastModified" in response, "Expected LastModified field"
assert "ETag" in response, "Expected ETag field"
assert response.get("Metadata") == {}, "Expected Metadata empty"
assert (
response.get("VersionId") == version_id_2
), f"Expected VersionId is {version_id_2}"
assert response.get("ContentLength") != 0, "Expected ContentLength is not zero"
with allure.step("Get head ob first version of object"):
response = s3_client.head_object(bucket, file_name, version_id=version_id_1)
assert "LastModified" in response, "Expected LastModified field"
assert "ETag" in response, "Expected ETag field"
assert (
response.get("Metadata") == object_metadata
), f"Expected Metadata is {object_metadata}"
assert (
response.get("VersionId") == version_id_1
), f"Expected VersionId is {version_id_1}"
assert response.get("ContentLength") != 0, "Expected ContentLength is not zero"
@allure.title("Test S3: list of object with versions")
@pytest.mark.parametrize("list_type", ["v1", "v2"])
def test_s3_list_object(
self, s3_client: S3ClientWrapper, list_type: str, bucket: str, complex_object_size: int
):
file_path_1 = generate_file(complex_object_size)
file_name = s3_helper.object_key_from_file_path(file_path_1)
file_path_2 = generate_file(complex_object_size)
file_name_2 = s3_helper.object_key_from_file_path(file_path_2)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
s3_client.put_object(bucket, file_path_1)
s3_client.put_object(bucket, file_path_2)
with allure.step("Get list of object"):
if list_type == "v1":
list_obj = s3_client.list_objects(bucket)
elif list_type == "v2":
list_obj = s3_client.list_objects_v2(bucket)
assert len(list_obj) == 2, "bucket should have 2 objects"
# list.sort() returns None, so compare sorted copies instead
assert sorted(list_obj) == sorted([file_name, file_name_2]), f"bucket should have object keys {file_name, file_name_2}"
with allure.step("Delete object"):
delete_obj = s3_client.delete_object(bucket, file_name)
if list_type == "v1":
list_obj_1 = s3_client.list_objects(bucket, full_output=True)
elif list_type == "v2":
list_obj_1 = s3_client.list_objects_v2(bucket, full_output=True)
contents = list_obj_1.get("Contents", [])
assert len(contents) == 1, "bucket should have only 1 object"
assert (
contents[0].get("Key") == file_name_2
), f"bucket should have object key {file_name_2}"
assert "DeleteMarker" in delete_obj.keys(), "Expected delete Marker"
@allure.title("Test S3: put object")
def test_s3_put_object(
self,
s3_client: S3ClientWrapper,
bucket: str,
complex_object_size: int,
simple_object_size: int,
):
file_path_1 = generate_file(complex_object_size)
file_name = s3_helper.object_key_from_file_path(file_path_1)
object_1_metadata = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
tag_key_1 = "tag1"
tag_value_1 = uuid.uuid4()
tag_1 = f"{tag_key_1}={tag_value_1}"
object_2_metadata = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
tag_key_2 = "tag2"
tag_value_2 = uuid.uuid4()
tag_2 = f"{tag_key_2}={tag_value_2}"
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.SUSPENDED)
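# With versioning suspended, re-putting the same key is expected to overwrite the object
# together with its metadata and tags instead of creating a new version.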
with allure.step("Put first object into bucket"):
s3_client.put_object(bucket, file_path_1, metadata=object_1_metadata, tagging=tag_1)
obj_head = s3_client.head_object(bucket, file_name)
assert obj_head.get("Metadata") == object_1_metadata, "Metadata must be the same"
got_tags = s3_client.get_object_tagging(bucket, file_name)
assert got_tags, f"Expected tags, got {got_tags}"
assert got_tags == [
{"Key": tag_key_1, "Value": str(tag_value_1)}
], "Tags must be the same"
with allure.step("Rewrite file into bucket"):
file_path_2 = generate_file_with_content(simple_object_size, file_path=file_path_1)
s3_client.put_object(bucket, file_path_2, metadata=object_2_metadata, tagging=tag_2)
obj_head = s3_client.head_object(bucket, file_name)
assert obj_head.get("Metadata") == object_2_metadata, "Metadata must be the same"
got_tags_1 = s3_client.get_object_tagging(bucket, file_name)
assert got_tags_1, f"Expected tags, got {got_tags_1}"
assert got_tags_1 == [
{"Key": tag_key_2, "Value": str(tag_value_2)}
], "Tags must be the same"
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
file_path_3 = generate_file(complex_object_size)
file_hash = get_file_hash(file_path_3)
file_name_3 = s3_helper.object_key_from_file_path(file_path_3)
object_3_metadata = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
tag_key_3 = "tag3"
tag_value_3 = uuid.uuid4()
tag_3 = f"{tag_key_3}={tag_value_3}"
with allure.step("Put third object into bucket"):
version_id_1 = s3_client.put_object(
bucket, file_path_3, metadata=object_3_metadata, tagging=tag_3
)
obj_head_3 = s3_client.head_object(bucket, file_name_3)
assert obj_head_3.get("Metadata") == object_3_metadata, "Matadata must be the same"
got_tags_3 = s3_client.get_object_tagging(bucket, file_name_3)
assert got_tags_3, f"Expected tags, got {got_tags_3}"
assert got_tags_3 == [
{"Key": tag_key_3, "Value": str(tag_value_3)}
], "Tags must be the same"
with allure.step("Put new version of file into bucket"):
file_path_4 = generate_file_with_content(simple_object_size, file_path=file_path_3)
version_id_2 = s3_client.put_object(bucket, file_path_4)
versions = s3_client.list_objects_versions(bucket)
obj_versions = {
version.get("VersionId")
for version in versions
if version.get("Key") == file_name_3
}
assert obj_versions == {
version_id_1,
version_id_2,
}, f"Object should have versions: {version_id_1, version_id_2}"
got_tags_4 = s3_client.get_object_tagging(bucket, file_name_3)
assert not got_tags_4, "No tags expected"
with allure.step("Get object"):
object_3 = s3_client.get_object(bucket, file_name_3, full_output=True)
assert (
object_3.get("VersionId") == version_id_2
), f"get object with version {version_id_2}"
object_3 = s3_client.get_object(bucket, file_name_3)
assert get_file_hash(file_path_4) == get_file_hash(object_3), "Hashes must be the same"
with allure.step("Get first version of object"):
object_4 = s3_client.get_object(bucket, file_name_3, version_id_1, full_output=True)
assert (
object_4.get("VersionId") == version_id_1
), f"get object with version {version_id_1}"
object_4 = s3_client.get_object(bucket, file_name_3, version_id_1)
assert file_hash == get_file_hash(object_4), "Hashes must be the same"
obj_head_3 = s3_client.head_object(bucket, file_name_3, version_id_1)
assert obj_head_3.get("Metadata") == object_3_metadata, "Metadata must be the same"
got_tags_3 = s3_client.get_object_tagging(bucket, file_name_3, version_id_1)
assert got_tags_3, f"Expected tags, got {got_tags_3}"
assert got_tags_3 == [
{"Key": tag_key_3, "Value": str(tag_value_3)}
], "Tags must be the same"
@allure.title("Test S3: put object with ACL")
@pytest.mark.parametrize("bucket_versioning", ["ENABLED", "SUSPENDED"])
def test_s3_put_object_acl(
self,
s3_client: S3ClientWrapper,
bucket_versioning: Literal["ENABLED", "SUSPENDED"],
bucket: str,
complex_object_size: int,
simple_object_size: int,
second_wallet_public_key: str,
):
file_path_1 = generate_file(complex_object_size)
file_name = s3_helper.object_key_from_file_path(file_path_1)
if bucket_versioning == "ENABLED":
status = VersioningStatus.ENABLED
elif bucket_versioning == "SUSPENDED":
status = VersioningStatus.SUSPENDED
s3_helper.set_bucket_versioning(s3_client, bucket, status)
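# Each put below overwrites the same key with a different canned ACL and verifies
# both the resulting grants and the object content.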
with allure.step("Put object with acl private"):
s3_client.put_object(bucket, file_path_1, acl="private")
obj_acl = s3_client.get_object_acl(bucket, file_name)
s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="CanonicalUser")
object_1 = s3_client.get_object(bucket, file_name)
assert get_file_hash(file_path_1) == get_file_hash(object_1), "Hashes must be the same"
with allure.step("Put object with acl public-read"):
file_path_2 = generate_file_with_content(simple_object_size, file_path=file_path_1)
s3_client.put_object(bucket, file_path_2, acl="public-read")
obj_acl = s3_client.get_object_acl(bucket, file_name)
s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")
object_2 = s3_client.get_object(bucket, file_name)
assert get_file_hash(file_path_2) == get_file_hash(object_2), "Hashes must be the same"
with allure.step("Put object with acl public-read-write"):
file_path_3 = generate_file_with_content(simple_object_size, file_path=file_path_1)
s3_client.put_object(bucket, file_path_3, acl="public-read-write")
obj_acl = s3_client.get_object_acl(bucket, file_name)
s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")
object_3 = s3_client.get_object(bucket, file_name)
assert get_file_hash(file_path_3) == get_file_hash(object_3), "Hashes must be the same"
with allure.step("Put object with acl authenticated-read"):
file_path_4 = generate_file_with_content(simple_object_size, file_path=file_path_1)
s3_client.put_object(bucket, file_path_4, acl="authenticated-read")
obj_acl = s3_client.get_object_acl(bucket, file_name)
s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")
object_4 = s3_client.get_object(bucket, file_name)
assert get_file_hash(file_path_4) == get_file_hash(object_4), "Hashes must be the same"
file_path_5 = generate_file(complex_object_size)
file_name_5 = s3_helper.object_key_from_file_path(file_path_5)
with allure.step("Put object with --grant-full-control id=mycanonicaluserid"):
generate_file_with_content(simple_object_size, file_path=file_path_5)
s3_client.put_object(
bucket,
file_path_5,
grant_full_control=f"id={second_wallet_public_key}",
)
obj_acl = s3_client.get_object_acl(bucket, file_name_5)
s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="CanonicalUser")
object_5 = s3_client.get_object(bucket, file_name_5)
assert get_file_hash(file_path_5) == get_file_hash(object_5), "Hashes must be the same"
with allure.step(
"Put object with --grant-read uri=http://acs.amazonaws.com/groups/global/AllUsers"
):
generate_file_with_content(simple_object_size, file_path=file_path_5)
s3_client.put_object(
bucket,
file_path_5,
grant_read="uri=http://acs.amazonaws.com/groups/global/AllUsers",
)
obj_acl = s3_client.get_object_acl(bucket, file_name_5)
s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")
object_6 = s3_client.get_object(bucket, file_name_5)
assert get_file_hash(file_path_5) == get_file_hash(object_6), "Hashes must be the same"
@allure.title("Test S3: put object with lock-mode")
def test_s3_put_object_lock_mode(
self, s3_client: S3ClientWrapper, complex_object_size: int, simple_object_size: int
):
file_path_1 = generate_file(complex_object_size)
file_name = s3_helper.object_key_from_file_path(file_path_1)
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
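# Object Lock parameters can only be used on a bucket created with object lock enabled
# (see create_bucket(object_lock_enabled_for_bucket=True) above).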
with allure.step(
"Put object with lock-mode GOVERNANCE lock-retain-until-date +1day, lock-legal-hold-status"
):
date_obj = datetime.utcnow() + timedelta(days=1)
s3_client.put_object(
bucket,
file_path_1,
object_lock_mode="GOVERNANCE",
object_lock_retain_until_date=date_obj.strftime("%Y-%m-%dT%H:%M:%S"),
object_lock_legal_hold_status="OFF",
)
s3_helper.assert_object_lock_mode(
s3_client, bucket, file_name, "GOVERNANCE", date_obj, "OFF"
)
with allure.step(
"Put new version of object with [--object-lock-mode COMPLIANCE] и [--object-lock-retain-until-date +3days]"
):
date_obj = datetime.utcnow() + timedelta(days=2)
generate_file_with_content(simple_object_size, file_path=file_path_1)
s3_client.put_object(
bucket,
file_path_1,
object_lock_mode="COMPLIANCE",
object_lock_retain_until_date=date_obj,
)
s3_helper.assert_object_lock_mode(
s3_client, bucket, file_name, "COMPLIANCE", date_obj, "OFF"
)
with allure.step(
"Put new version of object with [--object-lock-mode COMPLIANCE] и [--object-lock-retain-until-date +2days]"
):
date_obj = datetime.utcnow() + timedelta(days=3)
generate_file_with_content(simple_object_size, file_path=file_path_1)
s3_client.put_object(
bucket,
file_path_1,
object_lock_mode="COMPLIANCE",
object_lock_retain_until_date=date_obj,
object_lock_legal_hold_status="ON",
)
s3_helper.assert_object_lock_mode(
s3_client, bucket, file_name, "COMPLIANCE", date_obj, "ON"
)
with allure.step("Put object with lock-mode"):
with pytest.raises(
Exception,
match=r".*must both be supplied*",
):
# x-amz-object-lock-retain-until-date and x-amz-object-lock-mode must both be supplied
s3_client.put_object(bucket, file_path_1, object_lock_mode="COMPLIANCE")
with allure.step("Put object with lock-mode and past date"):
date_obj = datetime.utcnow() - timedelta(days=3)
with pytest.raises(
Exception,
match=r".*until date must be in the future*",
):
# The retain until date must be in the future
s3_client.put_object(
bucket,
file_path_1,
object_lock_mode="COMPLIANCE",
object_lock_retain_until_date=date_obj,
)
@allure.title("Test S3 Sync directory")
@pytest.mark.parametrize("sync_type", ["sync", "cp"])
def test_s3_sync_dir(
self,
s3_client: S3ClientWrapper,
sync_type: Literal["sync", "cp"],
bucket: str,
simple_object_size: int,
):
file_path_1 = os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_1")
file_path_2 = os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_2")
object_metadata = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
key_to_path = {"test_file_1": file_path_1, "test_file_2": file_path_2}
if not isinstance(s3_client, AwsCliClient):
pytest.skip("This test is not supported with boto3 client")
generate_file_with_content(simple_object_size, file_path=file_path_1)
generate_file_with_content(simple_object_size, file_path=file_path_2)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
# TODO: return ACL, when https://github.com/nspcc-dev/neofs-s3-gw/issues/685 will be closed
if sync_type == "sync":
s3_client.sync(
bucket=bucket,
dir_path=os.path.dirname(file_path_1),
# acl="public-read-write",
metadata=object_metadata,
)
elif sync_type == "cp":
s3_client.cp(
bucket=bucket,
dir_path=os.path.dirname(file_path_1),
# acl="public-read-write",
metadata=object_metadata,
)
with allure.step("Check objects are synced"):
objects = s3_client.list_objects(bucket)
assert set(key_to_path.keys()) == set(
objects
), f"Expected all abjects saved. Got {objects}"
with allure.step("Check these are the same objects"):
for obj_key in objects:
got_object = s3_client.get_object(bucket, obj_key)
assert get_file_hash(got_object) == get_file_hash(
key_to_path.get(obj_key)
), "Expected hashes are the same"
obj_head = s3_client.head_object(bucket, obj_key)
assert (
obj_head.get("Metadata") == object_metadata
), f"Metadata of object is {object_metadata}"
# Uncomment after https://github.com/nspcc-dev/neofs-s3-gw/issues/685 is solved
# obj_acl = s3_client.get_object_acl(bucket, obj_key)
# s3_helper.assert_s3_acl(acl_grants = obj_acl, permitted_users = "AllUsers")
@allure.title("Test S3 Put 10 nested level object")
def test_s3_put_10_folder(
self, s3_client: S3ClientWrapper, bucket: str, temp_directory, simple_object_size: int
):
path = "/".join(["".join(choices(string.ascii_letters, k=3)) for _ in range(10)])
file_path_1 = os.path.join(temp_directory, path, "test_file_1")
generate_file_with_content(simple_object_size, file_path=file_path_1)
file_name = s3_helper.object_key_from_file_path(file_path_1)
objects_list = s3_client.list_objects(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with allure.step("Put object"):
s3_client.put_object(bucket, file_path_1)
s3_helper.check_objects_in_bucket(s3_client, bucket, [file_name])
@allure.title("Test S3: Delete non-existing object from empty bucket")
def test_s3_delete_non_existing_object(self, s3_client: S3ClientWrapper, bucket: str):
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
objects_list = s3_client.list_objects_versions(bucket)
with allure.step("Check that bucket is empty"):
assert not objects_list, f"Expected empty bucket, got {objects_list}"
obj_key = "fake_object_key"
with allure.step("Delete non-existing object"):
delete_obj = s3_client.delete_object(bucket, obj_key)
# there should be no objects or delete markers in the bucket
assert "DeleteMarker" not in delete_obj.keys(), "Delete markers should not be created"
objects_list = s3_client.list_objects_versions(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
@allure.title("Test S3: Delete the same object twice")
def test_s3_delete_twice(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int
):
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
objects_list = s3_client.list_objects(bucket)
with allure.step("Check that bucket is empty"):
assert not objects_list, f"Expected empty bucket, got {objects_list}"
file_path = generate_file(simple_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
with allure.step("Put object into one bucket"):
s3_client.put_object(bucket, file_path)
with allure.step("Delete the object from the bucket"):
delete_object = s3_client.delete_object(bucket, file_name)
versions = s3_client.list_objects_versions(bucket)
obj_versions = {
version.get("VersionId") for version in versions if version.get("Key") == file_name
}
assert obj_versions, f"Object versions were not found {objects_list}"
assert "DeleteMarker" in delete_object.keys(), "Delete markers not found"
with allure.step("Delete the object from the bucket again"):
delete_object_2nd_attempt = s3_client.delete_object(bucket, file_name)
versions_2nd_attempt = s3_client.list_objects_versions(bucket)
assert (
delete_object.keys() == delete_object_2nd_attempt.keys()
), "Delete markers are not the same"
# check that nothing was changed
# checking here not VersionId only, but all data (for example LastModified)
assert versions == versions_2nd_attempt, "Versions are not the same"

View file

@ -1,166 +0,0 @@
import os
import allure
import pytest
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
from frostfs_testlib.steps.cli.container import search_container_by_name
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.steps.storage_policy import get_simple_object_copies
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils.file_utils import generate_file
def pytest_generate_tests(metafunc: pytest.Metafunc):
policy = f"{os.getcwd()}/pytest_tests/resources/files/policy.json"
if "s3_client" in metafunc.fixturenames:
metafunc.parametrize(
"s3_client, s3_policy",
[(AwsCliClient, policy), (Boto3ClientWrapper, policy)],
indirect=True,
ids=["aws cli", "boto3"],
)
@pytest.mark.s3_gate
class TestS3GatePolicy(ClusterTestBase):
@allure.title("Test S3: Verify bucket creation with retention policy applied")
def test_s3_bucket_location(
self, default_wallet: str, s3_client: S3ClientWrapper, simple_object_size: int
):
file_path_1 = generate_file(simple_object_size)
file_name_1 = s3_helper.object_key_from_file_path(file_path_1)
file_path_2 = generate_file(simple_object_size)
file_name_2 = s3_helper.object_key_from_file_path(file_path_2)
with allure.step("Create two buckets with different bucket configuration"):
bucket_1 = s3_client.create_bucket(location_constraint="complex")
s3_helper.set_bucket_versioning(s3_client, bucket_1, VersioningStatus.ENABLED)
bucket_2 = s3_client.create_bucket(location_constraint="rep-3")
s3_helper.set_bucket_versioning(s3_client, bucket_2, VersioningStatus.ENABLED)
list_buckets = s3_client.list_buckets()
assert (
bucket_1 in list_buckets and bucket_2 in list_buckets
), f"Expected two buckets {bucket_1, bucket_2}, got {list_buckets}"
with allure.step("Check head buckets"):
with expect_not_raises():
s3_client.head_bucket(bucket_1)
s3_client.head_bucket(bucket_2)
with allure.step("Put objects into buckets"):
version_id_1 = s3_client.put_object(bucket_1, file_path_1)
version_id_2 = s3_client.put_object(bucket_2, file_path_2)
s3_helper.check_objects_in_bucket(s3_client, bucket_1, [file_name_1])
s3_helper.check_objects_in_bucket(s3_client, bucket_2, [file_name_2])
with allure.step("Check bucket location"):
bucket_loc_1 = s3_client.get_bucket_location(bucket_1)
bucket_loc_2 = s3_client.get_bucket_location(bucket_2)
assert bucket_loc_1 == "complex"
assert bucket_loc_2 == "rep-3"
with allure.step("Check object policy"):
cid_1 = search_container_by_name(
default_wallet,
bucket_1,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
copies_1 = get_simple_object_copies(
wallet=default_wallet,
cid=cid_1,
oid=version_id_1,
shell=self.shell,
nodes=self.cluster.storage_nodes,
)
assert copies_1 == 1
cid_2 = search_container_by_name(
default_wallet,
bucket_2,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
copies_2 = get_simple_object_copies(
wallet=default_wallet,
cid=cid_2,
oid=version_id_2,
shell=self.shell,
nodes=self.cluster.storage_nodes,
)
assert copies_2 == 3
@allure.title("Test S3: bucket with unexisting location constraint")
def test_s3_bucket_wrong_location(self, s3_client: S3ClientWrapper):
with allure.step("Create bucket with unenxisting location constraint policy"):
with pytest.raises(Exception):
s3_client.create_bucket(location_constraint="UNEXISTING LOCATION CONSTRAINT")
@allure.title("Test S3: bucket policy ")
def test_s3_bucket_policy(self, s3_client: S3ClientWrapper):
with allure.step("Create bucket with default policy"):
bucket = s3_client.create_bucket()
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with allure.step("GetBucketPolicy"):
s3_client.get_bucket_policy(bucket)
with allure.step("Put new policy"):
custom_policy = f"file://{os.getcwd()}/pytest_tests/resources/files/bucket_policy.json"
custom_policy = {
"Version": "2008-10-17",
"Id": "aaaa-bbbb-cccc-dddd",
"Statement": [
{
"Sid": "AddPerm",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": ["s3:GetObject"],
"Resource": [f"arn:aws:s3:::{bucket}/*"],
}
],
}
s3_client.put_bucket_policy(bucket, custom_policy)
with allure.step("GetBucketPolicy"):
policy_1 = s3_client.get_bucket_policy(bucket)
print(policy_1)
@allure.title("Test S3: bucket CORS")
def test_s3_cors(self, s3_client: S3ClientWrapper):
with allure.step("Create bucket without cors"):
bucket = s3_client.create_bucket()
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with pytest.raises(Exception):
bucket_cors = s3_client.get_bucket_cors(bucket)
with allure.step("Put bucket cors"):
cors = {
"CORSRules": [
{
"AllowedOrigins": ["http://www.example.com"],
"AllowedHeaders": ["*"],
"AllowedMethods": ["PUT", "POST", "DELETE"],
"MaxAgeSeconds": 3000,
"ExposeHeaders": ["x-amz-server-side-encryption"],
},
{
"AllowedOrigins": ["*"],
"AllowedHeaders": ["Authorization"],
"AllowedMethods": ["GET"],
"MaxAgeSeconds": 3000,
},
]
}
s3_client.put_bucket_cors(bucket, cors)
bucket_cors = s3_client.get_bucket_cors(bucket)
assert bucket_cors == cors.get(
"CORSRules"
), f"Expected CORSRules must be {cors.get('CORSRules')}"
with allure.step("delete bucket cors"):
s3_client.delete_bucket_cors(bucket)
with pytest.raises(Exception):
bucket_cors = s3_client.get_bucket_cors(bucket)

View file

@ -1,111 +0,0 @@
from random import choice
from string import ascii_letters
from typing import Tuple
import allure
import pytest
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.utils.file_utils import generate_file
def pytest_generate_tests(metafunc: pytest.Metafunc):
if "s3_client" in metafunc.fixturenames:
metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
@pytest.mark.sanity
@pytest.mark.s3_gate
@pytest.mark.s3_gate_tagging
class TestS3GateTagging:
@staticmethod
def create_tags(count: int) -> list:
tags = []
for _ in range(count):
tag_key = "".join(choice(ascii_letters) for _ in range(8))
tag_value = "".join(choice(ascii_letters) for _ in range(12))
tags.append((tag_key, tag_value))
return tags
@allure.title("Test S3: Object tagging")
def test_s3_object_tagging(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int
):
file_path = generate_file(simple_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
with allure.step("Put with 3 tags object into bucket"):
tag_1 = "Tag1=Value1"
s3_client.put_object(bucket, file_path, tagging=tag_1)
got_tags = s3_client.get_object_tagging(bucket, file_name)
assert got_tags, f"Expected tags, got {got_tags}"
assert got_tags == [{"Key": "Tag1", "Value": "Value1"}], "Tags must be the same"
with allure.step("Put 10 new tags for object"):
tags_2 = self.create_tags(10)
s3_client.put_object_tagging(bucket, file_name, tags=tags_2)
s3_helper.check_tags_by_object(
s3_client, bucket, file_name, tags_2, [("Tag1", "Value1")]
)
with allure.step("Put 10 extra new tags for object"):
tags_3 = self.create_tags(10)
s3_client.put_object_tagging(bucket, file_name, tags=tags_3)
s3_helper.check_tags_by_object(s3_client, bucket, file_name, tags_3, tags_2)
with allure.step("Copy one object with tag"):
copy_obj_path_1 = s3_client.copy_object(bucket, file_name, tagging_directive="COPY")
s3_helper.check_tags_by_object(s3_client, bucket, copy_obj_path_1, tags_3, tags_2)
with allure.step("Put 11 new tags to object and expect an error"):
tags_4 = self.create_tags(11)
with pytest.raises(Exception, match=r".*Object tags cannot be greater than 10*"):
# An error occurred (BadRequest) when calling the PutObjectTagging operation: Object tags cannot be greater than 10
s3_client.put_object_tagging(bucket, file_name, tags=tags_4)
with allure.step("Put empty tag"):
tags_5 = []
s3_client.put_object_tagging(bucket, file_name, tags=tags_5)
s3_helper.check_tags_by_object(s3_client, bucket, file_name, [])
with allure.step("Put 10 object tags"):
tags_6 = self.create_tags(10)
s3_client.put_object_tagging(bucket, file_name, tags=tags_6)
s3_helper.check_tags_by_object(s3_client, bucket, file_name, tags_6)
with allure.step("Delete tags by delete-object-tagging"):
s3_client.delete_object_tagging(bucket, file_name)
s3_helper.check_tags_by_object(s3_client, bucket, file_name, [])
@allure.title("Test S3: bucket tagging")
def test_s3_bucket_tagging(self, s3_client: S3ClientWrapper, bucket: str):
with allure.step("Put 10 bucket tags"):
tags_1 = self.create_tags(10)
s3_client.put_bucket_tagging(bucket, tags_1)
s3_helper.check_tags_by_bucket(s3_client, bucket, tags_1)
with allure.step("Put new 10 bucket tags"):
tags_2 = self.create_tags(10)
s3_client.put_bucket_tagging(bucket, tags_2)
s3_helper.check_tags_by_bucket(s3_client, bucket, tags_2, tags_1)
with allure.step("Put 11 new tags to bucket and expect an error"):
tags_3 = self.create_tags(11)
with pytest.raises(Exception, match=r".*Object tags cannot be greater than 10.*"):
# An error occurred (BadRequest) when calling the PutBucketTagging operation (reached max retries: 0): Object tags cannot be greater than 10
s3_client.put_bucket_tagging(bucket, tags_3)
with allure.step("Put empty tag"):
tags_4 = []
s3_client.put_bucket_tagging(bucket, tags_4)
s3_helper.check_tags_by_bucket(s3_client, bucket, tags_4)
with allure.step("Put new 10 bucket tags"):
tags_5 = self.create_tags(10)
s3_client.put_bucket_tagging(bucket, tags_5)
s3_helper.check_tags_by_bucket(s3_client, bucket, tags_5, tags_2)
with allure.step("Delete tags by delete-bucket-tagging"):
s3_client.delete_bucket_tagging(bucket)
s3_helper.check_tags_by_bucket(s3_client, bucket, [])

View file

@ -1,85 +0,0 @@
import os
import allure
import pytest
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.utils.file_utils import generate_file, generate_file_with_content
def pytest_generate_tests(metafunc: pytest.Metafunc):
if "s3_client" in metafunc.fixturenames:
metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
@pytest.mark.sanity
@pytest.mark.s3_gate
@pytest.mark.s3_gate_versioning
class TestS3GateVersioning:
@allure.title("Test S3: try to disable versioning")
def test_s3_version_off(self, s3_client: S3ClientWrapper):
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
with pytest.raises(Exception):
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.SUSPENDED)
@allure.title("Test S3: Enable and disable versioning")
def test_s3_version(self, s3_client: S3ClientWrapper, simple_object_size: int):
file_path = generate_file(simple_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
bucket_objects = [file_name]
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=False)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.SUSPENDED)
with allure.step("Put object into bucket"):
s3_client.put_object(bucket, file_path)
objects_list = s3_client.list_objects(bucket)
assert (
objects_list == bucket_objects
), f"Expected list with single objects in bucket, got {objects_list}"
object_version = s3_client.list_objects_versions(bucket)
actual_version = [
version.get("VersionId")
for version in object_version
if version.get("Key") == file_name
]
assert actual_version == [
"null"
], f"Expected version is null in list-object-versions, got {object_version}"
object_0 = s3_client.head_object(bucket, file_name)
assert (
object_0.get("VersionId") == "null"
), f"Expected version is null in head-object, got {object_0.get('VersionId')}"
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_client.put_object(bucket, file_path)
file_name_1 = generate_file_with_content(simple_object_size, file_path=file_path)
version_id_2 = s3_client.put_object(bucket, file_name_1)
with allure.step("Check bucket shows all versions"):
versions = s3_client.list_objects_versions(bucket)
obj_versions = [
version.get("VersionId") for version in versions if version.get("Key") == file_name
]
# list.sort() returns None, so compare sorted copies instead
assert sorted(obj_versions) == sorted([version_id_1, version_id_2, "null"]), f"Expected object to have versions: {version_id_1, version_id_2, 'null'}"
with allure.step("Get object"):
object_1 = s3_client.get_object(bucket, file_name, full_output=True)
assert (
object_1.get("VersionId") == version_id_2
), f"Get object with version {version_id_2}"
with allure.step("Get first version of object"):
object_2 = s3_client.get_object(bucket, file_name, version_id_1, full_output=True)
assert (
object_2.get("VersionId") == version_id_1
), f"Get object with version {version_id_1}"
with allure.step("Get second version of object"):
object_3 = s3_client.get_object(bucket, file_name, version_id_2, full_output=True)
assert (
object_3.get("VersionId") == version_id_2
), f"Get object with version {version_id_2}"

View file

@ -1,82 +0,0 @@
import logging
import os
from http import HTTPStatus
from re import match
import allure
import pytest
import requests
from frostfs_testlib.hosting import Hosting
from frostfs_testlib.resources.common import ASSETS_DIR
from frostfs_testlib.utils.env_utils import read_env_properties, save_env_properties
from frostfs_testlib.utils.version_utils import get_remote_binaries_versions
from pytest import FixtureRequest
from pytest_tests.resources.common import BIN_VERSIONS_FILE
logger = logging.getLogger("NeoLogger")
@allure.title("Check binaries versions")
@pytest.mark.sanity
@pytest.mark.check_binaries
@pytest.mark.skip("Skipped due to https://j.yadro.com/browse/OBJECT-628")
def test_binaries_versions(request: FixtureRequest, hosting: Hosting):
"""
Compare binaries versions from external source (url) and deployed on servers.
"""
if not BIN_VERSIONS_FILE:
pytest.skip("File with binaries and versions was not provided")
binaries_to_check = download_versions_info(BIN_VERSIONS_FILE)
with allure.step("Get binaries versions from servers"):
got_versions = get_remote_binaries_versions(hosting)
environment_dir = request.config.getoption("--alluredir") or ASSETS_DIR
env_file = os.path.join(environment_dir, "environment.properties")
env_properties = read_env_properties(env_file)
# compare versions from servers and file
failed_versions = {}
additional_env_properties = {}
for binary, version in binaries_to_check.items():
actual_version = got_versions.get(binary)
if actual_version != version:
failed_versions[binary] = f"Expected version {version}, found version {actual_version}"
# If some binary was not listed in the env properties file, let's add it
# so that we have full information about versions in allure report
if env_properties and binary not in env_properties:
additional_env_properties[binary] = actual_version
if env_properties and additional_env_properties:
save_env_properties(env_file, additional_env_properties)
# create clear beautiful error with aggregation info
if failed_versions:
msg = "\n".join({f"{binary}: {error}" for binary, error in failed_versions.items()})
raise AssertionError(f"Found binaries with unexpected versions:\n{msg}")
@allure.step("Download versions info from {url}")
def download_versions_info(url: str) -> dict:
binaries_to_version = {}
response = requests.get(url)
assert (
response.status_code == HTTPStatus.OK
), f"Got {response.status_code} code. Content {response.json()}"
content = response.text
assert content, f"Expected file with content, got {response}"
for line in content.split("\n"):
m = match("(.*)=(.*)", line)
if not m:
logger.warning(f"Could not get binary/version from {line}")
continue
bin_name, bin_version = m.group(1), m.group(2)
binaries_to_version[bin_name] = bin_version
return binaries_to_version

View file

@ -0,0 +1,251 @@
import logging
import os
from random import choice
from time import sleep
import allure
import pytest
from common import COMPLEX_OBJ_SIZE
from container import create_container
from epoch import get_epoch, tick_epoch
from python_keywords.http_gate import (get_via_http_curl, get_via_http_gate,
get_via_http_gate_by_attribute, get_via_zip_http_gate,
upload_via_http_gate, upload_via_http_gate_curl)
from python_keywords.neofs_verbs import get_object, put_object
from python_keywords.storage_policy import get_nodes_without_object
from python_keywords.utility_keywords import generate_file, get_file_hash
from wellknown_acl import PUBLIC_ACL
logger = logging.getLogger('NeoLogger')
CLEANUP_TIMEOUT = 10
@allure.link('https://github.com/nspcc-dev/neofs-http-gw#neofs-http-gateway', name='neofs-http-gateway')
@allure.link('https://github.com/nspcc-dev/neofs-http-gw#uploading', name='uploading')
@allure.link('https://github.com/nspcc-dev/neofs-http-gw#downloading', name='downloading')
@pytest.mark.http_gate
class TestHttpGate:
PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
@pytest.fixture(scope="class", autouse=True)
@allure.title('[Class/Autouse]: Prepare wallet and deposit')
def prepare_wallet(self, prepare_wallet_and_deposit):
TestHttpGate.wallet = prepare_wallet_and_deposit
@allure.title('Test Put over gRPC, Get over HTTP')
def test_put_grpc_get_http(self):
"""
Test that object can be put using gRPC interface and get using HTTP.
Steps:
1. Create simple and large objects.
2. Put objects using gRPC (neofs-cli).
3. Download objects using HTTP gate (https://github.com/nspcc-dev/neofs-http-gw#downloading).
4. Get objects using gRPC (neofs-cli).
5. Compare hashes for got objects.
6. Compare hashes for got and original objects.
Expected result:
Hashes must be the same.
"""
cid = create_container(self.wallet, rule=self.PLACEMENT_RULE, basic_acl=PUBLIC_ACL)
file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE)
with allure.step('Put objects using gRPC'):
oid_simple = put_object(wallet=self.wallet, path=file_path_simple, cid=cid)
oid_large = put_object(wallet=self.wallet, path=file_path_large, cid=cid)
for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)):
self.get_object_and_verify_hashes(oid, file_path, self.wallet, cid)
@allure.link('https://github.com/nspcc-dev/neofs-http-gw#uploading', name='uploading')
@allure.link('https://github.com/nspcc-dev/neofs-http-gw#downloading', name='downloading')
@pytest.mark.sanity
@allure.title('Test Put over HTTP, Get over HTTP')
def test_put_http_get_http(self):
"""
Test that object can be put and get using HTTP interface.
Steps:
1. Create simple and large objects.
2. Upload objects using HTTP (https://github.com/nspcc-dev/neofs-http-gw#uploading).
3. Download objects using HTTP gate (https://github.com/nspcc-dev/neofs-http-gw#downloading).
4. Compare hashes for got and original objects.
Expected result:
Hashes must be the same.
"""
cid = create_container(self.wallet, rule=self.PLACEMENT_RULE, basic_acl=PUBLIC_ACL)
file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE)
with allure.step('Put objects using HTTP'):
oid_simple = upload_via_http_gate(cid=cid, path=file_path_simple)
oid_large = upload_via_http_gate(cid=cid, path=file_path_large)
for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)):
self.get_object_and_verify_hashes(oid, file_path, self.wallet, cid)
@allure.link('https://github.com/nspcc-dev/neofs-http-gw#by-attributes', name='download by attributes')
@allure.title('Test Put over HTTP, Get over HTTP with headers')
@pytest.mark.parametrize(
'attributes',
[
{'fileName': 'simple_obj_filename'},
{'file-Name': 'simple obj filename'},
{'cat%jpeg': 'cat%jpeg'}
],
ids=['simple', 'hyphen', 'percent']
)
def test_put_http_get_http_with_headers(self, attributes: dict):
"""
Test that object can be downloaded using different attributes in HTTP header.
Steps:
1. Create simple and large objects.
2. Upload objects using HTTP with particular attributes in the header.
3. Download objects by attributes using HTTP gate (https://github.com/nspcc-dev/neofs-http-gw#by-attributes).
4. Compare hashes for got and original objects.
Expected result:
Hashes must be the same.
"""
cid = create_container(self.wallet, rule=self.PLACEMENT_RULE, basic_acl=PUBLIC_ACL)
file_path = generate_file()
with allure.step('Put objects using HTTP with attribute'):
headers = self._attr_into_header(attributes)
oid = upload_via_http_gate(cid=cid, path=file_path, headers=headers)
self.get_object_by_attr_and_verify_hashes(oid, file_path, cid, attributes)
@allure.title('Test Expiration-Epoch in HTTP header')
def test_expiration_epoch_in_http(self):
cid = create_container(self.wallet, rule=self.PLACEMENT_RULE, basic_acl=PUBLIC_ACL)
file_path = generate_file()
object_not_found_err = 'object not found'
oids = []
curr_epoch = get_epoch()
epochs = (curr_epoch, curr_epoch + 1, curr_epoch + 2, curr_epoch + 100)
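# An object whose Neofs-Expiration-Epoch attribute is behind the current epoch is expected
# to become unavailable, so the uploaded objects should disappear group by group as epochs tick below.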
for epoch in epochs:
headers = {'X-Attribute-Neofs-Expiration-Epoch': str(epoch)}
with allure.step('Put objects using HTTP with attribute Expiration-Epoch'):
oids.append(upload_via_http_gate(cid=cid, path=file_path, headers=headers))
assert len(oids) == len(epochs), 'Expected all objects to be put successfully'
with allure.step('All objects can be retrieved'):
for oid in oids:
get_via_http_gate(cid=cid, oid=oid)
for expired_objects, not_expired_objects in [(oids[:1], oids[1:]), (oids[:2], oids[2:])]:
tick_epoch()
sleep(CLEANUP_TIMEOUT)
for oid in expired_objects:
self.try_to_get_object_and_expect_error(
cid=cid,
oid=oid,
expected_err=object_not_found_err
)
with allure.step('Other objects can be retrieved'):
for oid in not_expired_objects:
get_via_http_gate(cid=cid, oid=oid)
@allure.title('Test Zip in HTTP header')
def test_zip_in_http(self):
cid = create_container(self.wallet, rule=self.PLACEMENT_RULE, basic_acl=PUBLIC_ACL)
file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE)
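# Objects uploaded with an X-Attribute-FilePath under a common prefix are expected to be
# downloadable from the HTTP gate as a single zip archive (see get_via_zip_http_gate below).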
common_prefix = 'my_files'
headers1 = {'X-Attribute-FilePath': f'{common_prefix}/file1'}
headers2 = {'X-Attribute-FilePath': f'{common_prefix}/file2'}
upload_via_http_gate(cid=cid, path=file_path_simple, headers=headers1)
upload_via_http_gate(cid=cid, path=file_path_large, headers=headers2)
dir_path = get_via_zip_http_gate(cid=cid, prefix=common_prefix)
with allure.step('Verify hashes'):
assert get_file_hash(f'{dir_path}/file1') == get_file_hash(file_path_simple)
assert get_file_hash(f'{dir_path}/file2') == get_file_hash(file_path_large)
@pytest.mark.curl
@pytest.mark.long
@allure.title('Test Put over HTTP/Curl, Get over HTTP/Curl for large object')
def test_put_http_get_http_large_file(self):
"""
This test checks upload and download of a 'large' object using curl. Here, a large object is up to 20 MB in size.
"""
cid = create_container(self.wallet, rule=self.PLACEMENT_RULE, basic_acl=PUBLIC_ACL)
obj_size = int(os.getenv('BIG_OBJ_SIZE', COMPLEX_OBJ_SIZE))
file_path = generate_file(obj_size)
with allure.step('Put objects using HTTP'):
oid_gate = upload_via_http_gate(cid=cid, path=file_path)
oid_curl = upload_via_http_gate_curl(cid=cid, filepath=file_path, large_object=True)
self.get_object_and_verify_hashes(oid_gate, file_path, self.wallet, cid)
self.get_object_and_verify_hashes(oid_curl, file_path, self.wallet, cid, get_via_http_curl)
@pytest.mark.curl
@allure.title('Test Put/Get over HTTP using Curl utility')
def test_put_http_get_http_curl(self):
"""
Test checks upload and download over HTTP using curl utility.
"""
cid = create_container(self.wallet, rule=self.PLACEMENT_RULE, basic_acl=PUBLIC_ACL)
file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE)
with allure.step('Put objects using curl utility'):
oid_simple = upload_via_http_gate_curl(cid=cid, filepath=file_path_simple)
oid_large = upload_via_http_gate_curl(cid=cid, filepath=file_path_large)
for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)):
self.get_object_and_verify_hashes(oid, file_path, self.wallet, cid, get_via_http_curl)
@staticmethod
@allure.step('Try to get object and expect error')
def try_to_get_object_and_expect_error(cid: str, oid: str, expected_err: str):
try:
get_via_http_gate(cid=cid, oid=oid)
raise AssertionError(f'Expected error on getting object with cid: {cid}')
except Exception as err:
assert expected_err in str(err), f'Expected error {expected_err} in {err}'
@staticmethod
@allure.step('Verify object can be retrieved using HTTP header attribute')
def get_object_by_attr_and_verify_hashes(oid: str, file_name: str, cid: str, attrs: dict):
got_file_path_http = get_via_http_gate(cid=cid, oid=oid)
got_file_path_http_attr = get_via_http_gate_by_attribute(cid=cid, attribute=attrs)
TestHttpGate._assert_hashes_are_equal(file_name, got_file_path_http, got_file_path_http_attr)
@staticmethod
@allure.step('Verify object can be retrieved using HTTP')
def get_object_and_verify_hashes(oid: str, file_name: str, wallet: str, cid: str, object_getter=None):
nodes = get_nodes_without_object(wallet=wallet, cid=cid, oid=oid)
random_node = choice(nodes)
object_getter = object_getter or get_via_http_gate
got_file_path = get_object(wallet=wallet, cid=cid, oid=oid, endpoint=random_node)
got_file_path_http = object_getter(cid=cid, oid=oid)
TestHttpGate._assert_hashes_are_equal(file_name, got_file_path, got_file_path_http)
@staticmethod
def _assert_hashes_are_equal(orig_file_name: str, got_file_1: str, got_file_2: str):
        msg = 'Expected hashes to be equal for files {f1} and {f2}'
got_file_hash_http = get_file_hash(got_file_1)
assert get_file_hash(got_file_2) == got_file_hash_http, msg.format(f1=got_file_2, f2=got_file_1)
assert get_file_hash(orig_file_name) == got_file_hash_http, msg.format(f1=orig_file_name, f2=got_file_1)
@staticmethod
def _attr_into_header(attrs: dict) -> dict:
return {f'X-Attribute-{_key}': _value for _key, _value in attrs.items()}
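    # For example, _attr_into_header({'FilePath': 'my_files/file1', 'Email': 'user@test'})
    # returns {'X-Attribute-FilePath': 'my_files/file1', 'X-Attribute-Email': 'user@test'},
    # i.e. the X-Attribute-* header form used when uploading objects via the HTTP gate.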

View file

@ -0,0 +1,523 @@
import logging
import os
from random import choice, choices
import allure
import pytest
from common import ASSETS_DIR, COMPLEX_OBJ_SIZE, SIMPLE_OBJ_SIZE
from epoch import tick_epoch
from python_keywords import s3_gate_bucket, s3_gate_object
from python_keywords.aws_cli_client import AwsCliClient
from python_keywords.container import list_containers
from python_keywords.utility_keywords import (generate_file, generate_file_and_file_hash,
get_file_hash)
from utility import create_file_with_content, get_file_content, split_file
logger = logging.getLogger('NeoLogger')
def pytest_generate_tests(metafunc):
if "s3_client" in metafunc.fixturenames:
metafunc.parametrize("s3_client", ['aws cli', 'boto3'], indirect=True)
@allure.link('https://github.com/nspcc-dev/neofs-s3-gw#neofs-s3-gateway', name='neofs-s3-gateway')
@pytest.mark.s3_gate
class TestS3Gate:
s3_client = None
@pytest.fixture(scope='class', autouse=True)
@allure.title('[Class/Autouse]: Create S3 client')
def s3_client(self, prepare_wallet_and_deposit, request):
wallet = prepare_wallet_and_deposit
s3_bearer_rules_file = f"{os.getcwd()}/robot/resources/files/s3_bearer_rules.json"
cid, bucket, access_key_id, secret_access_key, owner_private_key = \
s3_gate_bucket.init_s3_credentials(wallet, s3_bearer_rules_file=s3_bearer_rules_file)
containers_list = list_containers(wallet)
assert cid in containers_list, f'Expected cid {cid} in {containers_list}'
if request.param == 'aws cli':
try:
client = AwsCliClient(access_key_id, secret_access_key)
except Exception as err:
if 'command was not found or was not executable' in str(err):
pytest.skip('AWS CLI was not found')
else:
raise RuntimeError('Error on creating instance for AwsCliClient') from err
else:
client = s3_gate_bucket.config_s3_client(access_key_id, secret_access_key)
TestS3Gate.s3_client = client
@pytest.fixture
@allure.title('Create two buckets')
def create_buckets(self):
bucket_1 = s3_gate_bucket.create_bucket_s3(self.s3_client)
bucket_2 = s3_gate_bucket.create_bucket_s3(self.s3_client)
return bucket_1, bucket_2
@pytest.fixture
@allure.title('Create/delete bucket')
def bucket(self):
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client)
yield bucket
objects = s3_gate_object.list_objects_s3(self.s3_client, bucket)
if objects:
s3_gate_object.delete_objects_s3(self.s3_client, bucket, objects)
s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket)
@allure.title('Test S3 Bucket API')
def test_s3_buckets(self):
"""
Test base S3 Bucket API (Create/List/Head/Delete).
"""
file_path = generate_file()
file_name = self.object_key_from_file_path(file_path)
with allure.step('Create buckets'):
bucket_1 = s3_gate_bucket.create_bucket_s3(self.s3_client)
bucket_2 = s3_gate_bucket.create_bucket_s3(self.s3_client)
        with allure.step('Check buckets are present in the system'):
buckets = s3_gate_bucket.list_buckets_s3(self.s3_client)
assert bucket_1 in buckets, f'Expected bucket {bucket_1} is in the list'
assert bucket_2 in buckets, f'Expected bucket {bucket_2} is in the list'
with allure.step('Bucket must be empty'):
for bucket in (bucket_1, bucket_2):
objects_list = s3_gate_object.list_objects_s3(self.s3_client, bucket)
assert not objects_list, f'Expected empty bucket, got {objects_list}'
with allure.step('Check buckets are visible with S3 head command'):
s3_gate_bucket.head_bucket(self.s3_client, bucket_1)
s3_gate_bucket.head_bucket(self.s3_client, bucket_2)
with allure.step('Check we can put/list object with S3 commands'):
s3_gate_object.put_object_s3(self.s3_client, bucket_1, file_path)
s3_gate_object.head_object_s3(self.s3_client, bucket_1, file_name)
bucket_objects = s3_gate_object.list_objects_s3(self.s3_client, bucket_1)
assert file_name in bucket_objects, \
f'Expected file {file_name} in objects list {bucket_objects}'
with allure.step('Try to delete not empty bucket and get error'):
with pytest.raises(Exception, match=r'.*The bucket you tried to delete is not empty.*'):
s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket_1)
s3_gate_bucket.head_bucket(self.s3_client, bucket_1)
with allure.step(f'Delete empty bucket {bucket_2}'):
s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket_2)
tick_epoch()
with allure.step(f'Check bucket {bucket_2} deleted'):
with pytest.raises(Exception, match=r'.*Not Found.*'):
s3_gate_bucket.head_bucket(self.s3_client, bucket_2)
buckets = s3_gate_bucket.list_buckets_s3(self.s3_client)
assert bucket_1 in buckets, f'Expected bucket {bucket_1} is in the list'
assert bucket_2 not in buckets, f'Expected bucket {bucket_2} is not in the list'
@allure.title('Test S3 Object API')
@pytest.mark.sanity
@pytest.mark.parametrize('file_type', ['simple', 'large'], ids=['Simple object', 'Large object'])
def test_s3_api_object(self, file_type):
"""
Test base S3 Object API (Put/Head/List) for simple and large objects.
"""
file_path = generate_file(SIMPLE_OBJ_SIZE if file_type == 'simple' else COMPLEX_OBJ_SIZE)
file_name = self.object_key_from_file_path(file_path)
bucket_1 = s3_gate_bucket.create_bucket_s3(self.s3_client)
bucket_2 = s3_gate_bucket.create_bucket_s3(self.s3_client)
for bucket in (bucket_1, bucket_2):
with allure.step('Bucket must be empty'):
objects_list = s3_gate_object.list_objects_s3(self.s3_client, bucket)
assert not objects_list, f'Expected empty bucket, got {objects_list}'
s3_gate_object.put_object_s3(self.s3_client, bucket, file_path)
s3_gate_object.head_object_s3(self.s3_client, bucket, file_name)
bucket_objects = s3_gate_object.list_objects_s3(self.s3_client, bucket)
assert file_name in bucket_objects, \
f'Expected file {file_name} in objects list {bucket_objects}'
with allure.step("Check object's attributes"):
for attrs in (['ETag'], ['ObjectSize', 'StorageClass']):
s3_gate_object.get_object_attributes(self.s3_client, bucket, file_name, *attrs)
@allure.title('Test S3 Sync directory')
def test_s3_sync_dir(self, bucket):
"""
Test checks sync directory with AWS CLI utility.
"""
file_path_1 = f"{os.getcwd()}/{ASSETS_DIR}/test_sync/test_file_1"
file_path_2 = f"{os.getcwd()}/{ASSETS_DIR}/test_sync/test_file_2"
key_to_path = {'test_file_1': file_path_1, 'test_file_2': file_path_2}
if not isinstance(self.s3_client, AwsCliClient):
pytest.skip('This test is not supported with boto3 client')
create_file_with_content(file_path=file_path_1)
create_file_with_content(file_path=file_path_2)
self.s3_client.sync(bucket_name=bucket, dir_path=os.path.dirname(file_path_1))
with allure.step('Check objects are synced'):
objects = s3_gate_object.list_objects_s3(self.s3_client, bucket)
with allure.step('Check these are the same objects'):
                assert set(key_to_path.keys()) == set(objects), f'Expected all objects to be synced. Got {objects}'
for obj_key in objects:
got_object = s3_gate_object.get_object_s3(self.s3_client, bucket, obj_key)
assert get_file_hash(got_object) == get_file_hash(key_to_path.get(obj_key)), \
'Expected hashes are the same'
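        # A minimal sketch of what the AwsCliClient.sync helper is assumed to run under the
        # hood (the exact flags are an assumption, not verified here):
        #   aws s3 sync <dir_path> s3://<bucket> --endpoint-url <s3 gate address>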
@allure.title('Test S3 Object versioning')
def test_s3_api_versioning(self, bucket):
"""
Test checks basic versioning functionality for S3 bucket.
"""
version_1_content = 'Version 1'
version_2_content = 'Version 2'
file_name_simple = create_file_with_content(content=version_1_content)
obj_key = os.path.basename(file_name_simple)
        with allure.step('Enable versioning for the bucket'):
s3_gate_bucket.get_bucket_versioning_status(self.s3_client, bucket)
s3_gate_bucket.set_bucket_versioning(self.s3_client, bucket, status=s3_gate_bucket.VersioningStatus.ENABLED)
status = s3_gate_bucket.get_bucket_versioning_status(self.s3_client, bucket)
assert status == s3_gate_bucket.VersioningStatus.ENABLED.value, f'Expected enabled status. Got {status}'
with allure.step('Put several versions of object into bucket'):
version_id_1 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_simple)
create_file_with_content(file_path=file_name_simple, content=version_2_content)
version_id_2 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_simple)
with allure.step('Check bucket shows all versions'):
versions = s3_gate_object.list_objects_versions_s3(self.s3_client, bucket)
obj_versions = {version.get('VersionId') for version in versions if version.get('Key') == obj_key}
assert obj_versions == {version_id_1, version_id_2}, \
f'Expected object has versions: {version_id_1, version_id_2}'
with allure.step('Show information about particular version'):
for version_id in (version_id_1, version_id_2):
response = s3_gate_object.head_object_s3(self.s3_client, bucket, obj_key, version_id=version_id)
assert 'LastModified' in response, 'Expected LastModified field'
assert 'ETag' in response, 'Expected ETag field'
assert response.get('VersionId') == version_id, f'Expected VersionId is {version_id}'
assert response.get('ContentLength') != 0, 'Expected ContentLength is not zero'
with allure.step("Check object's attributes"):
for version_id in (version_id_1, version_id_2):
got_attrs = s3_gate_object.get_object_attributes(self.s3_client, bucket, obj_key, 'ETag',
version_id=version_id)
if got_attrs:
assert got_attrs.get('VersionId') == version_id, f'Expected VersionId is {version_id}'
with allure.step('Delete object and check it was deleted'):
response = s3_gate_object.delete_object_s3(self.s3_client, bucket, obj_key)
version_id_delete = response.get('VersionId')
with pytest.raises(Exception, match=r'.*Not Found.*'):
s3_gate_object.head_object_s3(self.s3_client, bucket, obj_key)
with allure.step('Get content for all versions and check it is correct'):
for version, content in ((version_id_2, version_2_content), (version_id_1, version_1_content)):
file_name = s3_gate_object.get_object_s3(self.s3_client, bucket, obj_key, version_id=version)
got_content = get_file_content(file_name)
assert got_content == content, f'Expected object content is\n{content}\nGot\n{got_content}'
with allure.step('Restore previous object version'):
s3_gate_object.delete_object_s3(self.s3_client, bucket, obj_key, version_id=version_id_delete)
file_name = s3_gate_object.get_object_s3(self.s3_client, bucket, obj_key)
got_content = get_file_content(file_name)
assert got_content == version_2_content, \
f'Expected object content is\n{version_2_content}\nGot\n{got_content}'
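        # For reference, with a plain boto3 S3 client the versioning setup above roughly
        # corresponds to (bucket name is a placeholder):
        #   client.put_bucket_versioning(Bucket='my-bucket',
        #                                VersioningConfiguration={'Status': 'Enabled'})
        #   client.get_bucket_versioning(Bucket='my-bucket')  # -> {'Status': 'Enabled', ...}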
@allure.title('Test S3 Object Multipart API')
def test_s3_api_multipart(self, bucket):
"""
Test checks S3 Multipart API (Create multipart upload/Abort multipart upload/List multipart upload/
Upload part/List parts/Complete multipart upload).
"""
parts_count = 3
file_name_large, _ = generate_file_and_file_hash(SIMPLE_OBJ_SIZE * 1024 * 6 * parts_count) # 5Mb - min part
# file_name_large, _ = generate_file_and_file_hash(SIMPLE_OBJ_SIZE * 1024 * 30 * parts_count) # 5Mb - min part
object_key = self.object_key_from_file_path(file_name_large)
part_files = split_file(file_name_large, parts_count)
parts = []
uploads = s3_gate_object.list_multipart_uploads_s3(self.s3_client, bucket)
    assert not uploads, f'Expected no multipart uploads in bucket {bucket}'
with allure.step('Create and abort multipart upload'):
upload_id = s3_gate_object.create_multipart_upload_s3(self.s3_client, bucket, object_key)
uploads = s3_gate_object.list_multipart_uploads_s3(self.s3_client, bucket)
            assert uploads, f'Expected exactly one upload in bucket {bucket}'
assert uploads[0].get('Key') == object_key, f'Expected correct key {object_key} in upload {uploads}'
assert uploads[0].get('UploadId') == upload_id, f'Expected correct UploadId {upload_id} in upload {uploads}'
s3_gate_object.abort_multipart_uploads_s3(self.s3_client, bucket, object_key, upload_id)
uploads = s3_gate_object.list_multipart_uploads_s3(self.s3_client, bucket)
            assert not uploads, f'Expected no multipart uploads in bucket {bucket}'
with allure.step('Create new multipart upload and upload several parts'):
upload_id = s3_gate_object.create_multipart_upload_s3(self.s3_client, bucket, object_key)
for part_id, file_path in enumerate(part_files, start=1):
etag = s3_gate_object.upload_part_s3(self.s3_client, bucket, object_key, upload_id, part_id, file_path)
parts.append((part_id, etag))
with allure.step('Check all parts are visible in bucket'):
got_parts = s3_gate_object.list_parts_s3(self.s3_client, bucket, object_key, upload_id)
assert len(got_parts) == len(part_files), f'Expected {parts_count} parts, got\n{got_parts}'
s3_gate_object.complete_multipart_upload_s3(self.s3_client, bucket, object_key, upload_id, parts)
uploads = s3_gate_object.list_multipart_uploads_s3(self.s3_client, bucket)
        assert not uploads, f'Expected no multipart uploads in bucket {bucket}'
with allure.step('Check we can get whole object from bucket'):
got_object = s3_gate_object.get_object_s3(self.s3_client, bucket, object_key)
assert get_file_hash(got_object) == get_file_hash(file_name_large)
self.check_object_attributes(bucket, object_key, parts_count)
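        # The flow above maps to the standard S3 multipart calls; a hedged boto3 sketch
        # (names and the single part are illustrative):
        #   resp = client.create_multipart_upload(Bucket='my-bucket', Key='big-object')
        #   part = client.upload_part(Bucket='my-bucket', Key='big-object', PartNumber=1,
        #                             UploadId=resp['UploadId'], Body=part_bytes)
        #   client.complete_multipart_upload(
        #       Bucket='my-bucket', Key='big-object', UploadId=resp['UploadId'],
        #       MultipartUpload={'Parts': [{'ETag': part['ETag'], 'PartNumber': 1}]})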
@allure.title('Test S3 Bucket tagging API')
def test_s3_api_bucket_tagging(self, bucket):
"""
Test checks S3 Bucket tagging API (Put tag/Get tag).
"""
key_value_pair = [('some-key', 'some-value'), ('some-key-2', 'some-value-2')]
s3_gate_bucket.put_bucket_tagging(self.s3_client, bucket, key_value_pair)
got_tags = s3_gate_bucket.get_bucket_tagging(self.s3_client, bucket)
        with allure.step('Check all tags are present'):
assert got_tags, f'Expected tags, got {got_tags}'
expected_tags = [{'Key': key, 'Value': value} for key, value in key_value_pair]
for tag in expected_tags:
assert tag in got_tags
s3_gate_bucket.delete_bucket_tagging(self.s3_client, bucket)
tags = s3_gate_bucket.get_bucket_tagging(self.s3_client, bucket)
assert not tags, f'Expected there is no tags for bucket {bucket}, got {tags}'
@allure.title('Test S3 Object tagging API')
def test_s3_api_object_tagging(self, bucket):
"""
Test checks S3 Object tagging API (Put tag/Get tag/Update tag).
"""
key_value_pair_bucket = [('some-key', 'some-value'), ('some-key-2', 'some-value-2')]
key_value_pair_obj = [('some-key-obj', 'some-value-obj'), ('some-key--obj2', 'some-value--obj2')]
key_value_pair_obj_new = [('some-key-obj-new', 'some-value-obj-new')]
file_name_simple, _ = generate_file_and_file_hash(SIMPLE_OBJ_SIZE)
obj_key = self.object_key_from_file_path(file_name_simple)
s3_gate_bucket.put_bucket_tagging(self.s3_client, bucket, key_value_pair_bucket)
s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_simple)
for tags in (key_value_pair_obj, key_value_pair_obj_new):
s3_gate_object.put_object_tagging(self.s3_client, bucket, obj_key, tags)
got_tags = s3_gate_object.get_object_tagging(self.s3_client, bucket, obj_key)
assert got_tags, f'Expected tags, got {got_tags}'
expected_tags = [{'Key': key, 'Value': value} for key, value in tags]
for tag in expected_tags:
assert tag in got_tags
s3_gate_object.delete_object_tagging(self.s3_client, bucket, obj_key)
got_tags = s3_gate_object.get_object_tagging(self.s3_client, bucket, obj_key)
assert not got_tags, f'Expected there is no tags for bucket {bucket}, got {got_tags}'
@allure.title('Test S3: Delete object & delete objects S3 API')
def test_s3_api_delete(self, create_buckets):
"""
        Check the delete_object and delete_objects S3 API operations. Some objects are deleted from the
        first bucket one by one, and from the second bucket all at once.
"""
max_obj_count = 20
max_delete_objects = 17
put_objects = []
file_paths = []
obj_sizes = [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE]
bucket_1, bucket_2 = create_buckets
with allure.step(f'Generate {max_obj_count} files'):
for _ in range(max_obj_count):
file_paths.append(generate_file_and_file_hash(choice(obj_sizes))[0])
for bucket in (bucket_1, bucket_2):
            with allure.step(f'Bucket {bucket} must be empty as it was just created'):
objects_list = s3_gate_object.list_objects_s3_v2(self.s3_client, bucket)
assert not objects_list, f'Expected empty bucket, got {objects_list}'
for file_path in file_paths:
s3_gate_object.put_object_s3(self.s3_client, bucket, file_path)
put_objects.append(self.object_key_from_file_path(file_path))
with allure.step(f'Check all objects put in bucket {bucket} successfully'):
bucket_objects = s3_gate_object.list_objects_s3_v2(self.s3_client, bucket)
assert set(put_objects) == set(bucket_objects), \
f'Expected all objects {put_objects} in objects list {bucket_objects}'
with allure.step('Delete some objects from bucket_1 one by one'):
objects_to_delete_b1 = choices(put_objects, k=max_delete_objects)
for obj in objects_to_delete_b1:
s3_gate_object.delete_object_s3(self.s3_client, bucket_1, obj)
with allure.step('Check deleted objects are not visible in bucket bucket_1'):
bucket_objects = s3_gate_object.list_objects_s3_v2(self.s3_client, bucket_1)
assert set(put_objects).difference(set(objects_to_delete_b1)) == set(bucket_objects), \
                f'Expected only non-deleted objects in bucket {bucket_1}, got {bucket_objects}'
self.try_to_get_object_and_got_error(bucket_1, objects_to_delete_b1)
with allure.step('Delete some objects from bucket_2 at once'):
objects_to_delete_b2 = choices(put_objects, k=max_delete_objects)
s3_gate_object.delete_objects_s3(self.s3_client, bucket_2, objects_to_delete_b2)
with allure.step('Check deleted objects are not visible in bucket bucket_2'):
objects_list = s3_gate_object.list_objects_s3_v2(self.s3_client, bucket_2)
assert set(put_objects).difference(set(objects_to_delete_b2)) == set(objects_list), \
                f'Expected only non-deleted objects in bucket {bucket_2}, got {objects_list}'
self.try_to_get_object_and_got_error(bucket_2, objects_to_delete_b2)
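        # Batch deletion uses the S3 DeleteObjects operation; with boto3 it is invoked as
        # (keys are placeholders):
        #   client.delete_objects(Bucket='my-bucket',
        #                         Delete={'Objects': [{'Key': 'obj-1'}, {'Key': 'obj-2'}]})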
@allure.title('Test S3: Copy object to the same bucket')
def test_s3_copy_same_bucket(self):
"""
Test object can be copied to the same bucket.
"""
file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE)
file_name_simple = self.object_key_from_file_path(file_path_simple)
file_name_large = self.object_key_from_file_path(file_path_large)
bucket_objects = [file_name_simple, file_name_large]
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client)
with allure.step('Bucket must be empty'):
objects_list = s3_gate_object.list_objects_s3(self.s3_client, bucket)
assert not objects_list, f'Expected empty bucket, got {objects_list}'
with allure.step('Put objects into bucket'):
for file_path in (file_path_simple, file_path_large):
s3_gate_object.put_object_s3(self.s3_client, bucket, file_path)
with allure.step('Copy one object into the same bucket'):
copy_obj_path = s3_gate_object.copy_object_s3(self.s3_client, bucket, file_name_simple)
bucket_objects.append(copy_obj_path)
self.check_objects_in_bucket(bucket, bucket_objects)
with allure.step('Check copied object has the same content'):
got_copied_file = s3_gate_object.get_object_s3(self.s3_client, bucket, copy_obj_path)
assert get_file_hash(file_path_simple) == get_file_hash(got_copied_file), 'Hashes must be the same'
with allure.step('Delete one object from bucket'):
s3_gate_object.delete_object_s3(self.s3_client, bucket, file_name_simple)
bucket_objects.remove(file_name_simple)
self.check_objects_in_bucket(bucket, expected_objects=bucket_objects, unexpected_objects=[file_name_simple])
@allure.title('Test S3: Copy object to another bucket')
def test_s3_copy_to_another_bucket(self):
"""
Test object can be copied to another bucket.
"""
file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE)
file_name_simple = self.object_key_from_file_path(file_path_simple)
file_name_large = self.object_key_from_file_path(file_path_large)
bucket_1_objects = [file_name_simple, file_name_large]
bucket_1 = s3_gate_bucket.create_bucket_s3(self.s3_client)
bucket_2 = s3_gate_bucket.create_bucket_s3(self.s3_client)
with allure.step('Buckets must be empty'):
for bucket in (bucket_1, bucket_2):
objects_list = s3_gate_object.list_objects_s3(self.s3_client, bucket)
assert not objects_list, f'Expected empty bucket, got {objects_list}'
with allure.step('Put objects into one bucket'):
for file_path in (file_path_simple, file_path_large):
s3_gate_object.put_object_s3(self.s3_client, bucket_1, file_path)
with allure.step('Copy object from first bucket into second'):
copy_obj_path_b2 = s3_gate_object.copy_object_s3(self.s3_client, bucket_1, file_name_large,
bucket_dst=bucket_2)
self.check_objects_in_bucket(bucket_1, expected_objects=bucket_1_objects)
self.check_objects_in_bucket(bucket_2, expected_objects=[copy_obj_path_b2])
with allure.step('Check copied object has the same content'):
got_copied_file_b2 = s3_gate_object.get_object_s3(self.s3_client, bucket_2, copy_obj_path_b2)
assert get_file_hash(file_path_large) == get_file_hash(got_copied_file_b2), 'Hashes must be the same'
with allure.step('Delete one object from first bucket'):
s3_gate_object.delete_object_s3(self.s3_client, bucket_1, file_name_simple)
bucket_1_objects.remove(file_name_simple)
self.check_objects_in_bucket(bucket_1, expected_objects=bucket_1_objects)
self.check_objects_in_bucket(bucket_2, expected_objects=[copy_obj_path_b2])
with allure.step('Delete one object from second bucket and check it is empty'):
s3_gate_object.delete_object_s3(self.s3_client, bucket_2, copy_obj_path_b2)
self.check_objects_in_bucket(bucket_2, expected_objects=[])
def check_object_attributes(self, bucket: str, object_key: str, parts_count: int):
if not isinstance(self.s3_client, AwsCliClient):
logger.warning('Attributes check is not supported for boto3 implementation')
return
with allure.step("Check object's attributes"):
obj_parts = s3_gate_object.get_object_attributes(self.s3_client, bucket, object_key, 'ObjectParts',
get_full_resp=False)
assert obj_parts.get('TotalPartsCount') == parts_count, f'Expected TotalPartsCount is {parts_count}'
            assert len(obj_parts.get('Parts')) == parts_count, f'Expected Parts count is {parts_count}'
with allure.step("Check object's attribute max-parts"):
max_parts = 2
obj_parts = s3_gate_object.get_object_attributes(self.s3_client, bucket, object_key, 'ObjectParts',
max_parts=max_parts, get_full_resp=False)
assert obj_parts.get('TotalPartsCount') == parts_count, f'Expected TotalPartsCount is {parts_count}'
            assert obj_parts.get('MaxParts') == max_parts, f'Expected MaxParts is {max_parts}'
            assert len(obj_parts.get('Parts')) == max_parts, f'Expected Parts count is {max_parts}'
with allure.step("Check object's attribute part-number-marker"):
part_number_marker = 3
obj_parts = s3_gate_object.get_object_attributes(self.s3_client, bucket, object_key, 'ObjectParts',
part_number=part_number_marker, get_full_resp=False)
assert obj_parts.get('TotalPartsCount') == parts_count, f'Expected TotalPartsCount is {parts_count}'
assert obj_parts.get(
'PartNumberMarker') == part_number_marker, f'Expected PartNumberMarker is {part_number_marker}'
            assert len(obj_parts.get('Parts')) == 1, 'Expected Parts count is 1'
    @allure.step('Expected all objects are present in the bucket')
def check_objects_in_bucket(self, bucket, expected_objects: list, unexpected_objects: list = None):
unexpected_objects = unexpected_objects or []
bucket_objects = s3_gate_object.list_objects_s3(self.s3_client, bucket)
assert len(bucket_objects) == len(expected_objects), f'Expected {len(expected_objects)} objects in the bucket'
for bucket_object in expected_objects:
assert bucket_object in bucket_objects, \
f'Expected object {bucket_object} in objects list {bucket_objects}'
for bucket_object in unexpected_objects:
assert bucket_object not in bucket_objects, \
f'Expected object {bucket_object} not in objects list {bucket_objects}'
    @allure.step('Try to get object and expect error')
def try_to_get_object_and_got_error(self, bucket: str, unexpected_objects: list):
for obj in unexpected_objects:
try:
s3_gate_object.get_object_s3(self.s3_client, bucket, obj)
raise AssertionError(f'Object {obj} found in bucket {bucket}')
except Exception as err:
assert 'The specified key does not exist' in str(err), f'Expected error in exception {err}'
@staticmethod
def object_key_from_file_path(full_path: str) -> str:
return os.path.basename(full_path)

View file

@ -1,26 +0,0 @@
import pytest
from frostfs_testlib.storage.dataclasses.wallet import WalletFactory, WalletInfo
@pytest.fixture(scope="module")
def owner_wallet(wallet_factory: WalletFactory) -> WalletInfo:
"""
Returns wallet which owns containers and objects
"""
return wallet_factory.create_wallet()
@pytest.fixture(scope="module")
def user_wallet(wallet_factory: WalletFactory) -> WalletInfo:
"""
Returns wallet which will use objects from owner via static session
"""
return wallet_factory.create_wallet()
@pytest.fixture(scope="module")
def stranger_wallet(wallet_factory: WalletFactory) -> WalletInfo:
"""
Returns stranger wallet which should fail to obtain data
"""
return wallet_factory.create_wallet()

View file

@ -1,144 +0,0 @@
import random
import allure
import pytest
from frostfs_testlib.resources.common import DEFAULT_WALLET_PASS
from frostfs_testlib.resources.error_patterns import SESSION_NOT_FOUND
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import delete_object, put_object, put_object_to_random_node
from frostfs_testlib.steps.session_token import create_session_token
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils import wallet_utils
from frostfs_testlib.utils.file_utils import generate_file
@pytest.mark.sanity
@pytest.mark.session_token
class TestDynamicObjectSession(ClusterTestBase):
@allure.title("Test Object Operations with Session Token")
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
def test_object_session_token(self, default_wallet, object_size):
"""
Test how operations over objects are executed with a session token
Steps:
1. Create a private container
2. Obj operation requests to the node which IS NOT in the container but granted
with a session token
3. Obj operation requests to the node which IS in the container and NOT granted
with a session token
4. Obj operation requests to the node which IS NOT in the container and NOT granted
with a session token
"""
with allure.step("Init wallet"):
wallet = default_wallet
address = wallet_utils.get_last_address_from_wallet(wallet, "")
with allure.step("Nodes Settlements"):
session_token_node, container_node, non_container_node = random.sample(
self.cluster.storage_nodes, 3
)
with allure.step("Create Session Token"):
session_token = create_session_token(
shell=self.shell,
owner=address,
wallet_path=wallet,
wallet_password=DEFAULT_WALLET_PASS,
rpc_endpoint=session_token_node.get_rpc_endpoint(),
)
with allure.step("Create Private Container"):
un_locode = container_node.get_un_locode()
locode = "SPB" if un_locode == "RU LED" else un_locode.split()[1]
placement_policy = (
f"REP 1 IN LOC_{locode}_PLACE CBF 1 SELECT 1 FROM LOC_{locode} "
f'AS LOC_{locode}_PLACE FILTER "UN-LOCODE" '
f'EQ "{un_locode}" AS LOC_{locode}'
)
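            # For a node with un_locode "FI HEL" this renders to the policy (illustrative):
            #   REP 1 IN LOC_HEL_PLACE CBF 1 SELECT 1 FROM LOC_HEL
            #   AS LOC_HEL_PLACE FILTER "UN-LOCODE" EQ "FI HEL" AS LOC_HEL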
cid = create_container(
wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=placement_policy,
)
with allure.step("Put Objects"):
file_path = generate_file(object_size)
oid = put_object_to_random_node(
wallet=wallet,
path=file_path,
cid=cid,
shell=self.shell,
cluster=self.cluster,
)
oid_delete = put_object_to_random_node(
wallet=wallet,
path=file_path,
cid=cid,
shell=self.shell,
cluster=self.cluster,
)
with allure.step("Node not in container but granted a session token"):
put_object(
wallet=wallet,
path=file_path,
cid=cid,
shell=self.shell,
endpoint=session_token_node.get_rpc_endpoint(),
session=session_token,
)
delete_object(
wallet=wallet,
cid=cid,
oid=oid_delete,
shell=self.shell,
endpoint=session_token_node.get_rpc_endpoint(),
session=session_token,
)
with allure.step("Node in container and not granted a session token"):
with pytest.raises(Exception, match=SESSION_NOT_FOUND):
put_object(
wallet=wallet,
path=file_path,
cid=cid,
shell=self.shell,
endpoint=container_node.get_rpc_endpoint(),
session=session_token,
)
with pytest.raises(Exception, match=SESSION_NOT_FOUND):
delete_object(
wallet=wallet,
cid=cid,
oid=oid,
shell=self.shell,
endpoint=container_node.get_rpc_endpoint(),
session=session_token,
)
with allure.step("Node not in container and not granted a session token"):
with pytest.raises(Exception, match=SESSION_NOT_FOUND):
put_object(
wallet=wallet,
path=file_path,
cid=cid,
shell=self.shell,
endpoint=non_container_node.get_rpc_endpoint(),
session=session_token,
)
with pytest.raises(Exception, match=SESSION_NOT_FOUND):
delete_object(
wallet=wallet,
cid=cid,
oid=oid,
shell=self.shell,
endpoint=non_container_node.get_rpc_endpoint(),
session=session_token,
)

View file

@ -1,757 +0,0 @@
import logging
import allure
import pytest
from frostfs_testlib.resources.error_patterns import (
EXPIRED_SESSION_TOKEN,
MALFORMED_REQUEST,
OBJECT_ACCESS_DENIED,
OBJECT_NOT_FOUND,
)
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import (
delete_object,
get_object,
get_object_from_random_node,
get_range,
get_range_hash,
head_object,
put_object_to_random_node,
search_object,
)
from frostfs_testlib.steps.epoch import ensure_fresh_epoch
from frostfs_testlib.steps.session_token import (
INVALID_SIGNATURE,
UNRELATED_CONTAINER,
UNRELATED_KEY,
UNRELATED_OBJECT,
WRONG_VERB,
Lifetime,
ObjectVerb,
generate_object_session_token,
get_object_signed_token,
sign_session_token,
)
from frostfs_testlib.steps.storage_object import delete_objects
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils.file_utils import generate_file
from pytest import FixtureRequest
logger = logging.getLogger("NeoLogger")
RANGE_OFFSET_FOR_COMPLEX_OBJECT = 200
@pytest.fixture(scope="module")
def storage_containers(
owner_wallet: WalletInfo, client_shell: Shell, cluster: Cluster
) -> list[str]:
cid = create_container(
owner_wallet.path, shell=client_shell, endpoint=cluster.default_rpc_endpoint
)
other_cid = create_container(
owner_wallet.path, shell=client_shell, endpoint=cluster.default_rpc_endpoint
)
yield [cid, other_cid]
@pytest.fixture(
params=[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
    # Module scope to upload/delete each file set only once
scope="module",
)
def storage_objects(
owner_wallet: WalletInfo,
client_shell: Shell,
storage_containers: list[str],
cluster: Cluster,
request: FixtureRequest,
) -> list[StorageObjectInfo]:
file_path = generate_file(request.param)
storage_objects = []
with allure.step("Put objects"):
        # upload a couple of objects
for _ in range(3):
storage_object_id = put_object_to_random_node(
wallet=owner_wallet.path,
path=file_path,
cid=storage_containers[0],
shell=client_shell,
cluster=cluster,
)
storage_object = StorageObjectInfo(storage_containers[0], storage_object_id)
storage_object.size = request.param
storage_object.wallet_file_path = owner_wallet.path
storage_object.file_path = file_path
storage_objects.append(storage_object)
yield storage_objects
# Teardown after all tests done with current param
delete_objects(storage_objects, client_shell, cluster)
@allure.step("Get ranges for test")
def get_ranges(
storage_object: StorageObjectInfo, max_object_size: int, shell: Shell, endpoint: str
) -> list[str]:
"""
Returns ranges to test range/hash methods via static session
"""
object_size = storage_object.size
if object_size > max_object_size:
assert object_size >= max_object_size + RANGE_OFFSET_FOR_COMPLEX_OBJECT
return [
"0:10",
f"{object_size-10}:10",
f"{max_object_size - RANGE_OFFSET_FOR_COMPLEX_OBJECT}:"
f"{RANGE_OFFSET_FOR_COMPLEX_OBJECT * 2}",
]
else:
return ["0:10", f"{object_size-10}:10"]
@pytest.fixture(scope="module")
def static_sessions(
owner_wallet: WalletInfo,
user_wallet: WalletInfo,
storage_containers: list[str],
storage_objects: list[StorageObjectInfo],
client_shell: Shell,
temp_directory: str,
) -> dict[ObjectVerb, str]:
"""
    Returns a dict with static session token file paths for all verbs, with the default lifetime,
    bound to the valid container and the first two objects
"""
return {
verb: get_object_signed_token(
owner_wallet,
user_wallet,
storage_containers[0],
storage_objects[0:2],
verb,
client_shell,
temp_directory,
)
for verb in ObjectVerb
}
@pytest.mark.static_session
class TestObjectStaticSession(ClusterTestBase):
@allure.title("Validate static session with read operations")
@pytest.mark.parametrize(
"method_under_test,verb",
[
(head_object, ObjectVerb.HEAD),
(get_object, ObjectVerb.GET),
],
)
def test_static_session_read(
self,
user_wallet: WalletInfo,
storage_objects: list[StorageObjectInfo],
static_sessions: dict[ObjectVerb, str],
method_under_test,
verb: ObjectVerb,
request: FixtureRequest,
):
"""
Validate static session with read operations
"""
allure.dynamic.title(
f"Validate static session with read operations for {request.node.callspec.id}"
)
for node in self.cluster.storage_nodes:
for storage_object in storage_objects[0:2]:
method_under_test(
wallet=user_wallet.path,
cid=storage_object.cid,
oid=storage_object.oid,
shell=self.shell,
endpoint=node.get_rpc_endpoint(),
session=static_sessions[verb],
)
@allure.title("Validate static session with range operations")
@pytest.mark.parametrize(
"method_under_test,verb",
[(get_range, ObjectVerb.RANGE), (get_range_hash, ObjectVerb.RANGEHASH)],
)
def test_static_session_range(
self,
user_wallet: WalletInfo,
storage_objects: list[StorageObjectInfo],
static_sessions: dict[ObjectVerb, str],
method_under_test,
verb: ObjectVerb,
request: FixtureRequest,
max_object_size,
):
"""
Validate static session with range operations
"""
allure.dynamic.title(
f"Validate static session with range operations for {request.node.callspec.id}"
)
storage_object = storage_objects[0]
ranges_to_test = get_ranges(
storage_object, max_object_size, self.shell, self.cluster.default_rpc_endpoint
)
for range_to_test in ranges_to_test:
with allure.step(f"Check range {range_to_test}"):
with expect_not_raises():
method_under_test(
user_wallet.path,
storage_object.cid,
storage_object.oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
session=static_sessions[verb],
range_cut=range_to_test,
)
@allure.title("Validate static session with search operation")
def test_static_session_search(
self,
user_wallet: WalletInfo,
storage_objects: list[StorageObjectInfo],
static_sessions: dict[ObjectVerb, str],
request: FixtureRequest,
):
"""
Validate static session with search operations
"""
allure.dynamic.title(f"Validate static session with search for {request.node.callspec.id}")
cid = storage_objects[0].cid
expected_object_ids = [storage_object.oid for storage_object in storage_objects[0:2]]
actual_object_ids = search_object(
user_wallet.path,
cid,
self.shell,
endpoint=self.cluster.default_rpc_endpoint,
session=static_sessions[ObjectVerb.SEARCH],
root=True,
)
assert sorted(expected_object_ids) == sorted(actual_object_ids)
@allure.title("Validate static session with object id not in session")
def test_static_session_unrelated_object(
self,
user_wallet: WalletInfo,
storage_objects: list[StorageObjectInfo],
static_sessions: dict[ObjectVerb, str],
request: FixtureRequest,
):
"""
Validate static session with object id not in session
"""
allure.dynamic.title(
f"Validate static session with object id not in session for {request.node.callspec.id}"
)
with pytest.raises(Exception, match=UNRELATED_OBJECT):
head_object(
user_wallet.path,
storage_objects[2].cid,
storage_objects[2].oid,
self.shell,
self.cluster.default_rpc_endpoint,
session=static_sessions[ObjectVerb.HEAD],
)
@allure.title("Validate static session with user id not in session")
def test_static_session_head_unrelated_user(
self,
stranger_wallet: WalletInfo,
storage_objects: list[StorageObjectInfo],
static_sessions: dict[ObjectVerb, str],
request: FixtureRequest,
):
"""
Validate static session with user id not in session
"""
allure.dynamic.title(
f"Validate static session with user id not in session for {request.node.callspec.id}"
)
storage_object = storage_objects[0]
with pytest.raises(Exception, match=UNRELATED_KEY):
head_object(
stranger_wallet.path,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
session=static_sessions[ObjectVerb.HEAD],
)
@allure.title("Validate static session with wrong verb in session")
def test_static_session_head_wrong_verb(
self,
user_wallet: WalletInfo,
storage_objects: list[StorageObjectInfo],
static_sessions: dict[ObjectVerb, str],
request: FixtureRequest,
):
"""
Validate static session with wrong verb in session
"""
allure.dynamic.title(
f"Validate static session with wrong verb in session for {request.node.callspec.id}"
)
storage_object = storage_objects[0]
with pytest.raises(Exception, match=WRONG_VERB):
get_object(
user_wallet.path,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
session=static_sessions[ObjectVerb.HEAD],
)
@allure.title("Validate static session with container id not in session")
def test_static_session_unrelated_container(
self,
user_wallet: WalletInfo,
storage_objects: list[StorageObjectInfo],
storage_containers: list[str],
static_sessions: dict[ObjectVerb, str],
request: FixtureRequest,
):
"""
Validate static session with container id not in session
"""
allure.dynamic.title(
f"Validate static session with container id not in session for {request.node.callspec.id}"
)
storage_object = storage_objects[0]
with pytest.raises(Exception, match=UNRELATED_CONTAINER):
get_object_from_random_node(
user_wallet.path,
storage_containers[1],
storage_object.oid,
self.shell,
self.cluster,
session=static_sessions[ObjectVerb.GET],
)
@allure.title("Validate static session which signed by another wallet")
def test_static_session_signed_by_other(
self,
owner_wallet: WalletInfo,
user_wallet: WalletInfo,
stranger_wallet: WalletInfo,
storage_containers: list[str],
storage_objects: list[StorageObjectInfo],
temp_directory: str,
request: FixtureRequest,
):
"""
Validate static session which signed by another wallet
"""
allure.dynamic.title(
f"Validate static session which signed by another wallet for {request.node.callspec.id}"
)
storage_object = storage_objects[0]
session_token_file = generate_object_session_token(
owner_wallet,
user_wallet,
[storage_object.oid],
storage_containers[0],
ObjectVerb.HEAD,
temp_directory,
)
signed_token_file = sign_session_token(self.shell, session_token_file, stranger_wallet)
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
head_object(
user_wallet.path,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
session=signed_token_file,
)
@allure.title("Validate static session which signed for another container")
def test_static_session_signed_for_other_container(
self,
owner_wallet: WalletInfo,
user_wallet: WalletInfo,
storage_containers: list[str],
storage_objects: list[StorageObjectInfo],
temp_directory: str,
request: FixtureRequest,
):
"""
Validate static session which signed for another container
"""
allure.dynamic.title(
f"Validate static session which signed for another container for {request.node.callspec.id}"
)
storage_object = storage_objects[0]
container = storage_containers[1]
session_token_file = generate_object_session_token(
owner_wallet,
user_wallet,
[storage_object.oid],
container,
ObjectVerb.HEAD,
temp_directory,
)
signed_token_file = sign_session_token(self.shell, session_token_file, owner_wallet)
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
head_object(
user_wallet.path,
container,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
session=signed_token_file,
)
@allure.title("Validate static session which wasn't signed")
def test_static_session_without_sign(
self,
owner_wallet: WalletInfo,
user_wallet: WalletInfo,
storage_containers: list[str],
storage_objects: list[StorageObjectInfo],
temp_directory: str,
request: FixtureRequest,
):
"""
Validate static session which wasn't signed
"""
allure.dynamic.title(
f"Validate static session which wasn't signed for {request.node.callspec.id}"
)
storage_object = storage_objects[0]
session_token_file = generate_object_session_token(
owner_wallet,
user_wallet,
[storage_object.oid],
storage_containers[0],
ObjectVerb.HEAD,
temp_directory,
)
with pytest.raises(Exception, match=INVALID_SIGNATURE):
head_object(
user_wallet.path,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
session=session_token_file,
)
@allure.title("Validate static session which expires at next epoch")
def test_static_session_expiration_at_next(
self,
owner_wallet: WalletInfo,
user_wallet: WalletInfo,
storage_containers: list[str],
storage_objects: list[StorageObjectInfo],
temp_directory: str,
request: FixtureRequest,
):
"""
Validate static session which expires at next epoch
"""
allure.dynamic.title(
f"Validate static session which expires at next epoch for {request.node.callspec.id}"
)
epoch = ensure_fresh_epoch(self.shell, self.cluster)
container = storage_containers[0]
object_id = storage_objects[0].oid
expiration = Lifetime(epoch + 1, epoch, epoch)
with allure.step("Create session token"):
token_expire_at_next_epoch = get_object_signed_token(
owner_wallet,
user_wallet,
container,
storage_objects,
ObjectVerb.HEAD,
self.shell,
temp_directory,
expiration,
)
with allure.step("Object should be available with session token after token creation"):
with expect_not_raises():
head_object(
user_wallet.path,
container,
object_id,
self.shell,
self.cluster.default_rpc_endpoint,
session=token_expire_at_next_epoch,
)
with allure.step(
"Object should be available at last epoch before session token expiration"
):
self.tick_epoch()
with expect_not_raises():
head_object(
user_wallet.path,
container,
object_id,
self.shell,
self.cluster.default_rpc_endpoint,
session=token_expire_at_next_epoch,
)
with allure.step("Object should NOT be available after session token expiration epoch"):
self.tick_epoch()
with pytest.raises(Exception, match=EXPIRED_SESSION_TOKEN):
head_object(
user_wallet.path,
container,
object_id,
self.shell,
self.cluster.default_rpc_endpoint,
session=token_expire_at_next_epoch,
)
@allure.title("Validate static session which is valid starting from next epoch")
def test_static_session_start_at_next(
self,
owner_wallet: WalletInfo,
user_wallet: WalletInfo,
storage_containers: list[str],
storage_objects: list[StorageObjectInfo],
temp_directory: str,
request: FixtureRequest,
):
"""
Validate static session which is valid starting from next epoch
"""
allure.dynamic.title(
f"Validate static session which is valid starting from next epoch for {request.node.callspec.id}"
)
epoch = ensure_fresh_epoch(self.shell, self.cluster)
container = storage_containers[0]
object_id = storage_objects[0].oid
expiration = Lifetime(epoch + 2, epoch + 1, epoch)
with allure.step("Create session token"):
token_start_at_next_epoch = get_object_signed_token(
owner_wallet,
user_wallet,
container,
storage_objects,
ObjectVerb.HEAD,
self.shell,
temp_directory,
expiration,
)
with allure.step("Object should NOT be available with session token after token creation"):
with pytest.raises(Exception, match=MALFORMED_REQUEST):
head_object(
user_wallet.path,
container,
object_id,
self.shell,
self.cluster.default_rpc_endpoint,
session=token_start_at_next_epoch,
)
with allure.step(
"Object should be available with session token starting from token nbf epoch"
):
self.tick_epoch()
with expect_not_raises():
head_object(
user_wallet.path,
container,
object_id,
self.shell,
self.cluster.default_rpc_endpoint,
session=token_start_at_next_epoch,
)
with allure.step(
"Object should be available at last epoch before session token expiration"
):
self.tick_epoch()
with expect_not_raises():
head_object(
user_wallet.path,
container,
object_id,
self.shell,
self.cluster.default_rpc_endpoint,
session=token_start_at_next_epoch,
)
with allure.step("Object should NOT be available after session token expiration epoch"):
self.tick_epoch()
with pytest.raises(Exception, match=EXPIRED_SESSION_TOKEN):
head_object(
user_wallet.path,
container,
object_id,
self.shell,
self.cluster.default_rpc_endpoint,
session=token_start_at_next_epoch,
)
@allure.title("Validate static session which is already expired")
def test_static_session_already_expired(
self,
owner_wallet: WalletInfo,
user_wallet: WalletInfo,
storage_containers: list[str],
storage_objects: list[StorageObjectInfo],
temp_directory: str,
request: FixtureRequest,
):
"""
Validate static session which is already expired
"""
allure.dynamic.title(
f"Validate static session which is already expired for {request.node.callspec.id}"
)
epoch = ensure_fresh_epoch(self.shell, self.cluster)
container = storage_containers[0]
object_id = storage_objects[0].oid
expiration = Lifetime(epoch - 1, epoch - 2, epoch - 2)
token_already_expired = get_object_signed_token(
owner_wallet,
user_wallet,
container,
storage_objects,
ObjectVerb.HEAD,
self.shell,
temp_directory,
expiration,
)
with pytest.raises(Exception, match=EXPIRED_SESSION_TOKEN):
head_object(
user_wallet.path,
container,
object_id,
self.shell,
self.cluster.default_rpc_endpoint,
session=token_already_expired,
)
@allure.title("Delete verb should be restricted for static session")
def test_static_session_delete_verb(
self,
user_wallet: WalletInfo,
storage_objects: list[StorageObjectInfo],
static_sessions: dict[ObjectVerb, str],
request: FixtureRequest,
):
"""
Delete verb should be restricted for static session
"""
allure.dynamic.title(
f"Delete verb should be restricted for static session for {request.node.callspec.id}"
)
storage_object = storage_objects[0]
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
delete_object(
user_wallet.path,
storage_object.cid,
storage_object.oid,
self.shell,
endpoint=self.cluster.default_rpc_endpoint,
session=static_sessions[ObjectVerb.DELETE],
)
@allure.title("Put verb should be restricted for static session")
def test_static_session_put_verb(
self,
user_wallet: WalletInfo,
storage_objects: list[StorageObjectInfo],
static_sessions: dict[ObjectVerb, str],
request: FixtureRequest,
):
"""
Put verb should be restricted for static session
"""
allure.dynamic.title(
f"Put verb should be restricted for static session for {request.node.callspec.id}"
)
storage_object = storage_objects[0]
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
put_object_to_random_node(
user_wallet.path,
storage_object.file_path,
storage_object.cid,
self.shell,
self.cluster,
session=static_sessions[ObjectVerb.PUT],
)
@allure.title("Validate static session which is issued in future epoch")
def test_static_session_invalid_issued_epoch(
self,
owner_wallet: WalletInfo,
user_wallet: WalletInfo,
storage_containers: list[str],
storage_objects: list[StorageObjectInfo],
temp_directory: str,
request: FixtureRequest,
):
"""
Validate static session which is issued in future epoch
"""
allure.dynamic.title(
f"Validate static session which is issued in future epoch for {request.node.callspec.id}"
)
epoch = ensure_fresh_epoch(self.shell, self.cluster)
container = storage_containers[0]
object_id = storage_objects[0].oid
expiration = Lifetime(epoch + 10, 0, epoch + 1)
token_invalid_issue_time = get_object_signed_token(
owner_wallet,
user_wallet,
container,
storage_objects,
ObjectVerb.HEAD,
self.shell,
temp_directory,
expiration,
)
with pytest.raises(Exception, match=MALFORMED_REQUEST):
head_object(
user_wallet.path,
container,
object_id,
self.shell,
self.cluster.default_rpc_endpoint,
session=token_invalid_issue_time,
)

View file

@ -1,174 +0,0 @@
import allure
import pytest
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.acl import create_eacl, set_eacl, wait_for_cache_expired
from frostfs_testlib.steps.cli.container import (
create_container,
delete_container,
get_container,
list_containers,
)
from frostfs_testlib.steps.session_token import ContainerVerb, get_container_signed_token
from frostfs_testlib.storage.dataclasses.acl import EACLAccess, EACLOperation, EACLRole, EACLRule
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
from pytest_tests.helpers.object_access import can_put_object
@pytest.mark.static_session_container
class TestSessionTokenContainer(ClusterTestBase):
@pytest.fixture(scope="module")
def static_sessions(
self,
owner_wallet: WalletInfo,
user_wallet: WalletInfo,
client_shell: Shell,
temp_directory: str,
) -> dict[ContainerVerb, str]:
"""
Returns dict with static session token file paths for all verbs with default lifetime
"""
return {
verb: get_container_signed_token(
owner_wallet, user_wallet, verb, client_shell, temp_directory
)
for verb in ContainerVerb
}
def test_static_session_token_container_create(
self,
owner_wallet: WalletInfo,
user_wallet: WalletInfo,
static_sessions: dict[ContainerVerb, str],
):
"""
Validate static session with create operation
"""
with allure.step("Create container with static session token"):
cid = create_container(
user_wallet.path,
session_token=static_sessions[ContainerVerb.CREATE],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
wait_for_creation=False,
)
container_info: dict[str, str] = get_container(
owner_wallet.path, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
assert container_info["ownerID"] == owner_wallet.get_address()
assert cid not in list_containers(
user_wallet.path, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
assert cid in list_containers(
owner_wallet.path, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
def test_static_session_token_container_create_with_other_verb(
self,
user_wallet: WalletInfo,
static_sessions: dict[ContainerVerb, str],
):
"""
Validate static session without create operation
"""
with allure.step("Try create container with static session token without PUT rule"):
for verb in [verb for verb in ContainerVerb if verb != ContainerVerb.CREATE]:
with pytest.raises(Exception):
create_container(
user_wallet.path,
session_token=static_sessions[verb],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
wait_for_creation=False,
)
def test_static_session_token_container_create_with_other_wallet(
self,
stranger_wallet: WalletInfo,
static_sessions: dict[ContainerVerb, str],
):
"""
Validate static session with create operation for other wallet
"""
with allure.step("Try create container with static session token without PUT rule"):
with pytest.raises(Exception):
create_container(
stranger_wallet.path,
session_token=static_sessions[ContainerVerb.CREATE],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
wait_for_creation=False,
)
def test_static_session_token_container_delete(
self,
owner_wallet: WalletInfo,
user_wallet: WalletInfo,
static_sessions: dict[ContainerVerb, str],
):
"""
Validate static session with delete operation
"""
with allure.step("Create container"):
cid = create_container(
owner_wallet.path,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
wait_for_creation=False,
)
with allure.step("Delete container with static session token"):
delete_container(
wallet=user_wallet.path,
cid=cid,
session_token=static_sessions[ContainerVerb.DELETE],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
await_mode=True,
)
assert cid not in list_containers(
owner_wallet.path, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
def test_static_session_token_container_set_eacl(
self,
owner_wallet: WalletInfo,
user_wallet: WalletInfo,
stranger_wallet: WalletInfo,
static_sessions: dict[ContainerVerb, str],
simple_object_size,
):
"""
Validate static session with set eacl operation
"""
with allure.step("Create container"):
cid = create_container(
owner_wallet.path,
basic_acl=PUBLIC_ACL,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
file_path = generate_file(simple_object_size)
assert can_put_object(stranger_wallet.path, cid, file_path, self.shell, self.cluster)
with allure.step("Deny all operations for other via eACL"):
eacl_deny = [
EACLRule(access=EACLAccess.DENY, role=EACLRole.OTHERS, operation=op)
for op in EACLOperation
]
set_eacl(
user_wallet.path,
cid,
create_eacl(cid, eacl_deny, shell=self.shell),
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
session_token=static_sessions[ContainerVerb.SETEACL],
)
wait_for_cache_expired()
assert not can_put_object(stranger_wallet.path, cid, file_path, self.shell, self.cluster)

View file

@ -1,155 +0,0 @@
import json
import pathlib
import re
from dataclasses import dataclass
from io import StringIO
import allure
import pytest
import yaml
from configobj import ConfigObj
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
from frostfs_testlib.storage.cluster import Cluster, StorageNode
SHARD_PREFIX = "FROSTFS_STORAGE_SHARD_"
BLOBSTOR_PREFIX = "_BLOBSTOR_"
@dataclass
class Blobstor:
path: str
path_type: str
def __eq__(self, other) -> bool:
if not isinstance(other, self.__class__):
raise RuntimeError(f"Only two {self.__class__.__name__} instances can be compared")
return self.path == other.path and self.path_type == other.path_type
def __hash__(self):
return hash((self.path, self.path_type))
@staticmethod
def from_config_object(section: ConfigObj, shard_id: str, blobstor_id: str):
var_prefix = f"{SHARD_PREFIX}{shard_id}{BLOBSTOR_PREFIX}{blobstor_id}"
return Blobstor(section.get(f"{var_prefix}_PATH"), section.get(f"{var_prefix}_TYPE"))
@dataclass
class Shard:
blobstor: list[Blobstor]
metabase: str
writecache: str
def __eq__(self, other) -> bool:
if not isinstance(other, self.__class__):
raise RuntimeError(f"Only two {self.__class__.__name__} instances can be compared")
return (
set(self.blobstor) == set(other.blobstor)
and self.metabase == other.metabase
and self.writecache == other.writecache
)
def __hash__(self):
return hash((self.metabase, self.writecache))
@staticmethod
def _get_blobstor_count_from_section(config_object: ConfigObj, shard_id: int):
pattern = f"{SHARD_PREFIX}{shard_id}{BLOBSTOR_PREFIX}"
blobstors = {key[: len(pattern) + 2] for key in config_object.keys() if pattern in key}
return len(blobstors)
@staticmethod
def from_config_object(config_object: ConfigObj, shard_id: int):
var_prefix = f"{SHARD_PREFIX}{shard_id}"
blobstor_count = Shard._get_blobstor_count_from_section(config_object, shard_id)
blobstors = [
Blobstor.from_config_object(config_object, shard_id, blobstor_id)
for blobstor_id in range(blobstor_count)
]
write_cache_enabled = config_object.as_bool(f"{var_prefix}_WRITECACHE_ENABLED")
return Shard(
blobstors,
config_object.get(f"{var_prefix}_METABASE_PATH"),
config_object.get(f"{var_prefix}_WRITECACHE_PATH") if write_cache_enabled else "",
)
@staticmethod
def from_object(shard):
metabase = shard["metabase"]["path"] if "path" in shard["metabase"] else shard["metabase"]
writecache = (
shard["writecache"]["path"] if "path" in shard["writecache"] else shard["writecache"]
)
return Shard(
blobstor=[
Blobstor(path=blobstor["path"], path_type=blobstor["type"])
for blobstor in shard["blobstor"]
],
metabase=metabase,
writecache=writecache,
)
def shards_from_yaml(contents: str) -> list[Shard]:
config = yaml.safe_load(contents)
config["storage"]["shard"].pop("default")
return [Shard.from_object(shard) for shard in config["storage"]["shard"].values()]
def shards_from_env(contents: str) -> list[Shard]:
configObj = ConfigObj(StringIO(contents))
    pattern = rf"{SHARD_PREFIX}\d*"
num_shards = len(set(re.findall(pattern, contents)))
return [Shard.from_config_object(configObj, shard_id) for shard_id in range(num_shards)]
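# A minimal sketch of the inputs these parsers expect (paths and the blobstor type value
# are illustrative placeholders):
#   .env form:
#     FROSTFS_STORAGE_SHARD_0_METABASE_PATH=/meta0
#     FROSTFS_STORAGE_SHARD_0_WRITECACHE_ENABLED=true
#     FROSTFS_STORAGE_SHARD_0_WRITECACHE_PATH=/wc0
#     FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_PATH=/blob0
#     FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_TYPE=blobovnicza
#   yaml form (the "default" shard entry is dropped by shards_from_yaml):
#     storage:
#       shard:
#         default: {}
#         0:
#           metabase: {path: /meta0}
#           writecache: {path: /wc0}
#           blobstor:
#             - {path: /blob0, type: blobovnicza}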
@pytest.mark.sanity
@pytest.mark.shard
class TestControlShard:
@staticmethod
def get_shards_from_config(node: StorageNode) -> list[Shard]:
config_file = node.get_remote_config_path()
file_type = pathlib.Path(config_file).suffix
contents = node.host.get_shell().exec(f"cat {config_file}").stdout
parser_method = {
".env": shards_from_env,
".yaml": shards_from_yaml,
".yml": shards_from_yaml,
}
shards = parser_method[file_type](contents)
return shards
@staticmethod
def get_shards_from_cli(node: StorageNode) -> list[Shard]:
wallet_path = node.get_remote_wallet_path()
wallet_password = node.get_wallet_password()
control_endpoint = node.get_control_endpoint()
cli_config = node.host.get_cli_config("frostfs-cli")
cli = FrostfsCli(node.host.get_shell(), cli_config.exec_path, DEFAULT_WALLET_CONFIG)
result = cli.shards.list(
endpoint=control_endpoint,
wallet=wallet_path,
wallet_password=wallet_password,
json_mode=True,
timeout=CLI_DEFAULT_TIMEOUT,
)
return [Shard.from_object(shard) for shard in json.loads(result.stdout.split(">", 1)[1])]
@allure.title("All shards are available")
def test_control_shard(self, cluster: Cluster):
for storage_node in cluster.storage_nodes:
shards_from_config = self.get_shards_from_config(storage_node)
shards_from_cli = self.get_shards_from_cli(storage_node)
assert set(shards_from_config) == set(shards_from_cli)

View file

@ -1,46 +0,0 @@
import os
import shutil
from datetime import datetime
import allure
import pytest
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
class TestLogs(ClusterTestBase):
@pytest.mark.logs_after_session
def test_logs_after_session(self, temp_directory: str, session_start_time: datetime):
"""
        This test is automatically added to any test run to check cluster logs for critical errors.
"""
end_time = datetime.utcnow()
logs_dir = os.path.join(temp_directory, "logs")
os.makedirs(logs_dir)
issues_regex = r"\Wpanic\W|\Woom\W|\Wtoo many\W|\Winsufficient funds\W"
hosts_with_problems = []
for host in self.cluster.hosts:
with allure.step(f"Check logs on {host.config.address}"):
if host.is_message_in_logs(issues_regex, session_start_time, end_time):
hosts_with_problems.append(host.config.address)
host.dump_logs(
logs_dir,
since=session_start_time,
until=end_time,
filter_regex=issues_regex,
)
if hosts_with_problems:
self._attach_logs(logs_dir)
assert (
not hosts_with_problems
), f"The following hosts contains contain critical errors in system logs: {', '.join(hosts_with_problems)}"
def _attach_logs(self, logs_dir: str) -> None:
# Zip all files and attach to Allure because it is more convenient to download a single
        # zip with all logs rather than mess with individual log files per service or node
logs_zip_file_path = shutil.make_archive(logs_dir, "zip", logs_dir)
allure.attach.file(logs_zip_file_path, name="logs.zip", extension="zip")

View file

@ -0,0 +1 @@
password: ""

View file

@ -1,16 +1,8 @@
allure-pytest==2.13.2
allure-python-commons==2.13.2
base58==2.1.0
boto3==1.16.33
botocore==1.19.33
configobj==5.0.6
frostfs-testlib>=2.0.1
neo-mamba==1.0.0
robotframework==4.1.2
requests==2.25.1
pexpect==4.8.0
pyyaml==6.0
pytest==7.1.2
pytest-lazy-fixture==0.6.3
python-dateutil==2.8.2
requests==2.28.0
tenacity==8.0.1
urllib3==1.26.9
boto3==1.16.33
docker==4.4.0
botocore==1.19.33
urllib3==1.26.3
base58==1.0.3

View file

@ -1,3 +0,0 @@
pre-commit==2.20.0
isort==5.12.0
pylint==2.17.4

View file

@ -0,0 +1,74 @@
{
"records": [
{
"operation": "GET",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "HEAD",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "PUT",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "DELETE",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "SEARCH",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "GETRANGE",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "GETRANGEHASH",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
}
]
}

View file

@ -0,0 +1,74 @@
{
"records": [
{
"operation": "GET",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "SYSTEM"
}
]
},
{
"operation": "HEAD",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "SYSTEM"
}
]
},
{
"operation": "PUT",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "SYSTEM"
}
]
},
{
"operation": "DELETE",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "SYSTEM"
}
]
},
{
"operation": "SEARCH",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "SYSTEM"
}
]
},
{
"operation": "GETRANGE",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "SYSTEM"
}
]
},
{
"operation": "GETRANGEHASH",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "SYSTEM"
}
]
}
]
}

View file

@ -0,0 +1,74 @@
{
"records": [
{
"operation": "GET",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "USER"
}
]
},
{
"operation": "HEAD",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "USER"
}
]
},
{
"operation": "PUT",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "USER"
}
]
},
{
"operation": "DELETE",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "USER"
}
]
},
{
"operation": "SEARCH",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "USER"
}
]
},
{
"operation": "GETRANGE",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "USER"
}
]
},
{
"operation": "GETRANGEHASH",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "USER"
}
]
}
]
}

View file

@ -0,0 +1,34 @@
{
"records": [
{
"operation": "DELETE",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "PUT",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "HEAD",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
}
]
}

View file

@ -0,0 +1,34 @@
{
"records": [
{
"operation": "DELETE",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "SYSTEM"
}
]
},
{
"operation": "PUT",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "SYSTEM"
}
]
},
{
"operation": "HEAD",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "SYSTEM"
}
]
}
]
}

View file

@ -0,0 +1,34 @@
{
"records": [
{
"operation": "DELETE",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "USER"
}
]
},
{
"operation": "PUT",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "USER"
}
]
},
{
"operation": "HEAD",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "USER"
}
]
}
]
}

View file

@ -0,0 +1,44 @@
{
"records": [
{
"operation": "GET",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "GETRANGE",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "GETRANGEHASH",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "HEAD",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
}
]
}

View file

@ -0,0 +1,44 @@
{
"records": [
{
"operation": "GET",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "SYSTEM"
}
]
},
{
"operation": "GETRANGE",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "SYSTEM"
}
]
},
{
"operation": "GETRANGEHASH",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "SYSTEM"
}
]
},
{
"operation": "HEAD",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "SYSTEM"
}
]
}
]
}

View file

@ -0,0 +1,44 @@
{
"records": [
{
"operation": "GET",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "USER"
}
]
},
{
"operation": "GETRANGE",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "USER"
}
]
},
{
"operation": "GETRANGEHASH",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "USER"
}
]
},
{
"operation": "HEAD",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "USER"
}
]
}
]
}

View file

@ -0,0 +1,34 @@
{
"records": [
{
"operation": "GETRANGEHASH",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "GETRANGE",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "GET",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
}
]
}

View file

@ -0,0 +1,34 @@
{
"records": [
{
"operation": "GETRANGEHASH",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "SYSTEM"
}
]
},
{
"operation": "GETRANGE",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "SYSTEM"
}
]
},
{
"operation": "GET",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "SYSTEM"
}
]
}
]
}

View file

@ -0,0 +1,34 @@
{
"records": [
{
"operation": "GETRANGEHASH",
"action": "ALLOW",
"filters": [],
"targets": [
{
"role": "USER"
}
]
},
{
"operation": "GETRANGE",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "USER"
}
]
},
{
"operation": "GET",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "USER"
}
]
}
]
}

View file

@ -0,0 +1,74 @@
{
"records": [
{
"operation": "GET",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "HEAD",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "PUT",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "DELETE",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "SEARCH",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "GETRANGE",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "GETRANGEHASH",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
}
]
}

View file

@ -0,0 +1,74 @@
{
"records": [
{
"operation": "GET",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "SYSTEM"
}
]
},
{
"operation": "HEAD",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "SYSTEM"
}
]
},
{
"operation": "PUT",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "SYSTEM"
}
]
},
{
"operation": "DELETE",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "SYSTEM"
}
]
},
{
"operation": "SEARCH",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "SYSTEM"
}
]
},
{
"operation": "GETRANGE",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "SYSTEM"
}
]
},
{
"operation": "GETRANGEHASH",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "SYSTEM"
}
]
}
]
}

View file

@ -0,0 +1,74 @@
{
"records": [
{
"operation": "GET",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "USER"
}
]
},
{
"operation": "HEAD",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "USER"
}
]
},
{
"operation": "PUT",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "USER"
}
]
},
{
"operation": "DELETE",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "USER"
}
]
},
{
"operation": "SEARCH",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "USER"
}
]
},
{
"operation": "GETRANGE",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "USER"
}
]
},
{
"operation": "GETRANGEHASH",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "USER"
}
]
}
]
}

View file

@ -0,0 +1,193 @@
{
"records": [
{
"operation": "GET",
"action": "ALLOW",
"filters": [
{
"headerType": "REQUEST",
"matchType": "STRING_EQUAL",
"key": "a",
"value": "2"
}
],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "GET",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "HEAD",
"action": "ALLOW",
"filters": [
{
"headerType": "REQUEST",
"matchType": "STRING_EQUAL",
"key": "a",
"value": "2"
}
],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "HEAD",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "PUT",
"action": "ALLOW",
"filters": [
{
"headerType": "REQUEST",
"matchType": "STRING_EQUAL",
"key": "a",
"value": "2"
}
],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "PUT",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "DELETE",
"action": "ALLOW",
"filters": [
{
"headerType": "REQUEST",
"matchType": "STRING_EQUAL",
"key": "a",
"value": "2"
}
],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "DELETE",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "SEARCH",
"action": "ALLOW",
"filters": [
{
"headerType": "REQUEST",
"matchType": "STRING_EQUAL",
"key": "a",
"value": "2"
}
],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "SEARCH",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "GETRANGE",
"action": "ALLOW",
"filters": [
{
"headerType": "REQUEST",
"matchType": "STRING_EQUAL",
"key": "a",
"value": "2"
}
],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "GETRANGE",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "GETRANGEHASH",
"action": "ALLOW",
"filters": [
{
"headerType": "REQUEST",
"matchType": "STRING_EQUAL",
"key": "a",
"value": "2"
}
],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "GETRANGEHASH",
"action": "DENY",
"filters": [],
"targets": [
{
"role": "OTHERS"
}
]
}
]
}

View file

@ -0,0 +1,123 @@
{
"records": [
{
"operation": "GET",
"action": "DENY",
"filters": [
{
"headerType": "REQUEST",
"matchType": "STRING_EQUAL",
"key": "a",
"value": "2"
}
],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "HEAD",
"action": "DENY",
"filters": [
{
"headerType": "REQUEST",
"matchType": "STRING_EQUAL",
"key": "a",
"value": "2"
}
],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "PUT",
"action": "DENY",
"filters": [
{
"headerType": "REQUEST",
"matchType": "STRING_EQUAL",
"key": "a",
"value": "2"
}
],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "DELETE",
"action": "DENY",
"filters": [
{
"headerType": "REQUEST",
"matchType": "STRING_EQUAL",
"key": "a",
"value": "2"
}
],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "SEARCH",
"action": "DENY",
"filters": [
{
"headerType": "REQUEST",
"matchType": "STRING_EQUAL",
"key": "a",
"value": "2"
}
],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "GETRANGE",
"action": "DENY",
"filters": [
{
"headerType": "REQUEST",
"matchType": "STRING_EQUAL",
"key": "a",
"value": "2"
}
],
"targets": [
{
"role": "OTHERS"
}
]
},
{
"operation": "GETRANGEHASH",
"action": "DENY",
"filters": [
{
"headerType": "REQUEST",
"matchType": "STRING_EQUAL",
"key": "a",
"value": "2"
}
],
"targets": [
{
"role": "OTHERS"
}
]
}
]
}

View file

@ -0,0 +1,41 @@
{
"records":
[
{
"operation":"PUT",
"action":"ALLOW",
"filters":[],
"targets":
[
{
"role":"OTHERS",
"keys":[]
}
]
},
{
"operation":"SEARCH",
"action":"ALLOW",
"filters":[],
"targets":
[
{
"role":"OTHERS",
"keys":[]
}
]
},
{
"operation":"GET",
"action":"ALLOW",
"filters":[],
"targets":
[
{
"role":"OTHERS",
"keys":[]
}
]
}
]
}

View file

@ -0,0 +1,196 @@
#!/usr/bin/python3.8
import base64
import json
import os
import re
import uuid
from enum import Enum, auto
import base58
from cli_helpers import _cmd_run
from common import ASSETS_DIR, NEOFS_ENDPOINT, WALLET_CONFIG
from data_formatters import pub_key_hex
from robot.api import logger
from robot.api.deco import keyword
"""
Robot Keywords and helper functions for working with NeoFS ACL.
"""
ROBOT_AUTO_KEYWORDS = False
# path to neofs-cli executable
NEOFS_CLI_EXEC = os.getenv('NEOFS_CLI_EXEC', 'neofs-cli')
EACL_LIFETIME = 100500
class AutoName(Enum):
def _generate_next_value_(name, start, count, last_values):
return name
class Role(AutoName):
USER = auto()
SYSTEM = auto()
OTHERS = auto()
@keyword('Get eACL')
def get_eacl(wallet_path: str, cid: str):
cmd = (
f'{NEOFS_CLI_EXEC} --rpc-endpoint {NEOFS_ENDPOINT} --wallet {wallet_path} '
f'container get-eacl --cid {cid} --config {WALLET_CONFIG}'
)
try:
output = _cmd_run(cmd)
if re.search(r'extended ACL table is not set for this container', output):
return None
return output
except RuntimeError as exc:
logger.info("Extended ACL table is not set for this container")
logger.info(f"Got exception while getting eacl: {exc}")
return None
@keyword('Set eACL')
def set_eacl(wallet_path: str, cid: str, eacl_table_path: str):
cmd = (
f'{NEOFS_CLI_EXEC} --rpc-endpoint {NEOFS_ENDPOINT} --wallet {wallet_path} '
f'container set-eacl --cid {cid} --table {eacl_table_path} --config {WALLET_CONFIG} --await'
)
_cmd_run(cmd)
def _encode_cid_for_eacl(cid: str) -> str:
cid_base58 = base58.b58decode(cid)
return base64.b64encode(cid_base58).decode("utf-8")
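# Note (editorial, not part of the original file): the CLI prints container IDs in
# Base58, while the protobuf JSON body of the bearer token expects Base64, hence the
# b58decode -> b64encode reencoding in the helper above.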
@keyword('Create eACL')
def create_eacl(cid: str, rules_list: list):
table = f"{os.getcwd()}/{ASSETS_DIR}/eacl_table_{str(uuid.uuid4())}.json"
rules = ""
for rule in rules_list:
# TODO: check if $Object: is still necessary for filtering in the newest releases
rules += f"--rule '{rule}' "
cmd = (
f"{NEOFS_CLI_EXEC} acl extended create --cid {cid} "
f"{rules}--out {table}"
)
_cmd_run(cmd)
with open(table, 'r') as fout:
table_data = fout.read()
logger.info(f"Generated eACL:\n{table_data}")
return table
@keyword('Form BearerToken File')
def form_bearertoken_file(wif: str, cid: str, eacl_records: list) -> str:
"""
    This function fetches the eACL for the given <cid> on behalf of <wif>,
    then extends it with the records taken from <eacl_records>, signs the
    result as a bearer token and writes it to a file.
"""
enc_cid = _encode_cid_for_eacl(cid)
file_path = f"{os.getcwd()}/{ASSETS_DIR}/{str(uuid.uuid4())}"
eacl = get_eacl(wif, cid)
json_eacl = dict()
if eacl:
eacl = eacl.replace('eACL: ', '')
eacl = eacl.split('Signature')[0]
json_eacl = json.loads(eacl)
logger.info(json_eacl)
eacl_result = {
"body":
{
"eaclTable":
{
"containerID":
{
"value": enc_cid
},
"records": []
},
"lifetime":
{
"exp": EACL_LIFETIME,
"nbf": "1",
"iat": "0"
}
}
}
if not eacl_records:
raise (f"Got empty eacl_records list: {eacl_records}")
for record in eacl_records:
op_data = {
"operation": record['Operation'],
"action": record['Access'],
"filters": [],
"targets": []
}
if Role(record['Role']):
op_data['targets'] = [
{
"role": record['Role']
}
]
else:
op_data['targets'] = [
{
"keys": [record['Role']]
}
]
if 'Filters' in record.keys():
op_data["filters"].append(record['Filters'])
eacl_result["body"]["eaclTable"]["records"].append(op_data)
# Add records from current eACL
if "records" in json_eacl.keys():
for record in json_eacl["records"]:
eacl_result["body"]["eaclTable"]["records"].append(record)
with open(file_path, 'w', encoding='utf-8') as eacl_file:
json.dump(eacl_result, eacl_file, ensure_ascii=False, indent=4)
logger.info(f"Got these extended ACL records: {eacl_result}")
sign_bearer_token(wif, file_path)
return file_path
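# Illustrative sketch (not part of the original file): form_bearertoken_file() expects
# eacl_records shaped like the dicts below; 'Operation', 'Access' and 'Role' are the
# keys read by the loop above, 'Filters' is optional. The values here are hypothetical.
#
#   records = [
#       {'Operation': 'GET', 'Access': 'DENY', 'Role': 'OTHERS'},
#       {'Operation': 'PUT', 'Access': 'ALLOW', 'Role': 'USER'},
#   ]
#   token_path = form_bearertoken_file(wif, cid, records)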
@keyword('EACL Rules')
def eacl_rules(access: str, verbs: list, user: str):
"""
This function creates a list of eACL rules.
Args:
        access (str): identifies whether the following operation(s)
            are allowed or denied
verbs (list): a list of operations to set rules for
user (str): a group of users (user/others) or a wallet of
a certain user for whom rules are set
Returns:
(list): a list of eACL rules
"""
if user not in ('others', 'user'):
pubkey = pub_key_hex(user)
user = f"pubkey:{pubkey}"
rules = []
for verb in verbs:
elements = [access, verb, user]
rules.append(' '.join(elements))
return rules
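# Illustrative usage (not part of the original file) of the function above:
#
#   eacl_rules('deny', ['GET', 'PUT'], 'others')
#   # -> ["deny GET others", "deny PUT others"]
#
# Passing a wallet path instead of 'user'/'others' resolves it to "pubkey:<hex>".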
def sign_bearer_token(wallet_path: str, eacl_rules_file: str):
cmd = (
f'{NEOFS_CLI_EXEC} util sign bearer-token --from {eacl_rules_file} '
f'--to {eacl_rules_file} --wallet {wallet_path} --config {WALLET_CONFIG} --json'
)
_cmd_run(cmd)

View file

@ -0,0 +1,219 @@
import json
import logging
import os
import allure
from cli_helpers import _cmd_run, _configure_aws_cli
from common import ASSETS_DIR, S3_GATE
logger = logging.getLogger('NeoLogger')
class AwsCliClient:
def __init__(self, access_key_id: str, secret_access_key: str):
self.access_key_id = access_key_id
self.secret_access_key = secret_access_key
self.config_aws_client()
def config_aws_client(self):
cmd = 'aws configure'
logger.info(f'Executing command: {cmd}')
_configure_aws_cli(cmd, self.access_key_id, self.secret_access_key)
def create_bucket(self, Bucket: str):
cmd = f'aws --no-verify-ssl s3api create-bucket --bucket {Bucket} --endpoint-url {S3_GATE}'
_cmd_run(cmd)
def list_buckets(self) -> dict:
cmd = f'aws --no-verify-ssl s3api list-buckets --endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def get_bucket_versioning(self, Bucket: str) -> dict:
cmd = f'aws --no-verify-ssl s3api get-bucket-versioning --bucket {Bucket}' \
f' --endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def put_bucket_versioning(self, Bucket: str, VersioningConfiguration: dict) -> dict:
cmd = f'aws --no-verify-ssl s3api put-bucket-versioning --bucket {Bucket} ' \
f'--versioning-configuration Status={VersioningConfiguration.get("Status")}' \
f' --endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def list_objects(self, Bucket: str) -> dict:
cmd = f'aws --no-verify-ssl s3api list-objects --bucket {Bucket}' \
f' --endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def list_objects_v2(self, Bucket: str) -> dict:
cmd = f'aws --no-verify-ssl s3api list-objects-v2 --bucket {Bucket}' \
f' --endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def list_object_versions(self, Bucket: str) -> dict:
cmd = f'aws --no-verify-ssl s3api list-object-versions --bucket {Bucket}' \
f' --endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def copy_object(self, Bucket: str, CopySource: str, Key: str) -> dict:
cmd = f'aws --no-verify-ssl s3api copy-object --copy-source {CopySource} --bucket {Bucket} --key {Key}' \
f' --endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def head_bucket(self, Bucket: str) -> dict:
cmd = f'aws --no-verify-ssl s3api head-bucket --bucket {Bucket} --endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def put_object(self, Body: str, Bucket: str, Key: str) -> dict:
cmd = f' aws --no-verify-ssl s3api put-object --bucket {Bucket} --key {Key} --body {Body}' \
f' --endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def head_object(self, Bucket: str, Key: str, VersionId: str = None) -> dict:
version = f' --version-id {VersionId}' if VersionId else ''
cmd = f' aws --no-verify-ssl s3api head-object --bucket {Bucket} --key {Key} {version}' \
f' --endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def get_object(self, Bucket: str, Key: str, file_path: str, VersionId: str = None) -> dict:
version = f' --version-id {VersionId}' if VersionId else ''
cmd = f' aws --no-verify-ssl s3api get-object --bucket {Bucket} ' \
f'--key {Key} {version} {file_path} --endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def delete_objects(self, Bucket: str, Delete: dict) -> dict:
file_path = f"{os.getcwd()}/{ASSETS_DIR}/delete.json"
with open(file_path, 'w') as out_file:
out_file.write(json.dumps(Delete))
cmd = f'aws --no-verify-ssl s3api delete-objects --bucket {Bucket} --delete file://{file_path} ' \
f'--endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def delete_object(self, Bucket: str, Key: str, VersionId: str = None) -> dict:
version = f' --version-id {VersionId}' if VersionId else ''
cmd = f'aws --no-verify-ssl s3api delete-object --bucket {Bucket} --key {Key} {version}' \
f' --endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def get_object_attributes(self, bucket: str, key: str, *attributes: str, version_id: str = None,
max_parts: int = None, part_number: int = None) -> dict:
attrs = ','.join(attributes)
version = f' --version-id {version_id}' if version_id else ''
parts = f'--max-parts {max_parts}' if max_parts else ''
part_number = f'--part-number-marker {part_number}' if part_number else ''
cmd = f'aws --no-verify-ssl s3api get-object-attributes --bucket {bucket} --key {key} {version}' \
f' {parts} {part_number} --object-attributes {attrs} --endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def delete_bucket(self, Bucket: str) -> dict:
cmd = f'aws --no-verify-ssl s3api delete-bucket --bucket {Bucket} --endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def get_bucket_tagging(self, Bucket: str) -> dict:
cmd = f'aws --no-verify-ssl s3api get-bucket-tagging --bucket {Bucket} --endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def put_bucket_tagging(self, Bucket: str, Tagging: dict) -> dict:
cmd = f'aws --no-verify-ssl s3api put-bucket-tagging --bucket {Bucket} --tagging \'{json.dumps(Tagging)}\'' \
f' --endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def delete_bucket_tagging(self, Bucket: str) -> dict:
cmd = f'aws --no-verify-ssl s3api delete-bucket-tagging --bucket {Bucket} --endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def put_object_tagging(self, Bucket: str, Key: str, Tagging: dict) -> dict:
cmd = f'aws --no-verify-ssl s3api put-object-tagging --bucket {Bucket} --key {Key}' \
f' --tagging \'{json.dumps(Tagging)}\'' \
f' --endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def get_object_tagging(self, Bucket: str, Key: str) -> dict:
cmd = f'aws --no-verify-ssl s3api get-object-tagging --bucket {Bucket} --key {Key} --endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def delete_object_tagging(self, Bucket: str, Key: str) -> dict:
cmd = f'aws --no-verify-ssl s3api delete-object-tagging --bucket {Bucket} --key {Key} --endpoint {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
@allure.step('Sync directory S3')
def sync(self, bucket_name: str, dir_path: str) -> dict:
cmd = f'aws --no-verify-ssl s3 sync {dir_path} s3://{bucket_name} --endpoint-url {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def create_multipart_upload(self, Bucket: str, Key: str) -> dict:
cmd = f'aws --no-verify-ssl s3api create-multipart-upload --bucket {Bucket} --key {Key}' \
f' --endpoint-url {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def list_multipart_uploads(self, Bucket: str) -> dict:
cmd = f'aws --no-verify-ssl s3api list-multipart-uploads --bucket {Bucket}' \
f' --endpoint-url {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def abort_multipart_upload(self, Bucket: str, Key: str, UploadId: str) -> dict:
cmd = f'aws --no-verify-ssl s3api abort-multipart-upload --bucket {Bucket} --key {Key}' \
f' --upload-id {UploadId} --endpoint-url {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def upload_part(self, UploadId: str, Bucket: str, Key: str, PartNumber: int, Body: str) -> dict:
cmd = f'aws --no-verify-ssl s3api upload-part --bucket {Bucket} --key {Key} --upload-id {UploadId} ' \
f'--part-number {PartNumber} --body {Body} --endpoint-url {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def list_parts(self, UploadId: str, Bucket: str, Key: str) -> dict:
cmd = f'aws --no-verify-ssl s3api list-parts --bucket {Bucket} --key {Key} --upload-id {UploadId} ' \
f' --endpoint-url {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
def complete_multipart_upload(self, Bucket: str, Key: str, UploadId: str, MultipartUpload: dict) -> dict:
file_path = f"{os.getcwd()}/{ASSETS_DIR}/parts.json"
with open(file_path, 'w') as out_file:
out_file.write(json.dumps(MultipartUpload))
cmd = f'aws --no-verify-ssl s3api complete-multipart-upload --bucket {Bucket} --key {Key}' \
f' --upload-id {UploadId} --multipart-upload file://{file_path}' \
f' --endpoint-url {S3_GATE}'
output = _cmd_run(cmd)
return self._to_json(output)
@staticmethod
def _to_json(output: str) -> dict:
json_output = {}
try:
json_output = json.loads(output)
except Exception:
if '{' not in output and '}' not in output:
logger.warning(f'Could not parse json from output {output}')
return json_output
json_output = json.loads(output[output.index('{'):])
return json_output

View file

@ -0,0 +1,113 @@
#!/usr/bin/python3.9
"""
Helper functions to use with `neofs-cli`, `neo-go` and other CLIs.
"""
import json
import subprocess
import sys
from contextlib import suppress
from datetime import datetime
from textwrap import shorten
from typing import Union
import allure
import pexpect
from robot.api import logger
ROBOT_AUTO_KEYWORDS = False
def _cmd_run(cmd: str, timeout: int = 30) -> str:
"""
    Runs given shell command <cmd>; in case of success returns its stdout,
    in case of failure raises RuntimeError with the error message.
"""
try:
logger.info(f"Executing command: {cmd}")
start_time = datetime.utcnow()
compl_proc = subprocess.run(cmd, check=True, universal_newlines=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
timeout=timeout,
shell=True)
output = compl_proc.stdout
return_code = compl_proc.returncode
end_time = datetime.utcnow()
logger.info(f"Output: {output}")
_attach_allure_log(cmd, output, return_code, start_time, end_time)
return output
except subprocess.CalledProcessError as exc:
raise RuntimeError(f"Error:\nreturn code: {exc.returncode} "
f"\nOutput: {exc.output}") from exc
except OSError as exc:
raise RuntimeError(f"Output: {exc.strerror}") from exc
except Exception as exc:
return_code, _ = subprocess.getstatusoutput(cmd)
logger.info(f"Error:\nreturn code: {return_code}\nOutput: "
f"{exc.output.decode('utf-8') if type(exc.output) is bytes else exc.output}")
raise
def _run_with_passwd(cmd: str) -> str:
child = pexpect.spawn(cmd)
child.delaybeforesend = 1
child.expect(".*")
child.sendline('\r')
if sys.platform == "darwin":
child.expect(pexpect.EOF)
cmd = child.before
else:
child.wait()
cmd = child.read()
return cmd.decode()
def _configure_aws_cli(cmd: str, key_id: str, access_key: str, out_format: str = "json") -> str:
child = pexpect.spawn(cmd)
child.delaybeforesend = 1
child.expect("AWS Access Key ID.*")
child.sendline(key_id)
child.expect("AWS Secret Access Key.*")
child.sendline(access_key)
child.expect("Default region name.*")
child.sendline('')
child.expect("Default output format.*")
child.sendline(out_format)
child.wait()
cmd = child.read()
# child.expect(pexpect.EOF)
# cmd = child.before
return cmd.decode()
def _attach_allure_log(cmd: str, output: str, return_code: int, start_time: datetime,
end_time: datetime) -> None:
if 'robot' not in sys.modules:
command_attachment = (
f"COMMAND: '{cmd}'\n"
f'OUTPUT:\n {output}\n'
f'RC: {return_code}\n'
f'Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {end_time - start_time}'
)
with allure.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'):
allure.attach(command_attachment, 'Command execution', allure.attachment_type.TEXT)
def log_command_execution(cmd: str, output: Union[str, dict]) -> None:
logger.info(f'{cmd}: {output}')
if 'robot' not in sys.modules:
with suppress(Exception):
json_output = json.dumps(output, indent=4, sort_keys=True)
output = json_output
command_attachment = (
f"COMMAND: '{cmd}'\n"
f'OUTPUT:\n {output}\n'
)
with allure.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'):
allure.attach(command_attachment, 'Command execution', allure.attachment_type.TEXT)

View file

@ -0,0 +1,82 @@
#!/usr/bin/python3
"""
This module contains functions which are used for Large Object assembly:
getting the Last Object of a split and getting the Link Object. It is not
enough to simply perform a "raw" HEAD request, as noted in the issue:
https://github.com/nspcc-dev/neofs-node/issues/1304. Therefore, the reliable
retrieval of the aforementioned objects must be done this way: send a direct
"raw" HEAD request to every Storage Node and return the desired OID on the
first non-null response.
"""
from common import NEOFS_NETMAP, WALLET_CONFIG
import neofs_verbs
from robot.api import logger
from robot.api.deco import keyword
from robot.libraries.BuiltIn import BuiltIn
ROBOT_AUTO_KEYWORDS = False
@keyword('Get Link Object')
def get_link_object(wallet: str, cid: str, oid: str, bearer_token: str = "",
wallet_config: str = WALLET_CONFIG):
"""
Args:
wallet (str): path to the wallet on whose behalf the Storage Nodes
are requested
cid (str): Container ID which stores the Large Object
oid (str): Large Object ID
bearer_token (optional, str): path to Bearer token file
wallet_config (optional, str): path to the neofs-cli config file
Returns:
(str): Link Object ID
    When no Link Object ID is found after polling all Storage Nodes,
the function throws a native robot error.
"""
for node in NEOFS_NETMAP:
try:
resp = neofs_verbs.head_object(wallet, cid, oid,
endpoint=node,
is_raw=True,
is_direct=True,
bearer_token=bearer_token,
wallet_config=wallet_config)
if resp['link']:
return resp['link']
except Exception:
logger.info(f"No Link Object found on {node}; continue")
BuiltIn().fail(f"No Link Object for {cid}/{oid} found among all Storage Nodes")
return None
@keyword('Get Last Object')
def get_last_object(wallet: str, cid: str, oid: str):
"""
Args:
wallet (str): path to the wallet on whose behalf the Storage Nodes
are requested
cid (str): Container ID which stores the Large Object
oid (str): Large Object ID
Returns:
(str): Last Object ID
    When no Last Object ID is found after polling all Storage Nodes,
the function throws a native robot error.
"""
for node in NEOFS_NETMAP:
try:
resp = neofs_verbs.head_object(wallet, cid, oid,
endpoint=node,
is_raw=True,
is_direct=True)
if resp['lastPart']:
return resp['lastPart']
except Exception:
logger.info(f"No Last Object found on {node}; continue")
BuiltIn().fail(f"No Last Object for {cid}/{oid} found among all Storage Nodes")
return None

View file

@ -0,0 +1,159 @@
#!/usr/bin/python3.9
"""
This module contains keywords that utilize `neofs-cli container` commands.
"""
import json
import time
from typing import Optional
import json_transformers
from data_formatters import dict_to_attrs
from cli_helpers import _cmd_run
from common import NEOFS_ENDPOINT, NEOFS_CLI_EXEC, WALLET_CONFIG
from robot.api import logger
from robot.api.deco import keyword
ROBOT_AUTO_KEYWORDS = False
DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
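# Note (editorial assumption, not part of the original file): the default policy above
# reads roughly as "keep 2 replicas (REP 2) in node set X, with container backup
# factor 1 (CBF 1), where X is 4 nodes selected from the whole netmap (FROM *)".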
@keyword('Create Container')
def create_container(wallet: str, rule: str = DEFAULT_PLACEMENT_RULE, basic_acl: str = '',
attributes: Optional[dict] = None, session_token: str = '',
session_wallet: str = '', options: str = '') -> str:
"""
A wrapper for `neofs-cli container create` call.
Args:
wallet (str): a wallet on whose behalf a container is created
rule (optional, str): placement rule for container
basic_acl (optional, str): an ACL for container, will be
appended to `--basic-acl` key
        attributes (optional, dict): container attributes, will be
appended to `--attributes` key
session_token (optional, str): a path to session token file
session_wallet(optional, str): a path to the wallet which signed
the session token; this parameter makes sense
when paired with `session_token`
options (optional, str): any other options to pass to the call
Returns:
(str): CID of the created container
"""
cmd = (
f'{NEOFS_CLI_EXEC} --rpc-endpoint {NEOFS_ENDPOINT} container create '
f'--wallet {session_wallet if session_wallet else wallet} '
f'--config {WALLET_CONFIG} --policy "{rule}" '
f'{"--basic-acl " + basic_acl if basic_acl else ""} '
f'{"--attributes " + dict_to_attrs(attributes) if attributes else ""} '
f'{"--session " + session_token if session_token else ""} '
f'{options} --await'
)
output = _cmd_run(cmd, timeout=60)
cid = _parse_cid(output)
logger.info("Container created; waiting until it is persisted in sidechain")
deadline_to_persist = 15 # seconds
for i in range(0, deadline_to_persist):
time.sleep(1)
containers = list_containers(wallet)
if cid in containers:
break
logger.info(f"There is no {cid} in {containers} yet; continue")
if i + 1 == deadline_to_persist:
raise RuntimeError(
f"After {deadline_to_persist} seconds the container "
f"{cid} hasn't been persisted; exiting"
)
return cid
@keyword('List Containers')
def list_containers(wallet: str) -> list[str]:
"""
A wrapper for `neofs-cli container list` call. It returns all the
available containers for the given wallet.
Args:
wallet (str): a wallet on whose behalf we list the containers
Returns:
(list): list of containers
"""
cmd = (
f'{NEOFS_CLI_EXEC} --rpc-endpoint {NEOFS_ENDPOINT} --wallet {wallet} '
f'--config {WALLET_CONFIG} container list'
)
output = _cmd_run(cmd)
return output.split()
@keyword('Get Container')
def get_container(wallet: str, cid: str) -> dict:
"""
A wrapper for `neofs-cli container get` call. It extracts container's
attributes and rearranges them into a more compact view.
Args:
wallet (str): path to a wallet on whose behalf we get the container
cid (str): ID of the container to get
Returns:
(dict): dict of container attributes
"""
cmd = (
f'{NEOFS_CLI_EXEC} --rpc-endpoint {NEOFS_ENDPOINT} --wallet {wallet} '
f'--config {WALLET_CONFIG} --cid {cid} container get --json'
)
output = _cmd_run(cmd)
container_info = json.loads(output)
attributes = dict()
for attr in container_info['attributes']:
attributes[attr['key']] = attr['value']
container_info['attributes'] = attributes
container_info['ownerID'] = json_transformers.json_reencode(container_info['ownerID']['value'])
return container_info
@keyword('Delete Container')
# TODO: make the error message about a non-found container more user-friendly
# https://github.com/nspcc-dev/neofs-contract/issues/121
def delete_container(wallet: str, cid: str) -> None:
"""
A wrapper for `neofs-cli container delete` call.
Args:
wallet (str): path to a wallet on whose behalf we delete the container
cid (str): ID of the container to delete
This function doesn't return anything.
"""
cmd = (
f'{NEOFS_CLI_EXEC} --rpc-endpoint {NEOFS_ENDPOINT} --wallet {wallet} '
f'--config {WALLET_CONFIG} container delete --cid {cid}'
)
_cmd_run(cmd)
def _parse_cid(output: str) -> str:
"""
Parses container ID from a given CLI output. The input string we expect:
container ID: 2tz86kVTDpJxWHrhw3h6PbKMwkLtBEwoqhHQCKTre1FN
awaiting...
container has been persisted on sidechain
We want to take 'container ID' value from the string.
Args:
output (str): CLI output to parse
Returns:
(str): extracted CID
"""
try:
# taking first line from command's output
first_line = output.split('\n')[0]
except Exception:
logger.error(f"Got empty output: {output}")
splitted = first_line.split(": ")
if len(splitted) != 2:
raise ValueError(f"no CID was parsed from command output: \t{first_line}")
return splitted[1]

View file

@ -0,0 +1,26 @@
import json
from neo3 import wallet
def dict_to_attrs(attrs: dict) -> str:
"""
This function takes a dictionary of object's attributes and converts them
into string. The string is passed to `--attributes` key of neofs-cli.
Args:
attrs (dict): object attributes in {"a": "b", "c": "d"} format.
Returns:
(str): string in "a=b,c=d" format.
"""
return ",".join(f"{key}={value}" for key, value in attrs.items())
def pub_key_hex(wallet_path: str, wallet_password=""):
wallet_content = ''
with open(wallet_path) as out:
wallet_content = json.load(out)
wallet_from_json = wallet.Wallet.from_json(wallet_content, password=wallet_password)
pub_key_64 = str(wallet_from_json.accounts[0].public_key)
return pub_key_64

View file

@ -0,0 +1,29 @@
#!/usr/bin/python3.9
import contract
from robot.api import logger
from robot.api.deco import keyword
from common import IR_WALLET_PATH, IR_WALLET_PASS, MORPH_ENDPOINT
ROBOT_AUTO_KEYWORDS = False
@keyword('Get Epoch')
def get_epoch():
epoch = int(contract.testinvoke_contract(
contract.get_netmap_contract_hash(MORPH_ENDPOINT),
"epoch",
MORPH_ENDPOINT)
)
logger.info(f"Got epoch {epoch}")
return epoch
@keyword('Tick Epoch')
def tick_epoch():
cur_epoch = get_epoch()
return contract.invoke_contract_multisig(
contract.get_netmap_contract_hash(MORPH_ENDPOINT),
f"newEpoch int:{cur_epoch+1}",
IR_WALLET_PATH, IR_WALLET_PASS, MORPH_ENDPOINT)

View file

@ -0,0 +1,182 @@
#!/usr/bin/python3
import os
import re
import shutil
import sys
import uuid
import zipfile
from urllib.parse import quote_plus
import allure
import requests
from cli_helpers import _cmd_run
from common import HTTP_GATE
from robot.api import logger
from robot.api.deco import keyword
from robot.libraries.BuiltIn import BuiltIn
ROBOT_AUTO_KEYWORDS = False
if "pytest" in sys.modules:
import os
ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/")
else:
ASSETS_DIR = BuiltIn().get_variable_value("${ASSETS_DIR}")
@keyword('Get via HTTP Gate')
def get_via_http_gate(cid: str, oid: str):
"""
    This function gets the given object from the HTTP gate
:param cid: CID to get object from
:param oid: object OID
"""
request = f'{HTTP_GATE}/get/{cid}/{oid}'
resp = requests.get(request, stream=True)
if not resp.ok:
raise Exception(f"""Failed to get object via HTTP gate:
request: {resp.request.path_url},
response: {resp.text},
status code: {resp.status_code} {resp.reason}""")
logger.info(f'Request: {request}')
_attach_allure_step(request, resp.status_code)
filename = f"{ASSETS_DIR}/{cid}_{oid}"
with open(filename, "wb") as get_file:
shutil.copyfileobj(resp.raw, get_file)
return filename
@keyword('Get via Zip HTTP Gate')
def get_via_zip_http_gate(cid: str, prefix: str):
"""
    This function gets a zip archive of objects with the given prefix from the HTTP gate
:param cid: CID to get object from
:param prefix: common prefix
"""
request = f'{HTTP_GATE}/zip/{cid}/{prefix}'
resp = requests.get(request, stream=True)
if not resp.ok:
raise Exception(f"""Failed to get object via HTTP gate:
request: {resp.request.path_url},
response: {resp.text},
status code: {resp.status_code} {resp.reason}""")
logger.info(f'Request: {request}')
_attach_allure_step(request, resp.status_code)
filename = f'{ASSETS_DIR}/{cid}_archive.zip'
with open(filename, 'wb') as get_file:
shutil.copyfileobj(resp.raw, get_file)
with zipfile.ZipFile(filename, 'r') as zip_ref:
zip_ref.extractall(ASSETS_DIR)
return f'{ASSETS_DIR}/{prefix}'
@keyword('Get via HTTP Gate by attribute')
def get_via_http_gate_by_attribute(cid: str, attribute: dict):
"""
    This function gets the object with the given attribute from the HTTP gate
:param cid: CID to get object from
:param attribute: attribute name: attribute value pair
"""
attr_name = list(attribute.keys())[0]
attr_value = quote_plus(str(attribute.get(attr_name)))
request = f'{HTTP_GATE}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}'
resp = requests.get(request, stream=True)
if not resp.ok:
raise Exception(f"""Failed to get object via HTTP gate:
request: {resp.request.path_url},
response: {resp.text},
status code: {resp.status_code} {resp.reason}""")
logger.info(f'Request: {request}')
_attach_allure_step(request, resp.status_code)
filename = f"{ASSETS_DIR}/{cid}_{str(uuid.uuid4())}"
with open(filename, "wb") as get_file:
shutil.copyfileobj(resp.raw, get_file)
return filename
@keyword('Upload via HTTP Gate')
def upload_via_http_gate(cid: str, path: str, headers: dict = None) -> str:
"""
    This function uploads the given object through the HTTP gate
    :param cid: CID of the container to upload the object to
:param path: File path to upload
:param headers: Object header
"""
request = f'{HTTP_GATE}/upload/{cid}'
files = {'upload_file': open(path, 'rb')}
body = {
'filename': path
}
resp = requests.post(request, files=files, data=body, headers=headers)
if not resp.ok:
raise Exception(f"""Failed to get object via HTTP gate:
request: {resp.request.path_url},
response: {resp.text},
status code: {resp.status_code} {resp.reason}""")
logger.info(f'Request: {request}')
_attach_allure_step(request, resp.json(), req_type='POST')
    assert resp.json().get('object_id'), f'OID not found in response {resp}'
return resp.json().get('object_id')
@keyword('Upload via HTTP Gate using Curl')
def upload_via_http_gate_curl(cid: str, filepath: str, large_object=False, headers: dict = None) -> str:
"""
    This function uploads the given object through the HTTP gate using the curl utility.
    :param cid: CID of the container to upload the object to
:param filepath: File path to upload
:param headers: Object header
"""
request = f'{HTTP_GATE}/upload/{cid}'
files = f'file=@{filepath};filename={os.path.basename(filepath)}'
cmd = f'curl -F \'{files}\' {request}'
if large_object:
files = f'file=@pipe;filename={os.path.basename(filepath)}'
cmd = f'mkfifo pipe;cat {filepath} > pipe & curl --no-buffer -F \'{files}\' {request}'
output = _cmd_run(cmd)
oid_re = re.search(r'"object_id": "(.*)"', output)
if not oid_re:
raise AssertionError(f'Could not find "object_id" in {output}')
return oid_re.group(1)
@keyword('Get via HTTP Gate using Curl')
def get_via_http_curl(cid: str, oid: str) -> str:
"""
    This function gets the given object from the HTTP gate using the curl utility.
:param cid: CID to get object from
:param oid: object OID
"""
request = f'{HTTP_GATE}/get/{cid}/{oid}'
filename = f"{ASSETS_DIR}/{cid}_{oid}_{str(uuid.uuid4())}"
cmd = f'curl {request} > {filename}'
_cmd_run(cmd)
return filename
def _attach_allure_step(request: str, status_code: int, req_type='GET'):
if 'allure' in sys.modules:
command_attachment = (
f"REQUEST: '{request}'\n"
f'RESPONSE:\n {status_code}\n'
)
with allure.step(f'{req_type} Request'):
allure.attach(command_attachment, f'{req_type} Request', allure.attachment_type.TEXT)

View file

@ -0,0 +1,156 @@
#!/usr/bin/python3
"""
When doing requests to NeoFS, we get JSON output as an automatically decoded
structure from protobuf. Some fields are decoded with boilerplates and binary
values are Base64-encoded.
This module contains functions which rearrange the structure and reencode binary
data from Base64 to Base58.
"""
import base64
import base58
ROBOT_AUTO_KEYWORDS = False
def decode_simple_header(data: dict):
"""
This function reencodes Simple Object header and its attributes.
"""
try:
data = decode_common_fields(data)
# object attributes view normalization
ugly_attrs = data["header"]["attributes"]
data["header"]["attributes"] = {}
for attr in ugly_attrs:
data["header"]["attributes"][attr["key"]] = attr["value"]
except Exception as exc:
raise ValueError(f"failed to decode JSON output: {exc}") from exc
return data
def decode_split_header(data: dict):
"""
This function rearranges Complex Object header.
    The header holds SplitID, a random unique
    number, which is common among all split objects, and the IDs of the Linking
    Object and the last split Object.
"""
try:
data["splitId"] = json_reencode(data["splitId"])
data["lastPart"] = (
json_reencode(data["lastPart"]["value"])
if data["lastPart"] else None
)
data["link"] = (
json_reencode(data["link"]["value"])
if data["link"] else None
)
except Exception as exc:
raise ValueError(f"failed to decode JSON output: {exc}") from exc
return data
def decode_linking_object(data: dict):
"""
This function reencodes Linking Object header.
It contains IDs of child Objects and Split Chain data.
"""
try:
data = decode_simple_header(data)
# reencoding Child Object IDs
# { 'value': <Base58 encoded OID> } -> <Base64 encoded OID>
for ind, val in enumerate(data['header']['split']['children']):
data['header']['split']['children'][ind] = json_reencode(val['value'])
data['header']['split']['splitID'] = json_reencode(data['header']['split']['splitID'])
data['header']['split']['previous'] = (
json_reencode(data['header']['split']['previous']['value'])
if data['header']['split']['previous'] else None
)
data['header']['split']['parent'] = (
json_reencode(data['header']['split']['parent']['value'])
if data['header']['split']['parent'] else None
)
except Exception as exc:
raise ValueError(f"failed to decode JSON output: {exc}") from exc
return data
def decode_storage_group(data: dict):
"""
This function reencodes Storage Group header.
"""
try:
data = decode_common_fields(data)
except Exception as exc:
raise ValueError(f"failed to decode JSON output: {exc}") from exc
return data
def decode_tombstone(data: dict):
"""
This function reencodes Tombstone header.
"""
try:
data = decode_simple_header(data)
data['header']['sessionToken'] = decode_session_token(
data['header']['sessionToken'])
except Exception as exc:
raise ValueError(f"failed to decode JSON output: {exc}") from exc
return data
def decode_session_token(data: dict):
"""
This function reencodes a fragment of header which contains
information about session token.
"""
data['body']['object']['address']['containerID'] = json_reencode(
data['body']['object']['address']['containerID']['value'])
data['body']['object']['address']['objectID'] = json_reencode(
data['body']['object']['address']['objectID']['value'])
return data
def json_reencode(data: str):
"""
    According to JSON protocol, binary data (Object/Container/Storage Group IDs, etc)
    is converted to string via Base64 encoder. But we usually operate with Base58-encoded
    format.
    This function reencodes given Base64 string into the Base58 one.
"""
return base58.b58encode(base64.b64decode(data)).decode("utf-8")
def encode_for_json(data: str):
"""
This function encodes binary data for sending them as protobuf
structures.
"""
return base64.b64encode(base58.b58decode(data)).decode('utf-8')
def decode_common_fields(data: dict):
"""
    Regardless of type (simple/complex Object, Storage Group, etc) every Object
header contains several common fields.
This function rearranges these fields.
"""
# reencoding binary IDs
data["objectID"] = json_reencode(data["objectID"]["value"])
data["header"]["containerID"] = json_reencode(data["header"]["containerID"]["value"])
data["header"]["ownerID"] = json_reencode(data["header"]["ownerID"]["value"])
data["header"]["homomorphicHash"] = json_reencode(data["header"]["homomorphicHash"]["sum"])
data["header"]["payloadHash"] = json_reencode(data["header"]["payloadHash"]["sum"])
data["header"]["version"] = (
f"{data['header']['version']['major']}{data['header']['version']['minor']}"
)
return data

View file

@ -0,0 +1,303 @@
#!/usr/bin/python3
'''
This module contains wrappers for NeoFS verbs executed via neofs-cli.
'''
import json
import os
import random
import re
import uuid
import json_transformers
from cli_helpers import _cmd_run
from common import ASSETS_DIR, NEOFS_ENDPOINT, NEOFS_NETMAP, WALLET_CONFIG
from data_formatters import dict_to_attrs
from robot.api import logger
from robot.api.deco import keyword
ROBOT_AUTO_KEYWORDS = False
# path to neofs-cli executable
NEOFS_CLI_EXEC = os.getenv('NEOFS_CLI_EXEC', 'neofs-cli')
@keyword('Get object')
def get_object(wallet: str, cid: str, oid: str, bearer_token: str = "",
write_object: str = "", endpoint: str = "", options: str = "",
wallet_config: str = WALLET_CONFIG):
"""
GET from NeoFS.
Args:
wallet (str): wallet on whose behalf GET is done
cid (str): ID of Container where we get the Object from
oid (str): Object ID
bearer_token (optional, str): path to Bearer Token file, appends to `--bearer` key
write_object (optional, str): path to downloaded file, appends to `--file` key
endpoint (optional, str): NeoFS endpoint to send request to, appends to `--rpc-endpoint` key
wallet_config(optional, str): path to the wallet config
options (optional, str): any options which `neofs-cli object get` accepts
Returns:
(str): path to downloaded file
"""
if not write_object:
write_object = str(uuid.uuid4())
file_path = f"{ASSETS_DIR}/{write_object}"
if not endpoint:
endpoint = random.sample(NEOFS_NETMAP, 1)[0]
cmd = (
f'{NEOFS_CLI_EXEC} --rpc-endpoint {endpoint} --wallet {wallet} '
f'object get --cid {cid} --oid {oid} --file {file_path} --config {wallet_config} '
f'{"--bearer " + bearer_token if bearer_token else ""} '
f'{options}'
)
_cmd_run(cmd)
return file_path
# TODO: make `bearer_token` optional
@keyword('Get Range Hash')
def get_range_hash(wallet: str, cid: str, oid: str, bearer_token: str, range_cut: str,
wallet_config: str = WALLET_CONFIG, options: str = ""):
"""
GETRANGEHASH of given Object.
Args:
wallet (str): wallet on whose behalf GETRANGEHASH is done
cid (str): ID of Container where we get the Object from
oid (str): Object ID
range_cut (str): Range to take hash from in the form offset1:length1,...,
value to pass to the `--range` parameter
bearer_token (optional, str): path to Bearer Token file, appends to `--bearer` key
wallet_config(optional, str): path to the wallet config
options (optional, str): any options which `neofs-cli object hash` accepts
Returns:
None
"""
cmd = (
f'{NEOFS_CLI_EXEC} --rpc-endpoint {NEOFS_ENDPOINT} --wallet {wallet} '
f'object hash --cid {cid} --oid {oid} --range {range_cut} --config {wallet_config} '
f'{"--bearer " + bearer_token if bearer_token else ""} '
f'{options}'
)
output = _cmd_run(cmd)
# cutting off output about range offset and length
return output.split(':')[1].strip()
@keyword('Put object')
def put_object(wallet: str, path: str, cid: str, bearer: str = "", user_headers: dict = {},
endpoint: str = "", wallet_config: str = WALLET_CONFIG, options: str = ""):
"""
PUT of given file.
Args:
wallet (str): wallet on whose behalf PUT is done
path (str): path to file to be PUT
cid (str): ID of Container where we get the Object from
bearer (optional, str): path to Bearer Token file, appends to `--bearer` key
user_headers (optional, dict): Object attributes, append to `--attributes` key
endpoint(optional, str): NeoFS endpoint to send request to
wallet_config(optional, str): path to the wallet config
options (optional, str): any options which `neofs-cli object put` accepts
Returns:
(str): ID of uploaded Object
"""
if not endpoint:
endpoint = random.sample(NEOFS_NETMAP, 1)[0]
cmd = (
f'{NEOFS_CLI_EXEC} --rpc-endpoint {endpoint} --wallet {wallet} '
f'object put --file {path} --cid {cid} {options} --config {wallet_config} '
f'{"--bearer " + bearer if bearer else ""} '
f'{"--attributes " + dict_to_attrs(user_headers) if user_headers else ""}'
)
output = _cmd_run(cmd)
# splitting CLI output to lines and taking the penultimate line
id_str = output.strip().split('\n')[-2]
oid = id_str.split(':')[1]
return oid.strip()
@keyword('Delete object')
def delete_object(wallet: str, cid: str, oid: str, bearer: str = "", wallet_config: str = WALLET_CONFIG,
options: str = ""):
"""
DELETE an Object.
Args:
wallet (str): wallet on whose behalf DELETE is done
cid (str): ID of Container where we get the Object from
oid (str): ID of Object we are going to delete
bearer (optional, str): path to Bearer Token file, appends to `--bearer` key
wallet_config(optional, str): path to the wallet config
options (optional, str): any options which `neofs-cli object delete` accepts
Returns:
(str): Tombstone ID
"""
cmd = (
f'{NEOFS_CLI_EXEC} --rpc-endpoint {NEOFS_ENDPOINT} --wallet {wallet} '
f'object delete --cid {cid} --oid {oid} {options} --config {wallet_config} '
f'{"--bearer " + bearer if bearer else ""}'
)
output = _cmd_run(cmd)
id_str = output.split('\n')[1]
tombstone = id_str.split(':')[1]
return tombstone.strip()
@keyword('Get Range')
def get_range(wallet: str, cid: str, oid: str, range_cut: str,
wallet_config: str = WALLET_CONFIG, bearer: str = "", options: str = ""):
"""
GETRANGE an Object.
Args:
wallet (str): wallet on whose behalf GETRANGE is done
cid (str): ID of Container where we get the Object from
oid (str): ID of Object we are going to request
range_cut (str): range to take data from in the form offset:length
bearer (optional, str): path to Bearer Token file, appends to `--bearer` key
wallet_config(optional, str): path to the wallet config
options (optional, str): any options which `neofs-cli object range` accepts
Returns:
        (str, bytes): path to the file with the requested range and the range content
"""
range_file = f"{ASSETS_DIR}/{uuid.uuid4()}"
cmd = (
f'{NEOFS_CLI_EXEC} --rpc-endpoint {NEOFS_ENDPOINT} --wallet {wallet} '
f'object range --cid {cid} --oid {oid} --range {range_cut} --config {wallet_config} '
f'{options} --file {range_file} '
f'{"--bearer " + bearer if bearer else ""} '
)
_cmd_run(cmd)
content = ''
with open(range_file, 'rb') as fout:
content = fout.read()
return range_file, content
@keyword('Search object')
def search_object(wallet: str, cid: str, keys: str = "", bearer: str = "", filters: dict = {},
expected_objects_list=[], wallet_config: str = WALLET_CONFIG, options: str = ""):
"""
SEARCH an Object.
Args:
wallet (str): wallet on whose behalf SEARCH is done
cid (str): ID of Container where we get the Object from
keys(optional, str): any keys for Object SEARCH which `neofs-cli object search`
accepts, e.g. `--oid`
bearer (optional, str): path to Bearer Token file, appends to `--bearer` key
filters (optional, dict): key=value pairs to filter Objects
expected_objects_list (optional, list): a list of ObjectIDs to compare found Objects with
wallet_config(optional, str): path to the wallet config
options(optional, str): any other options which `neofs-cli object search` might accept
Returns:
(list): list of found ObjectIDs
"""
filters_result = ""
if filters:
filters_result += "--filters "
logger.info(filters)
filters_result += ','.join(
map(lambda i: f"'{i} EQ {filters[i]}'", filters))
cmd = (
f'{NEOFS_CLI_EXEC} --rpc-endpoint {NEOFS_ENDPOINT} --wallet {wallet} '
f'object search {keys} --cid {cid} {filters_result} --config {wallet_config} '
f'{"--bearer " + bearer if bearer else ""} {options}'
)
output = _cmd_run(cmd)
found_objects = re.findall(r'(\w{43,44})', output)
if expected_objects_list:
if sorted(found_objects) == sorted(expected_objects_list):
logger.info(f"Found objects list '{found_objects}' ",
f"is equal for expected list '{expected_objects_list}'")
else:
logger.warn(f"Found object list {found_objects} ",
f"is not equal to expected list '{expected_objects_list}'")
return found_objects
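# Illustrative usage (not part of the original file); the attribute name and value
# below are hypothetical:
#
#   search_object(wallet, cid, filters={'FileName': 'cat.jpg'})
#   # runs: ... object search --cid <cid> --filters 'FileName EQ cat.jpg' ...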
@keyword('Head object')
def head_object(wallet: str, cid: str, oid: str, bearer_token: str = "",
options: str = "", endpoint: str = "", json_output: bool = True,
is_raw: bool = False, is_direct: bool = False, wallet_config: str = WALLET_CONFIG):
"""
HEAD an Object.
Args:
wallet (str): wallet on whose behalf HEAD is done
cid (str): ID of Container where we get the Object from
oid (str): ObjectID to HEAD
bearer_token (optional, str): path to Bearer Token file, appends to `--bearer` key
options (optional, str): any options which `neofs-cli object head` accepts
endpoint(optional, str): NeoFS endpoint to send request to
        json_output(optional, bool): return response in JSON format or not; this flag
turns into `--json` key
is_raw(optional, bool): send "raw" request or not; this flag
turns into `--raw` key
is_direct(optional, bool): send request directly to the node or not; this flag
turns into `--ttl 1` key
wallet_config(optional, str): path to the wallet config
Returns:
depending on the `json_output` parameter value, the function returns
(dict): HEAD response in JSON format
or
(str): HEAD response as a plain text
"""
cmd = (
f'{NEOFS_CLI_EXEC} --rpc-endpoint {endpoint if endpoint else NEOFS_ENDPOINT} '
f'--wallet {wallet} --config {wallet_config} '
f'object head --cid {cid} --oid {oid} {options} '
f'{"--bearer " + bearer_token if bearer_token else ""} '
f'{"--json" if json_output else ""} '
f'{"--raw" if is_raw else ""} '
f'{"--ttl 1" if is_direct else ""}'
)
output = _cmd_run(cmd)
if not json_output:
return output
decoded = ""
try:
decoded = json.loads(output)
except Exception as exc:
# If we failed to parse output as JSON, the cause might be
# the plain text string in the beginning of the output.
        # Here we cut off the first line and try to parse again.
logger.info(f"failed to parse output: {exc}")
logger.info("parsing output in another way")
fst_line_idx = output.find('\n')
decoded = json.loads(output[fst_line_idx:])
# If response is Complex Object header, it has `splitId` key
if 'splitId' in decoded.keys():
logger.info("decoding split header")
return json_transformers.decode_split_header(decoded)
# If response is Last or Linking Object header,
# it has `header` dictionary and non-null `split` dictionary
if 'split' in decoded['header'].keys():
if decoded['header']['split']:
logger.info("decoding linking object")
return json_transformers.decode_linking_object(decoded)
if decoded['header']['objectType'] == 'STORAGE_GROUP':
logger.info("decoding storage group")
return json_transformers.decode_storage_group(decoded)
if decoded['header']['objectType'] == 'TOMBSTONE':
logger.info("decoding tombstone")
return json_transformers.decode_tombstone(decoded)
logger.info("decoding simple header")
return json_transformers.decode_simple_header(decoded)

Some files were not shown because too many files have changed in this diff.