From e0b87852d519cf31cee961e9c07f7af7ace9dd4c Mon Sep 17 00:00:00 2001 From: Vitaliy Potyarkin Date: Thu, 1 Jan 1970 00:00:00 +0000 Subject: [PATCH 001/363] Empty default branch for forked repos Signed-off-by: Vitaliy Potyarkin --- README.md | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 README.md diff --git a/README.md b/README.md new file mode 100644 index 0000000..7463f9e --- /dev/null +++ b/README.md @@ -0,0 +1,3 @@ +# WIP area: this repo is just a fork! + +Useful things may be published only in [other branches](../../../branches) From b4bc824b8956ab14cfff86296409a0b283811f49 Mon Sep 17 00:00:00 2001 From: Anastasia Prasolova Date: Wed, 24 Aug 2022 16:36:00 +0300 Subject: [PATCH 002/363] Initial commit --- LICENSE | 674 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 674 insertions(+) create mode 100644 LICENSE diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..f288702 --- /dev/null +++ b/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. 
This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/philosophy/why-not-lgpl.html>.

From c0bbfd170555c07b39f47302ef336937de4db132 Mon Sep 17 00:00:00 2001
From: Vladimir Domnich
Date: Wed, 24 Aug 2022 19:01:07 +0400
Subject: [PATCH 003/363] Add README and gitignore

Signed-off-by: Vladimir Domnich
---
 .gitignore |  8 ++++++++
 README.md  | 40 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 48 insertions(+)
 create mode 100644 .gitignore
 create mode 100644 README.md

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..e61db30
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,8 @@
+# ignore IDE files
+.vscode
+
+# ignore caches under any path
+**/__pycache__
+
+# ignore virtual environments
+venv*/*
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..736ec88
--- /dev/null
+++ b/README.md
@@ -0,0 +1,40 @@
+# neofs-testlib
+This library provides building blocks and utilities to facilitate development of automated tests for the NeoFS system.
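+
+For example, a test can run a command on the local machine and attach its output to the
+test report. Below is a minimal sketch built from the `reporter` and `shell` packages in
+this library; `git --version` stands in for whatever command a test actually needs to run:
+
+```
+from reporter import get_reporter
+from shell.interfaces import CommandOptions
+from shell.local_shell import LocalShell
+
+reporter = get_reporter()
+shell = LocalShell()
+
+with reporter.step("Check git version"):
+    # exec() raises RuntimeError on a non-zero return code, because check defaults to True
+    result = shell.exec("git --version", CommandOptions(timeout=10))
+    reporter.attach(result.stdout, "git_version.txt")
+```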
+
+## Repository structure
+TODO
+
+## Installation
+TODO
+
+## Contributing
+Any contributions to the library should conform to the [contribution guideline](https://github.com/nspcc-dev/neofs-node/blob/master/CONTRIBUTING.md).
+
+### Development Environment
+To set up a development environment for `neofs-testlib`, take the following steps:
+1. Prepare a virtual environment:
+
+```
+$ virtualenv --python=python3.9 venv
+$ source venv/bin/activate
+```
+
+2. Install all dependencies:
+
+```
+$ pip install -r requirements.txt
+```
+
+### Unit Tests
+Before submitting any changes to the library, please, make sure that all unit tests are passing. To run the tests, please, use the following command:
+```
+python -m unittest discover --start-directory tests
+```
+
+To enable tests that interact with an SSH server, set up the SSH server and set the following environment variables before running the tests:
+```
+SSH_SHELL_HOST =
+SSH_SHELL_LOGIN = +SSH_SHELL_PRIVATE_KEY_PATH = +SSH_SHELL_PRIVATE_KEY_PASSPHRASE = +``` \ No newline at end of file From f6ee129354a11abe25ceacfce8a5a4a23252d29d Mon Sep 17 00:00:00 2001 From: Vladimir Domnich Date: Wed, 24 Aug 2022 19:01:07 +0400 Subject: [PATCH 004/363] Implement basic version of local shell Also added two simple reporters that can be used by the shell to report command execution details. Signed-off-by: Vladimir Domnich --- reporter/__init__.py | 14 ++ reporter/allure_reporter.py | 36 +++++ reporter/dummy_reporter.py | 21 +++ reporter/interfaces.py | 29 ++++ requirements.txt | 2 + shell/__init__.py | 0 shell/interfaces.py | 59 ++++++++ shell/local_shell.py | 159 ++++++++++++++++++++++ tests/helpers.py | 9 ++ tests/test_local_shell_interactive.py | 78 +++++++++++ tests/test_local_shell_non_interactive.py | 46 +++++++ 11 files changed, 453 insertions(+) create mode 100644 reporter/__init__.py create mode 100644 reporter/allure_reporter.py create mode 100644 reporter/dummy_reporter.py create mode 100644 reporter/interfaces.py create mode 100644 requirements.txt create mode 100644 shell/__init__.py create mode 100644 shell/interfaces.py create mode 100644 shell/local_shell.py create mode 100644 tests/helpers.py create mode 100644 tests/test_local_shell_interactive.py create mode 100644 tests/test_local_shell_non_interactive.py diff --git a/reporter/__init__.py b/reporter/__init__.py new file mode 100644 index 0000000..d312fcc --- /dev/null +++ b/reporter/__init__.py @@ -0,0 +1,14 @@ +import os + +from .allure_reporter import AllureReporter +from .interfaces import Reporter +from .dummy_reporter import DummyReporter + + +def get_reporter() -> Reporter: + # TODO: in scope of reporter implementation task here we will have extendable + # solution for configuring and providing reporter for the library + if os.getenv("TESTLIB_REPORTER_TYPE", "DUMMY") == "DUMMY": + return DummyReporter() + else: + return AllureReporter() diff --git a/reporter/allure_reporter.py b/reporter/allure_reporter.py new file mode 100644 index 0000000..6522859 --- /dev/null +++ b/reporter/allure_reporter.py @@ -0,0 +1,36 @@ +import os +from contextlib import AbstractContextManager +from textwrap import shorten +from typing import Any + +import allure +from allure import attachment_type + +from .interfaces import Reporter + + +class AllureReporter(Reporter): + """ + Implements storing of test artifacts in Allure report. + """ + + def step(self, name: str) -> AbstractContextManager: + name = shorten(name, width=70, placeholder="...") + return allure.step(name) + + def attach(self, body: Any, file_name: str) -> None: + attachment_name, extension = os.path.splitext(file_name) + attachment_type = self._resolve_attachment_type(extension) + + allure.attach(body, attachment_name, attachment_type) + + def _resolve_attachment_type(self, extension: str) -> attachment_type: + """ + Try to find matching Allure attachment type by extension. If no match was found, + default to TXT format. 
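+
+        For example, both ".txt" and ".TXT" resolve to attachment_type.TXT, and an
+        extension that Allure does not recognize (e.g. a hypothetical ".data") also
+        falls back to attachment_type.TXT.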
+ """ + extension = extension.lower() + return next( + (allure_type for allure_type in attachment_type if allure_type.extension == extension), + attachment_type.TXT, + ) diff --git a/reporter/dummy_reporter.py b/reporter/dummy_reporter.py new file mode 100644 index 0000000..e559193 --- /dev/null +++ b/reporter/dummy_reporter.py @@ -0,0 +1,21 @@ +from contextlib import AbstractContextManager, contextmanager +from typing import Any + +from .interfaces import Reporter + + +@contextmanager +def _dummy_step(): + yield + + +class DummyReporter(Reporter): + """ + Dummy implementation of reporter, does not store artifacts anywhere. + """ + + def step(self, name: str) -> AbstractContextManager: + return _dummy_step() + + def attach(self, content: Any, file_name: str) -> None: + pass diff --git a/reporter/interfaces.py b/reporter/interfaces.py new file mode 100644 index 0000000..de7bcb7 --- /dev/null +++ b/reporter/interfaces.py @@ -0,0 +1,29 @@ +from abc import ABC, abstractmethod +from contextlib import AbstractContextManager +from typing import Any + + +class Reporter(ABC): + """ + Interface that supports storage of test artifacts in some reporting tool. + """ + + @abstractmethod + def step(self, name: str) -> AbstractContextManager: + """ + Register a new step in test execution. + + :param str name: Name of the step + :return: step context + """ + pass + + @abstractmethod + def attach(self, content: Any, file_name: str) -> None: + """ + Attach specified content with given file name to the test report. + + :param any name: content to attach. If not a string, it will be converted to a string. + :param str file_name: file name of attachment. + """ + pass diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..babc3a7 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,2 @@ +allure-python-commons==2.9.45 +pexpect==4.8.0 diff --git a/shell/__init__.py b/shell/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/shell/interfaces.py b/shell/interfaces.py new file mode 100644 index 0000000..1e194dd --- /dev/null +++ b/shell/interfaces.py @@ -0,0 +1,59 @@ +from abc import ABC, abstractmethod +from dataclasses import dataclass +from typing import Optional + + +@dataclass +class InteractiveInput: + """ + Interactive input for a shell command. + + :attr str prompt_pattern: regular expression that defines expected prompt from the command. + :attr str input: user input that should be supplied to the command in response to the prompt. + """ + prompt_pattern: str + input: str + + +@dataclass +class CommandOptions: + """ + Options that control command execution. + + :attr list interactive_inputs: user inputs that should be interactively supplied to + the command during its' execution. + :attr int timeout: timeout for command execution (in seconds). + :attr bool check: controls whether to check return code of the command. Set to False to + ignore non-zero return codes. + """ + interactive_inputs: Optional[list[InteractiveInput]] = None + timeout: int = 30 + check: bool = True + + +@dataclass +class CommandResult: + """ + Represents a result of a command executed via shell. + """ + stdout: str + stderr: str + return_code: int + + +class Shell(ABC): + """ + Interface of a command shell on some system (local or remote). + """ + + @abstractmethod + def exec(self, command: str, options: Optional[CommandOptions] = None) -> CommandResult: + """ + Executes specified command on this shell. To execute interactive command, user inputs + should be specified in *options*. 
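+        Implementations are expected to capture stdout, stderr and the return code of
+        the command, and to raise an error on a non-zero return code unless *check*
+        is disabled in *options*.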
+ + :param str command: command to execute on the shell. + :param CommandOptions options: options that control command execution. + :return command result. + """ + pass diff --git a/shell/local_shell.py b/shell/local_shell.py new file mode 100644 index 0000000..2345ede --- /dev/null +++ b/shell/local_shell.py @@ -0,0 +1,159 @@ +import logging +import subprocess +import tempfile +from datetime import datetime +from typing import IO, Optional + +import pexpect + +from reporter import get_reporter +from shell.interfaces import CommandOptions, CommandResult, Shell + + +logger = logging.getLogger("neofs.testlib.shell") +reporter = get_reporter() + + +class LocalShell(Shell): + def exec(self, command: str, options: Optional[CommandOptions] = None) -> CommandResult: + # If no options were provided, use default options + options = options or CommandOptions() + + logger.info(f"Executing command: {command}") + if options.interactive_inputs: + return self._exec_interactive(command, options) + return self._exec_non_interactive(command, options) + + def _exec_interactive(self, command: str, options: CommandOptions) -> CommandResult: + start_time = datetime.utcnow() + log_file = tempfile.TemporaryFile() # File is reliable cross-platform way to capture output + result = None + command_process = None + + try: + command_process = pexpect.spawn(command, timeout=options.timeout) + command_process.delaybeforesend = 1 + command_process.logfile_read = log_file + + for interactive_input in options.interactive_inputs: + command_process.expect(interactive_input.prompt_pattern) + command_process.sendline(interactive_input.input) + + result = self._get_pexpect_process_result(command_process, command) + if options.check and result.return_code != 0: + raise RuntimeError(f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}") + + return result + except pexpect.ExceptionPexpect as exc: + result = self._get_pexpect_process_result(command_process, command) + message = f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}" + if options.check: + raise RuntimeError(message) from exc + else: + logger.exception(message) + return result + except OSError as exc: + result = self._get_pexpect_process_result(command_process, command) + message = f"Command: {command}\nreturn code: {result.return_code}\nOutput: {exc.strerror}" + if options.check: + raise RuntimeError(message) from exc + else: + logger.exception(message) + return result + except Exception: + result = self._get_pexpect_process_result(command_process, command) + raise + finally: + log_file.close() + end_time = datetime.utcnow() + self._report_command_result(command, start_time, end_time, result) + + def _exec_non_interactive(self, command: str, options: CommandOptions) -> CommandResult: + start_time = datetime.utcnow() + result = None + + try: + command_process = subprocess.run( + command, + check=options.check, + universal_newlines=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + timeout=options.timeout, + shell=True + ) + + result = CommandResult( + stdout=command_process.stdout or "", + stderr=command_process.stderr or "", + return_code=command_process.returncode, + ) + return result + except subprocess.CalledProcessError as exc: + # TODO: always set check flag to false and capture command result normally + result = self._get_failing_command_result(command) + raise RuntimeError(f"Command: {command}\nError:\n" + f"return code: {exc.returncode}\n" + f"output: {exc.output}") from exc + except OSError as 
exc: + raise RuntimeError(f"Command: {command}\nOutput: {exc.strerror}") from exc + except Exception as exc: + result = self._get_failing_command_result(command) + raise + finally: + end_time = datetime.utcnow() + self._report_command_result(command, start_time, end_time, result) + + def _get_failing_command_result(self, command: str) -> CommandResult: + return_code, cmd_output = subprocess.getstatusoutput(command) + return CommandResult( + stdout=cmd_output, + stderr="", + return_code=return_code + ) + + def _get_pexpect_process_result(self, command_process: Optional[pexpect.spawn], + command: str) -> CommandResult: + """ + If command process is not None, captures output of this process. + If command process is None, then command fails when we attempt to start it, in this case + we use regular non-interactive process to get it's output. + """ + if command_process is None: + return self._get_failing_command_result(command) + + # Wait for child process to end it's work + if command_process.isalive(): + command_process.expect(pexpect.EOF) + + # Close the process to obtain the exit code + command_process.close() + return_code = command_process.exitstatus + + # Capture output from the log file + log_file: IO[bytes] = command_process.logfile_read + log_file.seek(0) + output = log_file.read().decode() + + return CommandResult(stdout=output, stderr="", return_code=return_code) + + def _report_command_result(self, command: str, start_time: datetime, end_time: datetime, + result: Optional[CommandResult]) -> None: + # TODO: increase logging level if return code is non 0, should be warning at least + logger.info( + f"Command: {command}\n" + f"{'Success:' if result and result.return_code == 0 else 'Error:'}\n" + f"return code: {result.return_code if result else ''} " + f"\nOutput: {result.stdout if result else ''}") + + if result: + elapsed_time = end_time - start_time + command_attachment = ( + f"COMMAND: {command}\n" + f"RETCODE: {result.return_code}\n\n" + f"STDOUT:\n{result.stdout}\n" + f"STDERR:\n{result.stderr}\n" + f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}" + ) + with reporter.step(f"COMMAND: {command}"): + reporter.attach(command_attachment, "Command execution.txt") diff --git a/tests/helpers.py b/tests/helpers.py new file mode 100644 index 0000000..6035651 --- /dev/null +++ b/tests/helpers.py @@ -0,0 +1,9 @@ +import traceback + + +def format_error_details(error: Exception) -> str: + return "".join(traceback.format_exception( + etype=type(error), + value=error, + tb=error.__traceback__) + ) diff --git a/tests/test_local_shell_interactive.py b/tests/test_local_shell_interactive.py new file mode 100644 index 0000000..278d3b1 --- /dev/null +++ b/tests/test_local_shell_interactive.py @@ -0,0 +1,78 @@ +from unittest import TestCase + +from shell.interfaces import CommandOptions, InteractiveInput +from shell.local_shell import LocalShell +from tests.helpers import format_error_details + + +class TestLocalShellInteractive(TestCase): + @classmethod + def setUpClass(cls): + cls.shell = LocalShell() + + def test_command_with_one_prompt(self): + script = "password = input('Password: '); print(password)" + + inputs = [InteractiveInput(prompt_pattern="Password", input="test")] + result = self.shell.exec( + f"python -c \"{script}\"", + CommandOptions(interactive_inputs=inputs) + ) + + self.assertEqual(0, result.return_code) + self.assertOutputLines(["Password: test", "test"], result.stdout) + self.assertEqual("", result.stderr) + + def 
test_command_with_several_prompts(self): + script = ( + "input1 = input('Input1: '); print(input1); " + "input2 = input('Input2: '); print(input2)" + ) + inputs = [ + InteractiveInput(prompt_pattern="Input1", input="test1"), + InteractiveInput(prompt_pattern="Input2", input="test2"), + ] + + result = self.shell.exec( + f"python -c \"{script}\"", + CommandOptions(interactive_inputs=inputs) + ) + + self.assertEqual(0, result.return_code) + self.assertOutputLines(["Input1: test1", "test1", "Input2: test2", "test2"], result.stdout) + self.assertEqual("", result.stderr) + + def test_failed_command_with_check(self): + script = "invalid script" + inputs = [InteractiveInput(prompt_pattern=".*", input="test")] + + with self.assertRaises(RuntimeError) as exc: + self.shell.exec(f"python -c \"{script}\"", CommandOptions(interactive_inputs=inputs)) + + error = format_error_details(exc.exception) + self.assertIn("Error", error) + # TODO: it would be nice to have return code as well + # self.assertIn("return code: 1", error) + + def test_failed_command_without_check(self): + script = "invalid script" + inputs = [InteractiveInput(prompt_pattern=".*", input="test")] + + result = self.shell.exec( + f"python -c \"{script}\"", + CommandOptions(interactive_inputs=inputs, check=False), + ) + self.assertEqual(1, result.return_code) + + def test_non_existing_binary(self): + inputs = [InteractiveInput(prompt_pattern=".*", input="test")] + + with self.assertRaises(RuntimeError) as exc: + self.shell.exec("not-a-command", CommandOptions(interactive_inputs=inputs)) + + error = format_error_details(exc.exception) + self.assertIn("command was not found or was not executable", error) + + def assertOutputLines(self, expected_lines: list[str], output: str) -> None: + output_lines = [line.strip() for line in output.split("\n") if line.strip()] + self.assertEqual(expected_lines, output_lines) diff --git a/tests/test_local_shell_non_interactive.py b/tests/test_local_shell_non_interactive.py new file mode 100644 index 0000000..3fe04f5 --- /dev/null +++ b/tests/test_local_shell_non_interactive.py @@ -0,0 +1,46 @@ +from unittest import TestCase + +from shell.interfaces import CommandOptions +from shell.local_shell import LocalShell +from tests.helpers import format_error_details + + +class TestLocalShellNonInteractive(TestCase): + @classmethod + def setUpClass(cls): + cls.shell = LocalShell() + + def test_successful_command(self): + script = "print('test')" + + result = self.shell.exec(f"python -c \"{script}\"") + + self.assertEqual(0, result.return_code) + self.assertEqual("test", result.stdout.strip()) + self.assertEqual("", result.stderr) + + def test_failed_command_with_check(self): + script = "invalid script" + + with self.assertRaises(RuntimeError) as exc: + self.shell.exec(f"python -c \"{script}\"") + + error = format_error_details(exc.exception) + self.assertIn("Error", error) + self.assertIn("return code: 1", error) + + def test_failed_command_without_check(self): + script = "invalid script" + + result = self.shell.exec(f"python -c \"{script}\"", CommandOptions(check=False)) + + self.assertEqual(1, result.return_code) + self.assertIn("Error", result.stdout) + + def test_non_existing_binary(self): + with self.assertRaises(RuntimeError) as exc: + self.shell.exec(f"not-a-command") + + error = format_error_details(exc.exception) + self.assertIn("Error", error) + self.assertIn("return code: 127", error) From d3e5ee22313087b16dfdf99eef28533e4c9704f9 Mon Sep 17 00:00:00 2001 From: Vladimir Domnich Date: Wed, 24 Aug 2022 
15:41:11 +0400 Subject: [PATCH 005/363] Implement basic version of ssh shell Signed-off-by: Vladimir Domnich --- .gitignore | 3 - .pre-commit-config.yaml | 11 + README.md | 10 + pyproject.toml | 8 + reporter/__init__.py | 6 +- reporter/allure_reporter.py | 2 +- reporter/dummy_reporter.py | 2 +- reporter/interfaces.py | 5 +- requirements.txt | 4 + shell/interfaces.py | 6 +- shell/local_shell.py | 50 ++-- shell/ssh_shell.py | 239 ++++++++++++++++++ tests/helpers.py | 25 +- ...ell_interactive.py => test_local_shell.py} | 62 ++++- tests/test_local_shell_non_interactive.py | 46 ---- tests/test_ssh_shell.py | 138 ++++++++++ 16 files changed, 525 insertions(+), 92 deletions(-) create mode 100644 .pre-commit-config.yaml create mode 100644 pyproject.toml create mode 100644 shell/ssh_shell.py rename tests/{test_local_shell_interactive.py => test_local_shell.py} (53%) delete mode 100644 tests/test_local_shell_non_interactive.py create mode 100644 tests/test_ssh_shell.py diff --git a/.gitignore b/.gitignore index e61db30..743b23b 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,3 @@ # ignore caches under any path **/__pycache__ - -# ignore virtual environments -venv*/* diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..ad9846a --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,11 @@ +repos: + - repo: https://github.com/psf/black + rev: 22.8.0 + hooks: + - id: black + language_version: python3.9 + - repo: https://github.com/pycqa/isort + rev: 5.10.1 + hooks: + - id: isort + name: isort (python) diff --git a/README.md b/README.md index 736ec88..e493042 100644 --- a/README.md +++ b/README.md @@ -25,6 +25,16 @@ $ source venv/bin/activate $ pip install -r requirements.txt ``` +3. Setup pre-commit hooks to run code formatters on staged files before you run a `git commit` command: + +``` +pre-commit install +``` + +Optionally you might want to integrate code formatters with your code editor to apply formatters to code files as you go: +* isort is supported by [PyCharm](https://plugins.jetbrains.com/plugin/15434-isortconnect), [VS Code](https://cereblanco.medium.com/setup-black-and-isort-in-vscode-514804590bf9). Plugins exist for other IDEs/editors as well. +* black can be integrated with multiple editors, please, instructions are available [here](https://black.readthedocs.io/en/stable/integrations/editors.html). + ### Unit Tests Before submitting any changes to the library, please, make sure that all unit tests are passing. 
To run the tests, please, use the following command: ``` diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..bd0087b --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,8 @@ +[tool.isort] +profile = "black" +src_paths = ["reporter", "shell", "tests"] +line_length = 100 + +[tool.black] +line-length = 100 +target-version = ["py39"] diff --git a/reporter/__init__.py b/reporter/__init__.py index d312fcc..31bbdf7 100644 --- a/reporter/__init__.py +++ b/reporter/__init__.py @@ -1,8 +1,8 @@ import os -from .allure_reporter import AllureReporter -from .interfaces import Reporter -from .dummy_reporter import DummyReporter +from reporter.allure_reporter import AllureReporter +from reporter.dummy_reporter import DummyReporter +from reporter.interfaces import Reporter def get_reporter() -> Reporter: diff --git a/reporter/allure_reporter.py b/reporter/allure_reporter.py index 6522859..0277214 100644 --- a/reporter/allure_reporter.py +++ b/reporter/allure_reporter.py @@ -6,7 +6,7 @@ from typing import Any import allure from allure import attachment_type -from .interfaces import Reporter +from reporter.interfaces import Reporter class AllureReporter(Reporter): diff --git a/reporter/dummy_reporter.py b/reporter/dummy_reporter.py index e559193..9061101 100644 --- a/reporter/dummy_reporter.py +++ b/reporter/dummy_reporter.py @@ -1,7 +1,7 @@ from contextlib import AbstractContextManager, contextmanager from typing import Any -from .interfaces import Reporter +from reporter.interfaces import Reporter @contextmanager diff --git a/reporter/interfaces.py b/reporter/interfaces.py index de7bcb7..347f71f 100644 --- a/reporter/interfaces.py +++ b/reporter/interfaces.py @@ -16,14 +16,13 @@ class Reporter(ABC): :param str name: Name of the step :return: step context """ - pass @abstractmethod def attach(self, content: Any, file_name: str) -> None: """ Attach specified content with given file name to the test report. - :param any name: content to attach. If not a string, it will be converted to a string. + :param any content: content to attach. If content value is not a string, it will be + converted to a string. :param str file_name: file name of attachment. """ - pass diff --git a/requirements.txt b/requirements.txt index babc3a7..5e62371 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,6 @@ allure-python-commons==2.9.45 +black==22.8.0 +isort==5.10.1 +paramiko==2.10.3 pexpect==4.8.0 +pre-commit==2.20.0 diff --git a/shell/interfaces.py b/shell/interfaces.py index 1e194dd..97ba7cc 100644 --- a/shell/interfaces.py +++ b/shell/interfaces.py @@ -11,6 +11,7 @@ class InteractiveInput: :attr str prompt_pattern: regular expression that defines expected prompt from the command. :attr str input: user input that should be supplied to the command in response to the prompt. """ + prompt_pattern: str input: str @@ -21,11 +22,12 @@ class CommandOptions: Options that control command execution. :attr list interactive_inputs: user inputs that should be interactively supplied to - the command during its' execution. + the command during execution. :attr int timeout: timeout for command execution (in seconds). :attr bool check: controls whether to check return code of the command. Set to False to ignore non-zero return codes. """ + interactive_inputs: Optional[list[InteractiveInput]] = None timeout: int = 30 check: bool = True @@ -36,6 +38,7 @@ class CommandResult: """ Represents a result of a command executed via shell. 
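:attr str stdout: standard output captured from the command.
:attr str stderr: standard error captured from the command.
:attr int return_code: return code of the command.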
""" + stdout: str stderr: str return_code: int @@ -56,4 +59,3 @@ class Shell(ABC): :param CommandOptions options: options that control command execution. :return command result. """ - pass diff --git a/shell/local_shell.py b/shell/local_shell.py index 2345ede..f542cc4 100644 --- a/shell/local_shell.py +++ b/shell/local_shell.py @@ -9,12 +9,15 @@ import pexpect from reporter import get_reporter from shell.interfaces import CommandOptions, CommandResult, Shell - logger = logging.getLogger("neofs.testlib.shell") reporter = get_reporter() class LocalShell(Shell): + """ + Implements command shell on a local machine. + """ + def exec(self, command: str, options: Optional[CommandOptions] = None) -> CommandResult: # If no options were provided, use default options options = options or CommandOptions() @@ -41,12 +44,16 @@ class LocalShell(Shell): result = self._get_pexpect_process_result(command_process, command) if options.check and result.return_code != 0: - raise RuntimeError(f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}") + raise RuntimeError( + f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}" + ) return result except pexpect.ExceptionPexpect as exc: result = self._get_pexpect_process_result(command_process, command) - message = f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}" + message = ( + f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}" + ) if options.check: raise RuntimeError(message) from exc else: @@ -54,7 +61,9 @@ class LocalShell(Shell): return result except OSError as exc: result = self._get_pexpect_process_result(command_process, command) - message = f"Command: {command}\nreturn code: {result.return_code}\nOutput: {exc.strerror}" + message = ( + f"Command: {command}\nreturn code: {result.return_code}\nOutput: {exc.strerror}" + ) if options.check: raise RuntimeError(message) from exc else: @@ -80,7 +89,7 @@ class LocalShell(Shell): stdout=subprocess.PIPE, stderr=subprocess.STDOUT, timeout=options.timeout, - shell=True + shell=True, ) result = CommandResult( @@ -92,9 +101,11 @@ class LocalShell(Shell): except subprocess.CalledProcessError as exc: # TODO: always set check flag to false and capture command result normally result = self._get_failing_command_result(command) - raise RuntimeError(f"Command: {command}\nError:\n" - f"return code: {exc.returncode}\n" - f"output: {exc.output}") from exc + raise RuntimeError( + f"Command: {command}\nError:\n" + f"return code: {exc.returncode}\n" + f"output: {exc.output}" + ) from exc except OSError as exc: raise RuntimeError(f"Command: {command}\nOutput: {exc.strerror}") from exc except Exception as exc: @@ -106,14 +117,11 @@ class LocalShell(Shell): def _get_failing_command_result(self, command: str) -> CommandResult: return_code, cmd_output = subprocess.getstatusoutput(command) - return CommandResult( - stdout=cmd_output, - stderr="", - return_code=return_code - ) + return CommandResult(stdout=cmd_output, stderr="", return_code=return_code) - def _get_pexpect_process_result(self, command_process: Optional[pexpect.spawn], - command: str) -> CommandResult: + def _get_pexpect_process_result( + self, command_process: Optional[pexpect.spawn], command: str + ) -> CommandResult: """ If command process is not None, captures output of this process. 
If command process is None, then command fails when we attempt to start it, in this case @@ -137,14 +145,20 @@ class LocalShell(Shell): return CommandResult(stdout=output, stderr="", return_code=return_code) - def _report_command_result(self, command: str, start_time: datetime, end_time: datetime, - result: Optional[CommandResult]) -> None: + def _report_command_result( + self, + command: str, + start_time: datetime, + end_time: datetime, + result: Optional[CommandResult], + ) -> None: # TODO: increase logging level if return code is non 0, should be warning at least logger.info( f"Command: {command}\n" f"{'Success:' if result and result.return_code == 0 else 'Error:'}\n" f"return code: {result.return_code if result else ''} " - f"\nOutput: {result.stdout if result else ''}") + f"\nOutput: {result.stdout if result else ''}" + ) if result: elapsed_time = end_time - start_time diff --git a/shell/ssh_shell.py b/shell/ssh_shell.py new file mode 100644 index 0000000..5a272a3 --- /dev/null +++ b/shell/ssh_shell.py @@ -0,0 +1,239 @@ +import logging +import socket +import textwrap +from datetime import datetime +from functools import lru_cache, wraps +from time import sleep +from typing import ClassVar, Optional + +from paramiko import ( + AutoAddPolicy, + ECDSAKey, + Ed25519Key, + PKey, + RSAKey, + SSHClient, + SSHException, + ssh_exception, +) +from paramiko.ssh_exception import AuthenticationException + +from reporter import get_reporter +from shell.interfaces import CommandOptions, CommandResult, Shell + +logger = logging.getLogger("neofs.testlib.shell") +reporter = get_reporter() + + +class HostIsNotAvailable(Exception): + """Raised when host is not reachable via SSH connection""" + + def __init__(self, host: str = None): + msg = f"Host {host} is not available" + super().__init__(msg) + + +def log_command(func): + @wraps(func) + def wrapper(shell: "SSHShell", command: str, *args, **kwargs) -> CommandResult: + command_info = command.removeprefix("$ProgressPreference='SilentlyContinue'\n") + with reporter.step(command_info): + logging.info(f'Execute command "{command}" on "{shell.host}"') + + start_time = datetime.utcnow() + result = func(shell, command, *args, **kwargs) + end_time = datetime.utcnow() + + elapsed_time = end_time - start_time + log_message = ( + f"HOST: {shell.host}\n" + f"COMMAND:\n{textwrap.indent(command, ' ')}\n" + f"RC:\n {result.return_code}\n" + f"STDOUT:\n{textwrap.indent(result.stdout, ' ')}\n" + f"STDERR:\n{textwrap.indent(result.stderr, ' ')}\n" + f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}" + ) + + logger.info(log_message) + reporter.attach(log_message, "SSH command.txt") + return result + + return wrapper + + +@lru_cache +def _load_private_key(file_path: str, password: Optional[str]) -> PKey: + """ + Loads private key from specified file. + + We support several type formats, however paramiko doesn't provide functionality to determine + key type in advance. So we attempt to load file with each of the supported formats and then + cache the result so that we don't need to figure out type again on subsequent calls. 
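+ Example (hypothetical key path, for illustration only):
+     key = _load_private_key("/home/user/.ssh/id_ed25519", password=None)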
+ """ + logger.debug(f"Loading ssh key from {file_path}") + for key_type in (Ed25519Key, ECDSAKey, RSAKey): + try: + return key_type.from_private_key_file(file_path, password) + except SSHException as ex: + logger.warn(f"SSH key {file_path} can't be loaded with {key_type}: {ex}") + continue + raise SSHException(f"SSH key {file_path} is not supported") + + +class SSHShell(Shell): + """ + Implements command shell on a remote machine via SSH connection. + """ + + # Time in seconds to delay after remote command has completed. The delay is required + # to allow remote command to flush its output buffer + DELAY_AFTER_EXIT = 0.2 + + SSH_CONNECTION_ATTEMPTS: ClassVar[int] = 3 + CONNECTION_TIMEOUT = 90 + + def __init__( + self, + host: str, + login: str, + password: Optional[str] = None, + private_key_path: Optional[str] = None, + private_key_passphrase: Optional[str] = None, + port: str = "22", + ) -> None: + self.host = host + self.port = port + self.login = login + self.password = password + self.private_key_path = private_key_path + self.private_key_passphrase = private_key_passphrase + self.__connection: Optional[SSHClient] = None + + @property + def _connection(self): + if not self.__connection: + self.__connection = self._create_connection() + return self.__connection + + def drop(self): + self._reset_connection() + + def exec(self, command: str, options: Optional[CommandOptions] = None) -> CommandResult: + options = options or CommandOptions() + + if options.interactive_inputs: + result = self._exec_interactive(command, options) + else: + result = self._exec_non_interactive(command, options) + + if options.check and result.return_code != 0: + raise RuntimeError( + f"Command: {command}\nreturn code: {result.return_code}" + f"\nOutput: {result.stdout}" + ) + return result + + @log_command + def _exec_interactive(self, command: str, options: CommandOptions) -> CommandResult: + stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout) + for interactive_input in options.interactive_inputs: + input = interactive_input.input + if not input.endswith("\n"): + input = f"{input}\n" + try: + stdin.write(input) + except OSError: + logger.exception(f"Error while feeding {input} into command {command}") + # stdin.close() + + # Wait for command to complete and flush its buffer before we attempt to read output + sleep(self.DELAY_AFTER_EXIT) + return_code = stdout.channel.recv_exit_status() + sleep(self.DELAY_AFTER_EXIT) + + result = CommandResult( + stdout=stdout.read().decode(errors="ignore"), + stderr=stderr.read().decode(errors="ignore"), + return_code=return_code, + ) + return result + + @log_command + def _exec_non_interactive(self, command: str, options: CommandOptions) -> CommandResult: + try: + _, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout) + + # Wait for command to complete and flush its buffer before we attempt to read output + return_code = stdout.channel.recv_exit_status() + sleep(self.DELAY_AFTER_EXIT) + + return CommandResult( + stdout=stdout.read().decode(errors="ignore"), + stderr=stderr.read().decode(errors="ignore"), + return_code=return_code, + ) + except ( + SSHException, + TimeoutError, + ssh_exception.NoValidConnectionsError, + ConnectionResetError, + AttributeError, + socket.timeout, + ) as exc: + logger.exception(f"Can't execute command {command} on host: {self.host}") + self._reset_connection() + raise HostIsNotAvailable(self.host) from exc + + def _create_connection(self, attempts: int = SSH_CONNECTION_ATTEMPTS) -> 
SSHClient: + for attempt in range(attempts): + connection = SSHClient() + connection.set_missing_host_key_policy(AutoAddPolicy()) + try: + if self.private_key_path: + logging.info( + f"Trying to connect to host {self.host} as {self.login} using SSH key " + f"{self.private_key_path} (attempt {attempt})" + ) + connection.connect( + hostname=self.host, + port=self.port, + username=self.login, + pkey=_load_private_key(self.private_key_path, self.private_key_passphrase), + timeout=self.CONNECTION_TIMEOUT, + ) + else: + logging.info( + f"Trying to connect to host {self.host} as {self.login} using password " + f"(attempt {attempt})" + ) + connection.connect( + hostname=self.host, + port=self.port, + username=self.login, + password=self.password, + timeout=self.CONNECTION_TIMEOUT, + ) + return connection + except AuthenticationException: + connection.close() + logger.exception(f"Can't connect to host {self.host}") + raise + except ( + SSHException, + ssh_exception.NoValidConnectionsError, + AttributeError, + socket.timeout, + OSError, + ) as exc: + connection.close() + can_retry = attempt + 1 < attempts + if can_retry: + logger.warn(f"Can't connect to host {self.host}, will retry. Error: {exc}") + continue + logger.exception(f"Can't connect to host {self.host}") + raise HostIsNotAvailable(self.host) from exc + + def _reset_connection(self) -> None: + if self.__connection: + self.__connection.close() + self.__connection = None diff --git a/tests/helpers.py b/tests/helpers.py index 6035651..b80be61 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -1,9 +1,30 @@ import traceback +from shell.interfaces import CommandResult + def format_error_details(error: Exception) -> str: - return "".join(traceback.format_exception( + """ + Converts specified exception instance into a string that includes error message + and full stack trace. + + :param Exception error: exception to convert. + :return: string containing exception details. + """ + detail_lines = traceback.format_exception( etype=type(error), value=error, - tb=error.__traceback__) + tb=error.__traceback__, ) + return "".join(detail_lines) + + +def get_output_lines(result: CommandResult) -> list[str]: + """ + Converts output of specified command result into separate lines trimmed from whitespaces. + Empty lines are excluded. + + :param CommandResult result: result which output should be converted. + :return: list of lines extracted from the output. 
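+ Example: a result whose stdout is "ok\n\n  done \n" yields ["ok", "done"].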
+ """ + return [line.strip() for line in result.stdout.split("\n") if line.strip()] diff --git a/tests/test_local_shell_interactive.py b/tests/test_local_shell.py similarity index 53% rename from tests/test_local_shell_interactive.py rename to tests/test_local_shell.py index 278d3b1..52e3861 100644 --- a/tests/test_local_shell_interactive.py +++ b/tests/test_local_shell.py @@ -2,7 +2,7 @@ from unittest import TestCase from shell.interfaces import CommandOptions, InteractiveInput from shell.local_shell import LocalShell -from tests.helpers import format_error_details +from tests.helpers import format_error_details, get_output_lines class TestLocalShellInteractive(TestCase): @@ -15,12 +15,11 @@ class TestLocalShellInteractive(TestCase): inputs = [InteractiveInput(prompt_pattern="Password", input="test")] result = self.shell.exec( - f"python -c \"{script}\"", - CommandOptions(interactive_inputs=inputs) + f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs) ) self.assertEqual(0, result.return_code) - self.assertOutputLines(["Password: test", "test"], result.stdout) + self.assertEqual(["Password: test", "test"], get_output_lines(result)) self.assertEqual("", result.stderr) def test_command_with_several_prompts(self): @@ -34,12 +33,13 @@ class TestLocalShellInteractive(TestCase): ] result = self.shell.exec( - f"python -c \"{script}\"", - CommandOptions(interactive_inputs=inputs) + f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs) ) self.assertEqual(0, result.return_code) - self.assertOutputLines(["Input1: test1", "test1", "Input2: test2", "test2"], result.stdout) + self.assertEqual( + ["Input1: test1", "test1", "Input2: test2", "test2"], get_output_lines(result) + ) self.assertEqual("", result.stderr) def test_failed_command_with_check(self): @@ -47,7 +47,7 @@ class TestLocalShellInteractive(TestCase): inputs = [InteractiveInput(prompt_pattern=".*", input="test")] with self.assertRaises(RuntimeError) as exc: - self.shell.exec(f"python -c \"{script}\"", CommandOptions(interactive_inputs=inputs)) + self.shell.exec(f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs)) error = format_error_details(exc.exception) self.assertIn("Error", error) @@ -59,7 +59,7 @@ class TestLocalShellInteractive(TestCase): inputs = [InteractiveInput(prompt_pattern=".*", input="test")] result = self.shell.exec( - f"python -c \"{script}\"", + f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs, check=False), ) self.assertEqual(1, result.return_code) @@ -71,8 +71,44 @@ class TestLocalShellInteractive(TestCase): self.shell.exec("not-a-command", CommandOptions(interactive_inputs=inputs)) error = format_error_details(exc.exception) - self.assertIn("command was not found or was not executable", error) + self.assertIn("return code: 127", error) - def assertOutputLines(self, expected_lines: list[str], output: str) -> None: - output_lines = [line.strip() for line in output.split("\n") if line.strip()] - self.assertEqual(expected_lines, output_lines) + +class TestLocalShellNonInteractive(TestCase): + @classmethod + def setUpClass(cls): + cls.shell = LocalShell() + + def test_successful_command(self): + script = "print('test')" + + result = self.shell.exec(f'python3 -c "{script}"') + + self.assertEqual(0, result.return_code) + self.assertEqual("test", result.stdout.strip()) + self.assertEqual("", result.stderr) + + def test_invalid_command_with_check(self): + script = "invalid script" + + with self.assertRaises(RuntimeError) as exc: + self.shell.exec(f'python3 -c 
"{script}"') + + error = format_error_details(exc.exception) + self.assertIn("Error", error) + self.assertIn("return code: 1", error) + + def test_invalid_command_without_check(self): + script = "invalid script" + + result = self.shell.exec(f'python3 -c "{script}"', CommandOptions(check=False)) + + self.assertEqual(1, result.return_code) + self.assertIn("Error", result.stdout) + + def test_non_existing_binary(self): + with self.assertRaises(RuntimeError) as exc: + self.shell.exec("not-a-command") + + error = format_error_details(exc.exception) + self.assertIn("return code: 127", error) diff --git a/tests/test_local_shell_non_interactive.py b/tests/test_local_shell_non_interactive.py deleted file mode 100644 index 3fe04f5..0000000 --- a/tests/test_local_shell_non_interactive.py +++ /dev/null @@ -1,46 +0,0 @@ -from unittest import TestCase - -from shell.interfaces import CommandOptions -from shell.local_shell import LocalShell -from tests.helpers import format_error_details - - -class TestLocalShellNonInteractive(TestCase): - @classmethod - def setUpClass(cls): - cls.shell = LocalShell() - - def test_successful_command(self): - script = "print('test')" - - result = self.shell.exec(f"python -c \"{script}\"") - - self.assertEqual(0, result.return_code) - self.assertEqual("test", result.stdout.strip()) - self.assertEqual("", result.stderr) - - def test_failed_command_with_check(self): - script = "invalid script" - - with self.assertRaises(RuntimeError) as exc: - self.shell.exec(f"python -c \"{script}\"") - - error = format_error_details(exc.exception) - self.assertIn("Error", error) - self.assertIn("return code: 1", error) - - def test_failed_command_without_check(self): - script = "invalid script" - - result = self.shell.exec(f"python -c \"{script}\"", CommandOptions(check=False)) - - self.assertEqual(1, result.return_code) - self.assertIn("Error", result.stdout) - - def test_non_existing_binary(self): - with self.assertRaises(RuntimeError) as exc: - self.shell.exec(f"not-a-command") - - error = format_error_details(exc.exception) - self.assertIn("Error", error) - self.assertIn("return code: 127", error) diff --git a/tests/test_ssh_shell.py b/tests/test_ssh_shell.py new file mode 100644 index 0000000..213b7cf --- /dev/null +++ b/tests/test_ssh_shell.py @@ -0,0 +1,138 @@ +import os +from unittest import SkipTest, TestCase + +from shell.interfaces import CommandOptions, InteractiveInput +from shell.ssh_shell import SSHShell +from tests.helpers import format_error_details, get_output_lines + + +def init_shell() -> SSHShell: + host = os.getenv("SSH_SHELL_HOST") + port = os.getenv("SSH_SHELL_PORT", "22") + login = os.getenv("SSH_SHELL_LOGIN") + private_key_path = os.getenv("SSH_SHELL_PRIVATE_KEY_PATH") + private_key_passphrase = os.getenv("SSH_SHELL_PRIVATE_KEY_PASSPHRASE") + + if not all([host, login, private_key_path, private_key_passphrase]): + # TODO: in the future we might use https://pypi.org/project/mock-ssh-server, + # at the moment it is not suitable for us because of its issues with stdin + raise SkipTest("SSH connection is not configured") + + return SSHShell( + host=host, + port=port, + login=login, + private_key_path=private_key_path, + private_key_passphrase=private_key_passphrase, + ) + + +class TestSSHShellInteractive(TestCase): + @classmethod + def setUpClass(cls): + cls.shell = init_shell() + + def test_command_with_one_prompt(self): + script = "password = input('Password: '); print('\\n' + password)" + + inputs = [InteractiveInput(prompt_pattern="Password", input="test")] + 
result = self.shell.exec( + f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs) + ) + + # TODO: we have inconsistency with local shell here, ssh does not echo input into stdout + self.assertEqual(0, result.return_code) + self.assertEqual(["Password:", "test"], get_output_lines(result)) + self.assertEqual("", result.stderr) + + def test_command_with_several_prompts(self): + script = ( + "input1 = input('Input1: '); print('\\n' + input1); " + "input2 = input('Input2: '); print('\\n' + input2)" + ) + inputs = [ + InteractiveInput(prompt_pattern="Input1", input="test1"), + InteractiveInput(prompt_pattern="Input2", input="test2"), + ] + + result = self.shell.exec( + f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs) + ) + + # TODO: we have inconsistency with local shell here, ssh does not echo input into stdout + self.assertEqual(0, result.return_code) + self.assertEqual(["Input1:", "test1", "Input2:", "test2"], get_output_lines(result)) + self.assertEqual("", result.stderr) + + def test_invalid_command_with_check(self): + script = "invalid script" + inputs = [InteractiveInput(prompt_pattern=".*", input="test")] + + with self.assertRaises(RuntimeError) as raised: + self.shell.exec(f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs)) + + error = format_error_details(raised.exception) + self.assertIn("Error", error) + self.assertIn("return code: 1", error) + + def test_invalid_command_without_check(self): + script = "invalid script" + inputs = [InteractiveInput(prompt_pattern=".*", input="test")] + + result = self.shell.exec( + f'python3 -c "{script}"', + CommandOptions(interactive_inputs=inputs, check=False), + ) + self.assertEqual(1, result.return_code) + + def test_non_existing_binary(self): + inputs = [InteractiveInput(prompt_pattern=".*", input="test")] + + with self.assertRaises(RuntimeError) as raised: + self.shell.exec("not-a-command", CommandOptions(interactive_inputs=inputs)) + + error = format_error_details(raised.exception) + self.assertIn("return code: 127", error) + + +class TestSSHShellNonInteractive(TestCase): + @classmethod + def setUpClass(cls): + cls.shell = init_shell() + + def test_correct_command(self): + script = "print('test')" + + result = self.shell.exec(f'python3 -c "{script}"') + + self.assertEqual(0, result.return_code) + self.assertEqual("test", result.stdout.strip()) + self.assertEqual("", result.stderr) + + def test_invalid_command_with_check(self): + script = "invalid script" + + with self.assertRaises(RuntimeError) as raised: + self.shell.exec(f'python3 -c "{script}"') + + error = format_error_details(raised.exception) + self.assertIn("Error", error) + self.assertIn("return code: 1", error) + + def test_invalid_command_without_check(self): + script = "invalid script" + + result = self.shell.exec(f'python3 -c "{script}"', CommandOptions(check=False)) + + self.assertEqual(1, result.return_code) + # TODO: we have inconsistency with local shell here, the local shell captures error info + # in stdout while ssh shell captures it in stderr + self.assertIn("Error", result.stderr) + + def test_non_existing_binary(self): + with self.assertRaises(RuntimeError) as exc: + self.shell.exec("not-a-command") + + error = format_error_details(exc.exception) + self.assertIn("Error", error) + self.assertIn("return code: 127", error) From c48f7b7ff2dd440551576d4bc24b09a844fee11e Mon Sep 17 00:00:00 2001 From: Vladimir Avdeev Date: Mon, 19 Sep 2022 20:00:46 +0300 Subject: [PATCH 006/363] Implemented neofs-go, neo-go, neofs-authmate lib 
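For context, a minimal usage sketch of the new wrappers (illustrative only, not part of the patch; the binary name and RPC endpoint below are placeholders):

```
from cli import NeofsAdm
from shell.local_shell import LocalShell

# Each wrapper method assembles a command line from its keyword arguments
# (underscores become hyphens, e.g. rpc_endpoint -> --rpc-endpoint) and runs
# it via Shell.exec(), returning a CommandResult with stdout/stderr/return_code.
adm = NeofsAdm(LocalShell(), "neofs-adm")

print(adm.version.get().stdout)
print(adm.morph.dump_config(rpc_endpoint="http://localhost:30333").stdout)
```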
--- cli/__init__.py | 3 + cli/adm/__init__.py | 1 + cli/adm/adm.py | 24 ++ cli/adm/config.py | 24 ++ cli/adm/morph.py | 372 ++++++++++++++++++++++++++++ cli/adm/storage_config.py | 25 ++ cli/adm/subnet.py | 255 +++++++++++++++++++ cli/adm/version.py | 13 + cli/authmate/__init__.py | 1 + cli/authmate/authmate.py | 20 ++ cli/authmate/secret.py | 92 +++++++ cli/authmate/version.py | 13 + cli/cli_command.py | 58 +++++ cli/go/__init__.py | 2 + cli/go/blockchain_network_type.py | 7 + cli/go/candidate.py | 120 +++++++++ cli/go/contract.py | 358 +++++++++++++++++++++++++++ cli/go/db.py | 74 ++++++ cli/go/go.py | 44 ++++ cli/go/nep17.py | 243 ++++++++++++++++++ cli/go/node.py | 18 ++ cli/go/query.py | 128 ++++++++++ cli/go/version.py | 13 + cli/go/wallet.py | 395 ++++++++++++++++++++++++++++++ shell/__init__.py | 3 + 25 files changed, 2306 insertions(+) create mode 100644 cli/__init__.py create mode 100644 cli/adm/__init__.py create mode 100644 cli/adm/adm.py create mode 100644 cli/adm/config.py create mode 100644 cli/adm/morph.py create mode 100644 cli/adm/storage_config.py create mode 100644 cli/adm/subnet.py create mode 100644 cli/adm/version.py create mode 100644 cli/authmate/__init__.py create mode 100644 cli/authmate/authmate.py create mode 100644 cli/authmate/secret.py create mode 100644 cli/authmate/version.py create mode 100644 cli/cli_command.py create mode 100644 cli/go/__init__.py create mode 100644 cli/go/blockchain_network_type.py create mode 100644 cli/go/candidate.py create mode 100644 cli/go/contract.py create mode 100644 cli/go/db.py create mode 100644 cli/go/go.py create mode 100644 cli/go/nep17.py create mode 100644 cli/go/node.py create mode 100644 cli/go/query.py create mode 100644 cli/go/version.py create mode 100644 cli/go/wallet.py diff --git a/cli/__init__.py b/cli/__init__.py new file mode 100644 index 0000000..112c15b --- /dev/null +++ b/cli/__init__.py @@ -0,0 +1,3 @@ +from .adm import NeofsAdm +from .authmate import NeofsAuthmate +from .go import NeoGo, NetworkType diff --git a/cli/adm/__init__.py b/cli/adm/__init__.py new file mode 100644 index 0000000..1596cb3 --- /dev/null +++ b/cli/adm/__init__.py @@ -0,0 +1 @@ +from .adm import NeofsAdm diff --git a/cli/adm/adm.py b/cli/adm/adm.py new file mode 100644 index 0000000..0313b78 --- /dev/null +++ b/cli/adm/adm.py @@ -0,0 +1,24 @@ +from typing import Optional + +from shell import Shell + +from .config import NeofsAdmConfig +from .morph import NeofsAdmMorph +from .subnet import NeofsAdmMorphSubnet +from .storage_config import NeofsAdmStorageConfig +from .version import NeofsAdmVersion + + +class NeofsAdm: + config: Optional[NeofsAdmConfig] = None + morph: Optional[NeofsAdmMorph] = None + subnet: Optional[NeofsAdmMorphSubnet] = None + storage_config: Optional[NeofsAdmStorageConfig] = None + version: Optional[NeofsAdmVersion] = None + + def __init__(self, shell: Shell, neofs_adm_exec_path: str, config_file: Optional[str] = None): + self.config = NeofsAdmConfig(shell, neofs_adm_exec_path, config=config_file) + self.morph = NeofsAdmMorph(shell, neofs_adm_exec_path, config=config_file) + self.subnet = NeofsAdmMorphSubnet(shell, neofs_adm_exec_path, config=config_file) + self.storage_config = NeofsAdmStorageConfig(shell, neofs_adm_exec_path, config=config_file) + self.version = NeofsAdmVersion(shell, neofs_adm_exec_path, config=config_file) diff --git a/cli/adm/config.py b/cli/adm/config.py new file mode 100644 index 0000000..7c21bd5 --- /dev/null +++ b/cli/adm/config.py @@ -0,0 +1,24 @@ +from cli.cli_command import 
NeofsCliCommand +from shell import CommandResult + + +class NeofsAdmConfig(NeofsCliCommand): + def init(self, path: str = "~/.neofs/adm/config.yml") -> CommandResult: + """Initialize basic neofs-adm configuration file. + + Args: + path (str): path to config (default ~/.neofs/adm/config.yml) + + + Returns: + str: Command string + + """ + return self._execute( + "config init", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + } + ) diff --git a/cli/adm/morph.py b/cli/adm/morph.py new file mode 100644 index 0000000..4fa2c9b --- /dev/null +++ b/cli/adm/morph.py @@ -0,0 +1,372 @@ +from typing import Optional + +from cli.cli_command import NeofsCliCommand +from shell import CommandResult + + +class NeofsAdmMorph(NeofsCliCommand): + def deposit_notary( + self, + rpc_endpoint: str, + account: str, + gas: str, + storage_wallet: Optional[str] = None, + till: Optional[str] = None, + ) -> CommandResult: + """Deposit GAS for notary service. + + Args: + account (str): wallet account address + gas (str): amount of GAS to deposit + rpc_endpoint (str): N3 RPC node endpoint + storage_wallet (str): path to storage node wallet + till (str): notary deposit duration in blocks + + + Returns: + str: Command string + + """ + return self._execute( + "morph deposit-notary", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + }, + ) + + def dump_balances( + self, + rpc_endpoint: str, + alphabet: Optional[str] = None, + proxy: Optional[str] = None, + script_hash: Optional[str] = None, + storage: Optional[str] = None, + ) -> CommandResult: + """Dump GAS balances + + Args: + alphabet (str): dump balances of alphabet contracts + proxy (str): dump balances of the proxy contract + rpc_endpoint (str): N3 RPC node endpoint + script_hash (str): use script-hash format for addresses + storage (str): dump balances of storage nodes from the current netmap + + + Returns: + str: Command string + + """ + return self._execute( + "morph dump-balances", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + }, + ) + + def dump_config(self, rpc_endpoint: str) -> CommandResult: + """Section for morph network configuration commands. + + Args: + rpc_endpoint (str): N3 RPC node endpoint + + + Returns: + str: Command string + + """ + return self._execute( + "morph dump-config", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + }, + ) + + def dump_containers( + self, + rpc_endpoint: str, + cid: Optional[str] = None, + container_contract: Optional[str] = None, + dump: str = './testlib_dump_container', + ) -> CommandResult: + """Dump NeoFS containers to file. + + Args: + cid (str): containers to dump + container_contract (str): container contract hash (for networks without NNS) + dump (str): file where to save dumped containers + (default: ./testlib_dump_container) + rpc_endpoint (str): N3 RPC node endpoint + + + Returns: + str: Command string + + """ + return self._execute( + "morph dump-containers", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + }, + ) + + def dump_hashes(self, rpc_endpoint: str) -> CommandResult: + """Dump deployed contract hashes. 
+ + Args: + rpc_endpoint (str): N3 RPC node endpoint + + + Returns: + str: Command string + + """ + return self._execute( + "morph dump-hashes", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + }, + ) + + def force_new_epoch( + self, rpc_endpoint: Optional[str] = None, alphabet: Optional[str] = None + ) -> CommandResult: + """Create new NeoFS epoch event in the side chain + + Args: + alphabet (str): path to alphabet wallets dir + rpc_endpoint (str): N3 RPC node endpoint + + + Returns: + str: Command string + + """ + return self._execute( + "morph force-new-epoch", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + }, + ) + + def generate_alphabet(self, rpc_endpoint: str, alphabet_wallets: str, size: int = 7) -> CommandResult: + """Generate alphabet wallets for consensus nodes of the morph network + + Args: + alphabet_wallets (str): path to alphabet wallets dir + size (int): amount of alphabet wallets to generate (default 7) + rpc_endpoint (str): N3 RPC node endpoint + + + Returns: + str: Command string + + """ + return self._execute( + "morph generate-alphabet", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + }, + ) + + def generate_storage_wallet( + self, + rpc_endpoint: str, + alphabet_wallets: str, + storage_wallet: str, + initial_gas: Optional[str] = None, + ) -> CommandResult: + """Generate storage node wallet for the morph network + + Args: + alphabet_wallets (str): path to alphabet wallets dir + initial_gas (str): initial amount of GAS to transfer + rpc_endpoint (str): N3 RPC node endpoint + storage_wallet (str): path to new storage node wallet + + + Returns: + str: Command string + + """ + return self._execute( + "morph generate-storage-wallet", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + }, + ) + + def init( + self, + rpc_endpoint: str, + alphabet_wallets: str, + contracts: str, + protocol: str, + container_alias_fee: int = 500, + container_fee: int = 1000, + epoch_duration: int = 240, + homomorphic_disabled: bool = False, + local_dump: Optional[str] = None, + max_object_size: int = 67108864, + ) -> CommandResult: + """Section for morph network configuration commands. 
+ + Args: + alphabet_wallets (str): path to alphabet wallets dir + container_alias_fee (int): container alias fee (default 500) + container_fee (int): container registration fee (default 1000) + contracts (str): path to archive with compiled NeoFS contracts + (default fetched from latest github release) + epoch_duration (int): amount of side chain blocks in one NeoFS epoch + (default 240) + homomorphic_disabled (bool): disable object homomorphic hashing + local_dump (str): path to the blocks dump file + max_object_size (int): max single object size in bytes (default 67108864) + protocol (str): path to the consensus node configuration + rpc_endpoint (str): N3 RPC node endpoint + + + Returns: + str: Command string + + """ + return self._execute( + "morph init", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + }, + ) + + def refill_gas( + self, + rpc_endpoint: str, + alphabet_wallets: str, + storage_wallet: str, + gas: Optional[str] = None, + ) -> CommandResult: + """Refill GAS of storage node's wallet in the morph network + + Args: + alphabet_wallets (str): path to alphabet wallets dir + gas (str): additional amount of GAS to transfer + rpc_endpoint (str): N3 RPC node endpoint + storage_wallet (str): path to new storage node wallet + + + Returns: + str: Command string + + """ + return self._execute( + "morph refill-gas", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + }, + ) + + def restore_containers( + self, rpc_endpoint: str, alphabet_wallets: str, cid: str, dump: str + ) -> CommandResult: + """Restore NeoFS containers from file. + + Args: + alphabet_wallets (str): path to alphabet wallets dir + cid (str): containers to restore + dump (str): file to restore containers from + rpc_endpoint (str): N3 RPC node endpoint + + + Returns: + str: Command string + + """ + return self._execute( + "morph restore-containers", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + }, + ) + + def set_policy( + self, + rpc_endpoint: str, + alphabet_wallets: str, + exec_fee_factor: Optional[int] = None, + storage_price: Optional[int] = None, + fee_per_byte: Optional[int] = None, + ) -> CommandResult: + """Set global policy values + + Args: + alphabet_wallets (str): path to alphabet wallets dir + exec_fee_factor (int): ExecFeeFactor= + storage_price (int): StoragePrice= + fee_per_byte (int): FeePerByte= + rpc_endpoint (str): N3 RPC node endpoint + + + Returns: + str: Command string + + """ + non_param_attribute = "" + if exec_fee_factor: + non_param_attribute += f"ExecFeeFactor={exec_fee_factor} " + if storage_price: + non_param_attribute += f"StoragePrice={storage_price} " + if fee_per_byte: + non_param_attribute += f"FeePerByte={fee_per_byte} " + return self._execute( + f"morph set-policy {non_param_attribute}", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self", "exec_fee_factor", "storage_price", "fee_per_byte"] + }, + ) + + def update_contracts( + self, rpc_endpoint: str, alphabet_wallets: str, contracts: Optional[str] = None + ) -> CommandResult: + """Update NeoFS contracts.
+ + Args: + alphabet_wallets (str): path to alphabet wallets dir + contracts (str): path to archive with compiled NeoFS contracts + (default fetched from latest github release) + rpc_endpoint (str): N3 RPC node endpoint + + + Returns: + str: Command string + + """ + return self._execute( + "morph update-contracts", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + }, + ) diff --git a/cli/adm/storage_config.py b/cli/adm/storage_config.py new file mode 100644 index 0000000..031bd0f --- /dev/null +++ b/cli/adm/storage_config.py @@ -0,0 +1,25 @@ +from cli.cli_command import NeofsCliCommand +from shell import CommandResult + + +class NeofsAdmStorageConfig(NeofsCliCommand): + def set(self, account: str, wallet: str) -> CommandResult: + """Set storage node configuration. + + Args: + account (str): wallet account + wallet (str): path to wallet + + + Returns: + str: Command string + + """ + return self._execute( + "storage-config", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + } + ) diff --git a/cli/adm/subnet.py b/cli/adm/subnet.py new file mode 100644 index 0000000..e19c468 --- /dev/null +++ b/cli/adm/subnet.py @@ -0,0 +1,255 @@ +from typing import Optional + +from cli.cli_command import NeofsCliCommand +from shell import CommandResult + + +class NeofsAdmMorphSubnet(NeofsCliCommand): + def create(self, rpc_endpoint: str, address: str, wallet: str, notary: bool = False) -> CommandResult: + """Create NeoFS subnet. + + Args: + address (str): Address in the wallet, optional + notary (bool): Flag to create subnet in notary environment + rpc_endpoint (str): N3 RPC node endpoint + wallet (str): Path to file with wallet + + + Returns: + str: Command string + + """ + return self._execute( + "morph subnet create", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + } + ) + + def get(self, rpc_endpoint: str, subnet: str) -> CommandResult: + """Read information about the NeoFS subnet. + + Args: + rpc_endpoint (str): N3 RPC node endpoint + subnet (str): ID of the subnet to read + + + Returns: + str: Command string + + """ + return self._execute( + "morph subnet get", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + } + ) + + def remove( + self, rpc_endpoint: str, wallet: str, subnet: str, address: Optional[str] = None + ) -> CommandResult: + """Remove NeoFS subnet. + + Args: + address (str): Address in the wallet, optional + rpc_endpoint (str): N3 RPC node endpoint + subnet (str): ID of the subnet to read + wallet (str): Path to file with wallet + + + Returns: + str: Command string + + """ + return self._execute( + "morph subnet remove", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + } + ) + + def admin_add( + self, + rpc_endpoint: str, + wallet: str, + admin: str, + subnet: str, + client: Optional[str] = None, + group: Optional[str] = None, + address: Optional[str] = None, + ) -> CommandResult: + """Add admin to the NeoFS subnet.
+ + Args: + address (str): Address in the wallet, optional + admin (str): Hex-encoded public key of the admin + client (str): Add client admin instead of node one + group (str): Client group ID in text format (needed with --client only) + rpc_endpoint (str): N3 RPC node endpoint + subnet (str): ID of the subnet to read + wallet (str): Path to file with wallet + + + Returns: + str: Command string + + """ + return self._execute( + "morph subnet admin add", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + } + ) + + def admin_remove( + self, + rpc_endpoint: str, + wallet: str, + admin: str, + subnet: str, + client: Optional[str] = None, + address: Optional[str] = None, + ) -> CommandResult: + """Remove admin of the NeoFS subnet. + + Args: + address (str): Address in the wallet, optional + admin (str): Hex-encoded public key of the admin + client (str): Remove client admin instead of node one + rpc_endpoint (str): N3 RPC node endpoint + subnet (str): ID of the subnet to read + wallet (str): Path to file with wallet + + + Returns: + str: Command string + + """ + return self._execute( + "morph subnet admin remove", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + } + ) + + def client_add( + self, + rpc_endpoint: str, + wallet: str, + subnet: str, + client: Optional[str] = None, + group: Optional[str] = None, + address: Optional[str] = None, + ) -> CommandResult: + """Add client to the NeoFS subnet. + + Args: + address (str): Address in the wallet, optional + client (str): Add client admin instead of node one + group (str): Client group ID in text format (needed with --client only) + rpc_endpoint (str): N3 RPC node endpoint + subnet (str): ID of the subnet to read + wallet (str): Path to file with wallet + + + Returns: + str: Command string + + """ + return self._execute( + "morph subnet client add", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + } + ) + + def client_remove( + self, + rpc_endpoint: str, + wallet: str, + client: str, + group: str, + subnet: str, + address: Optional[str] = None, + ) -> CommandResult: + """Remove client of the NeoFS subnet. + + Args: + address (str): Address in the wallet, optional + client (str): Remove client admin instead of node one + group (str): ID of the client group to work with + rpc_endpoint (str): N3 RPC node endpoint + subnet (str): ID of the subnet to read + wallet (str): Path to file with wallet + + + Returns: + str: Command string + + """ + return self._execute( + "morph subnet client remove", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + } + ) + + def node_add(self, rpc_endpoint: str, wallet: str, node: str, subnet: str) -> CommandResult: + """Add node to the NeoFS subnet. + + Args: + node (str): Hex-encoded public key of the node + rpc_endpoint (str): N3 RPC node endpoint + subnet (str): ID of the subnet to read + wallet (str): Path to file with wallet + + + Returns: + str: Command string + + """ + return self._execute( + "morph subnet node add", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + } + ) + + def node_remove(self, rpc_endpoint: str, wallet: str, node: str, subnet: str) -> CommandResult: + """Remove node from the NeoFS subnet. 
+ + Args: + node (str): Hex-encoded public key of the node + rpc_endpoint (str): N3 RPC node endpoint + subnet (str): ID of the subnet to read + wallet (str): Path to file with wallet + + + Returns: + str: Command string + + """ + return self._execute( + "morph subnet node remove", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + } + ) diff --git a/cli/adm/version.py b/cli/adm/version.py new file mode 100644 index 0000000..6a2aedd --- /dev/null +++ b/cli/adm/version.py @@ -0,0 +1,13 @@ +from cli.cli_command import NeofsCliCommand +from shell import CommandResult + + +class NeofsAdmVersion(NeofsCliCommand): + def get(self) -> CommandResult: + """Application version + + Returns: + str: Command string + + """ + return self._execute("", version=True) diff --git a/cli/authmate/__init__.py b/cli/authmate/__init__.py new file mode 100644 index 0000000..112b6a9 --- /dev/null +++ b/cli/authmate/__init__.py @@ -0,0 +1 @@ +from .authmate import NeofsAuthmate diff --git a/cli/authmate/authmate.py b/cli/authmate/authmate.py new file mode 100644 index 0000000..58ee873 --- /dev/null +++ b/cli/authmate/authmate.py @@ -0,0 +1,20 @@ +from typing import Optional + +from shell import Shell + +from .secret import NeofsAuthmateSecret +from .version import NeofsAuthmateVersion + + +class NeofsAuthmate: + secret: Optional[NeofsAuthmateSecret] = None + version: Optional[NeofsAuthmateVersion] = None + + def __init__( + self, + shell: Shell, + neofs_authmate_exec_path: str, + ): + + self.secret = NeofsAuthmateSecret(shell, neofs_authmate_exec_path) + self.version = NeofsAuthmateVersion(shell, neofs_authmate_exec_path) diff --git a/cli/authmate/secret.py b/cli/authmate/secret.py new file mode 100644 index 0000000..66d7e81 --- /dev/null +++ b/cli/authmate/secret.py @@ -0,0 +1,92 @@ +from typing import Optional + +from cli.cli_command import NeofsCliCommand +from shell import CommandResult + + +class NeofsAuthmateSecret(NeofsCliCommand): + def obtain( + self, + wallet: str, + peer: str, + gate_wallet: str, + access_key_id: str, + address: Optional[str] = None, + gate_address: Optional[str] = None, + ) -> CommandResult: + """Obtain a secret from NeoFS network + + Args: + wallet (str): path to the wallet + address (str): address of wallet account + peer (str): address of neofs peer to connect to + gate_wallet (str): path to the wallet + gate_address (str): address of wallet account + access_key_id (str): access key id for s3 + + Returns: + str: Command string + + """ + return self._execute( + "obtain-secret", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + } + ) + + def issue( + self, + wallet: str, + peer: str, + bearer_rules: str, + gate_public_key: str, + address: Optional[str] = None, + container_id: Optional[str] = None, + container_friendly_name: Optional[str] = None, + container_placement_policy: Optional[str] = None, + session_tokens: Optional[str] = None, + lifetime: Optional[str] = None, + container_policy: Optional[str] = None, + aws_cli_credentials: Optional[str] = None, + ) -> CommandResult: + """Issue a secret in NeoFS network + + Args: + wallet (str): path to the wallet + address (str): address of wallet account + peer (str): address of a neofs peer to connect to + bearer_rules (str): rules for bearer token as plain json string + gate_public_key (str): public 256r1 key of a gate (use flags repeatedly for + multiple gates) + container_id (str): auth container id to put the secret into +
container_friendly_name (str): friendly name of auth container to put the + secret into + container_placement_policy (str): placement policy of auth container to put the + secret into + (default: "REP 2 IN X CBF 3 SELECT 2 FROM * AS X") + session_tokens (str): create session tokens with rules, if the rules are + set as 'none', no session tokens will be created + lifetime (str): Lifetime of tokens. For example 50h30m + (note: max time unit is an hour so to set a day you + should use 24h). It will be ceil rounded to the + nearest amount of epoch. (default: 720h0m0s) + container_policy (str): mapping AWS storage class to NeoFS storage policy as + plain json string or path to json file + aws_cli_credentials (str): path to the aws cli credential file + + + Returns: + str: Command string + + """ + return self._execute( + "issue-secret", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + } + ) diff --git a/cli/authmate/version.py b/cli/authmate/version.py new file mode 100644 index 0000000..4432b2d --- /dev/null +++ b/cli/authmate/version.py @@ -0,0 +1,13 @@ +from cli.cli_command import NeofsCliCommand +from shell import CommandResult + + +class NeofsAuthmateVersion(NeofsCliCommand): + def get(self) -> CommandResult: + """Application version + + Returns: + str: Command string + + """ + return self._execute("", version=True) diff --git a/cli/cli_command.py b/cli/cli_command.py new file mode 100644 index 0000000..a622324 --- /dev/null +++ b/cli/cli_command.py @@ -0,0 +1,58 @@ +from typing import Optional + +from shell import CommandResult, Shell + + +class NeofsCliCommand: + + WALLET_SOURCE_ERROR_MSG = 'Provide either wallet or wallet_config to specify wallet location' + + neofs_cli_exec: Optional[str] = None + __base_params: Optional[str] = None + map_params = { + "json_mode": "json", + "await_mode": "await", + "hash_type": "hash", + "doc_type": "type", + } + + def __init__(self, shell: Shell, neofs_cli_exec: str, **base_params): + self.shell = shell + self.neofs_cli_exec = neofs_cli_exec + self.__base_params = " ".join( + [f"--{param} {value}" for param, value in base_params.items() if value] + ) + + def _format_command(self, command: str, **params) -> str: + param_str = [] + for param, value in params.items(): + if param in self.map_params.keys(): + param = self.map_params[param] + param = param.replace("_", "-") + if not value: + continue + if isinstance(value, bool): + param_str.append(f"--{param}") + elif isinstance(value, int): + param_str.append(f"--{param} {value}") + elif isinstance(value, list): + for value_item in value: + val_str = str(value_item).replace("'", "\\'") + param_str.append(f"--{param} '{val_str}'") + elif isinstance(value, dict): + param_str.append( + f'--{param} \'{",".join(f"{key}={val}" for key, val in value.items())}\'' + ) + else: + if "'" in str(value): + value_str = str(value).replace('"', '\\"') + param_str.append(f'--{param} "{value_str}"') + else: + param_str.append(f"--{param} '{value}'") + + param_str = " ".join(param_str) + + return f'{self.neofs_cli_exec} {self.__base_params} {command or ""} {param_str}' + + def _execute(self, command: Optional[str], **params) -> CommandResult: + return self.shell.exec(self._format_command(command, **params)) diff --git a/cli/go/__init__.py b/cli/go/__init__.py new file mode 100644 index 0000000..d3fa193 --- /dev/null +++ b/cli/go/__init__.py @@ -0,0 +1,2 @@ +from .blockchain_network_type import NetworkType +from .go import NeoGo diff --git a/cli/go/blockchain_network_type.py 
b/cli/go/blockchain_network_type.py new file mode 100644 index 0000000..9129f88 --- /dev/null +++ b/cli/go/blockchain_network_type.py @@ -0,0 +1,7 @@ +from enum import Enum + + +class NetworkType(Enum): + PRIVATE = "privnet" + MAIN = "mainnet" + TEST = "testnet" diff --git a/cli/go/candidate.py b/cli/go/candidate.py new file mode 100644 index 0000000..99a42bf --- /dev/null +++ b/cli/go/candidate.py @@ -0,0 +1,120 @@ +from typing import Optional + +from cli.cli_command import NeofsCliCommand +from shell import CommandResult + + +class NeoGoCandidate(NeofsCliCommand): + + def register( + self, + address: str, + rpc_endpoint: str, + wallet: Optional[str] = None, + wallet_config: Optional[str] = None, + gas: Optional[float] = None, + timeout: int = 10, + ) -> CommandResult: + """ register as a new candidate + + Args: + address (str): Address to register + wallet (str): Target location of the wallet file ('-' to read from stdin); + conflicts with --wallet-config flag. + wallet_config (str): Target location of the wallet config file; + conflicts with --wallet flag. + gas (float): network fee to add to the transaction (prioritizing it) + rpc_endpoint (str): RPC node address + timeout (int): Timeout for the operation (default: 10s) + + + Returns: + str: Command string + + """ + assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG + + return self._execute( + "wallet candidate register", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + } + ) + + def unregister( + self, + address: str, + rpc_endpoint: str, + wallet: Optional[str] = None, + wallet_config: Optional[str] = None, + gas: Optional[float] = None, + timeout: int = 10, + ) -> CommandResult: + """ unregister self as a candidate + + Args: + address (str): Address to unregister + wallet (str): Target location of the wallet file ('-' to read from stdin); + conflicts with --wallet-config flag. + wallet_config (str): Target location of the wallet config file; + conflicts with --wallet flag. + gas (float): network fee to add to the transaction (prioritizing it) + rpc_endpoint (str): RPC node address + timeout (int): Timeout for the operation (default: 10s) + + + Returns: + str: Command string + + """ + assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG + + return self._execute( + "wallet candidate unregister", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + } + ) + + def vote( + self, + candidate: str, + rpc_endpoint: str, + wallet: Optional[str] = None, + wallet_config: Optional[str] = None, + gas: Optional[float] = None, + timeout: int = 10, + ) -> CommandResult: + """ Votes for a validator by calling "vote" method of a NEO native + contract. Do not provide candidate argument to perform unvoting. + + + Args: + candidate (str): Public key of candidate to vote for + wallet (str): Target location of the wallet file ('-' to read from stdin); + conflicts with --wallet-config flag. + wallet_config (str): Target location of the wallet config file; + conflicts with --wallet flag. 
diff --git a/cli/go/candidate.py b/cli/go/candidate.py
new file mode 100644
index 0000000..99a42bf
--- /dev/null
+++ b/cli/go/candidate.py
@@ -0,0 +1,120 @@
+from typing import Optional
+
+from cli.cli_command import NeofsCliCommand
+from shell import CommandResult
+
+
+class NeoGoCandidate(NeofsCliCommand):
+
+    def register(
+            self,
+            address: str,
+            rpc_endpoint: str,
+            wallet: Optional[str] = None,
+            wallet_config: Optional[str] = None,
+            gas: Optional[float] = None,
+            timeout: int = 10,
+    ) -> CommandResult:
+        """ register as a new candidate
+
+        Args:
+            address (str): Address to register
+            wallet (str): Target location of the wallet file ('-' to read from stdin);
+                conflicts with --wallet-config flag.
+            wallet_config (str): Target location of the wallet config file;
+                conflicts with --wallet flag.
+            gas (float): network fee to add to the transaction (prioritizing it)
+            rpc_endpoint (str): RPC node address
+            timeout (int): Timeout for the operation (default: 10s)
+
+
+        Returns:
+            str: Command string
+
+        """
+        assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
+
+        return self._execute(
+            "wallet candidate register",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            }
+        )
+
+    def unregister(
+        self,
+        address: str,
+        rpc_endpoint: str,
+        wallet: Optional[str] = None,
+        wallet_config: Optional[str] = None,
+        gas: Optional[float] = None,
+        timeout: int = 10,
+    ) -> CommandResult:
+        """ unregister self as a candidate
+
+        Args:
+            address (str): Address to unregister
+            wallet (str): Target location of the wallet file ('-' to read from stdin);
+                conflicts with --wallet-config flag.
+            wallet_config (str): Target location of the wallet config file;
+                conflicts with --wallet flag.
+            gas (float): network fee to add to the transaction (prioritizing it)
+            rpc_endpoint (str): RPC node address
+            timeout (int): Timeout for the operation (default: 10s)
+
+
+        Returns:
+            str: Command string
+
+        """
+        assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
+
+        return self._execute(
+            "wallet candidate unregister",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            }
+        )
+
+    def vote(
+        self,
+        candidate: str,
+        rpc_endpoint: str,
+        wallet: Optional[str] = None,
+        wallet_config: Optional[str] = None,
+        gas: Optional[float] = None,
+        timeout: int = 10,
+    ) -> CommandResult:
+        """ Votes for a validator by calling "vote" method of a NEO native
+        contract. Do not provide candidate argument to perform unvoting.
+
+
+        Args:
+            candidate (str): Public key of candidate to vote for
+            wallet (str): Target location of the wallet file ('-' to read from stdin);
+                conflicts with --wallet-config flag.
+            wallet_config (str): Target location of the wallet config file;
+                conflicts with --wallet flag.
+            gas (float): network fee to add to the transaction (prioritizing it)
+            rpc_endpoint (str): RPC node address
+            timeout (int): Timeout for the operation (default: 10s)
+
+
+        Returns:
+            str: Command string
+
+        """
+        assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
+
+        return self._execute(
+            "wallet candidate vote",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            }
+        )
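All three candidate methods build a `neo-go wallet candidate ...` command line and hand it to the shell. A hedged usage sketch (endpoint, address and wallet path are placeholders; it assumes the `NeoGo` facade and `LocalShell` exported elsewhere in this series):

```python
from cli.go import NeoGo
from shell import LocalShell

neo_go = NeoGo(LocalShell(), neo_go_exec_path="neo-go")

# Exactly one of wallet / wallet_config may be given: the XOR assert
# in each method rejects calls that pass both or neither.
result = neo_go.candidate.register(
    address="NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP",  # placeholder address
    rpc_endpoint="http://localhost:30333",         # placeholder endpoint
    wallet="./wallet.json",
)
print(result.stdout)
```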
diff --git a/cli/go/contract.py b/cli/go/contract.py
new file mode 100644
index 0000000..5797e06
--- /dev/null
+++ b/cli/go/contract.py
@@ -0,0 +1,358 @@
+from typing import Optional
+
+from cli.cli_command import NeofsCliCommand
+from shell import CommandResult
+
+
+class NeoGoContract(NeofsCliCommand):
+    def compile(
+        self,
+        input_file: str,
+        out: str,
+        manifest: str,
+        config: str,
+        no_standards: bool = False,
+        no_events: bool = False,
+        no_permissions: bool = False,
+        bindings: Optional[str] = None,
+    ) -> CommandResult:
+        """compile a smart contract to a .nef file
+
+        Args:
+            input_file (str): Input file for the smart contract to be compiled
+            out (str): Output of the compiled contract
+            manifest (str): Emit contract manifest (*.manifest.json) file into separate
+                file using configuration input file (*.yml)
+            config (str): Configuration input file (*.yml)
+            no_standards (bool): do not check compliance with supported standards
+            no_events (bool): do not check emitted events with the manifest
+            no_permissions (bool): do not check if invoked contracts are allowed in manifest
+            bindings (str): output file for smart-contract bindings configuration
+
+        Returns:
+            str: Command string
+
+        """
+        return self._execute(
+            "contract compile",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            },
+        )
+
+    def deploy(
+        self,
+        address: str,
+        input_file: str,
+        sysgas: float,
+        manifest: str,
+        rpc_endpoint: str,
+        wallet: Optional[str] = None,
+        wallet_config: Optional[str] = None,
+        gas: Optional[float] = None,
+        out: Optional[str] = None,
+        force: bool = False,
+        timeout: int = 10,
+
+    ) -> CommandResult:
+        """deploy a smart contract (.nef with description)
+
+        Args:
+            wallet (str): wallet to use to get the key for transaction signing;
+                conflicts with wallet_config
+            wallet_config (str): path to wallet config to use to get the key for transaction
+                signing; conflicts with wallet
+            address (str): address to use as transaction signee (and gas source)
+            gas (float): network fee to add to the transaction (prioritizing it)
+            sysgas (float): system fee to add to transaction (compensating for execution)
+            out (str): file to put JSON transaction to
+            force (bool): Do not ask for a confirmation
+            rpc_endpoint (str): RPC node address
+            timeout (int): Timeout for the operation (default: 10s)
+            input_file (str): Input file for the smart contract (*.nef)
+            manifest (str): Emit contract manifest (*.manifest.json) file into separate
+                file using configuration input file (*.yml)
+
+        Returns:
+            str: Command string
+
+        """
+        assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
+
+        return self._execute(
+            "contract deploy",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            },
+        )
+
+    def generate_wrapper(
+        self,
+        out: str,
+        hash: str,
+        config: Optional[str] = None,
+        manifest: Optional[str] = None,
+    ) -> CommandResult:
+        """generate wrapper to use in other contracts
+
+        Args:
+            config (str): Configuration file to use
+            manifest (str): Read contract manifest (*.manifest.json) file
+            out (str): Output of the compiled contract
+            hash (str): Smart-contract hash
+
+        Returns:
+            str: Command string
+
+        """
+        return self._execute(
+            "contract generate-wrapper",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            },
+        )
+
+    def invokefunction(
+        self,
+        address: str,
+        scripthash: str,
+        wallet: Optional[str] = None,
+        method: Optional[str] = None,
+        arguments: Optional[str] = None,
+        multisig_hash: Optional[str] = None,
+        wallet_config: Optional[str] = None,
+        gas: Optional[float] = None,
+        sysgas: Optional[float] = None,
+        out: Optional[str] = None,
+        force: bool = False,
+        rpc_endpoint: Optional[str] = None,
+        timeout: int = 10,
+    ) -> CommandResult:
+        """Executes given (as a script hash) deployed script with the given method,
+        arguments and signers. Sender is included in the list of signers by default
+        with None witness scope. If you'd like to change default sender's scope,
+        specify it via signers parameter. See testinvokefunction documentation for
+        the details about parameters. It differs from testinvokefunction in that this
+        command sends an invocation transaction to the network.
+
+        Args:
+            scripthash (str): Function hash
+            method (str): Call method
+            arguments (str): Method arguments
+            multisig_hash (str): Multisig hash
+            wallet (str): wallet to use to get the key for transaction signing;
+                conflicts with wallet_config
+            wallet_config (str): path to wallet config to use to get the key for transaction
+                signing; conflicts with wallet
+            address (str): address to use as transaction signee (and gas source)
+            gas (float): network fee to add to the transaction (prioritizing it)
+            sysgas (float): system fee to add to transaction (compensating for execution)
+            out (str): file to put JSON transaction to
+            force (bool): force-push the transaction in case of bad VM state after
+                test script invocation
+            rpc_endpoint (str): RPC node address
+            timeout (int): Timeout for the operation (default: 10s)
+
+        Returns:
+            str: Command string
+
+        """
+        multisig_hash = f"-- {multisig_hash}" if multisig_hash else ""
+        return self._execute(
+            "contract invokefunction "
+            f"{scripthash} {method or ''} {arguments or ''} {multisig_hash}",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self", "scripthash", "method", "arguments", "multisig_hash"]
+            },
+        )
+
+    def testinvokefunction(
+        self,
+        scripthash: str,
+        wallet: Optional[str] = None,
+        method: Optional[str] = None,
+        arguments: Optional[str] = None,
+        multisig_hash: Optional[str] = None,
+        rpc_endpoint: Optional[str] = None,
+        timeout: int = 10,
+    ) -> CommandResult:
+        """Executes given (as a script hash) deployed script with the given method,
+        arguments and signers (sender is not included by default). If no method is given
+        "" is passed to the script, if no arguments are given, an empty array is
+        passed, if no signers are given no array is passed. If signers are specified,
+        the first one of them is treated as a sender. All of the given arguments are
+        encapsulated into array before invoking the script. The script thus should
+        follow the regular convention of smart contract arguments (method string and
+        an array of other arguments).
+
+        See more information and samples in `neo-go contract testinvokefunction --help`
+
+        Args:
+            scripthash (str): Function hash
+            method (str): Call method
+            arguments (str): Method arguments
+            multisig_hash (str): Multisig hash
+            rpc_endpoint (str): RPC node address
+            timeout (int): Timeout for the operation (default: 10s)
+
+        Returns:
+            str: Command string
+
+        """
+        multisig_hash = f"-- {multisig_hash}" if multisig_hash else ""
+        return self._execute(
+            "contract testinvokefunction "
+            f"{scripthash} {method or ''} {arguments or ''} {multisig_hash}",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self", "scripthash", "method", "arguments", "multisig_hash"]
+            },
+        )
+
+    def testinvokescript(
+        self,
+        input_file: str,
+        rpc_endpoint: Optional[str] = None,
+        timeout: int = 10,
+    ) -> CommandResult:
+        """Executes given compiled AVM instructions in NEF format with the given set
+        of signers (sender is not included by default). See testinvokefunction
+        documentation for the details about parameters.
+
+
+        Args:
+            input_file (str): Input location of the .nef file that needs to be invoked
+            rpc_endpoint (str): RPC node address
+            timeout (int): Timeout for the operation (default: 10s)
+
+        Returns:
+            str: Command string
+
+        """
+        return self._execute(
+            "contract testinvokescript",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            },
+        )
+
+    def init(
+        self,
+        name: str,
+        skip_details: bool = False,
+    ) -> CommandResult:
+        """initialize a new smart-contract in a directory with boiler plate code
+
+        Args:
+            name (str): name of the smart-contract to be initialized
+            skip_details (bool): skip filling in the projects and contract details
+
+        Returns:
+            str: Command string
+
+        """
+        return self._execute(
+            "contract init",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            },
+        )
+
+    def inspect(
+        self,
+        input_file: Optional[str] = None,
+        compile: Optional[str] = None,
+    ) -> CommandResult:
+        """creates a user readable dump of the program instructions
+
+        Args:
+            input_file (str): input file of the program (either .go or .nef)
+            compile (str): compile input file (it should be go code then)
+
+        Returns:
+            str: Command string
+
+        """
+        return self._execute(
+            "contract inspect",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            },
+        )
+
+    def calc_hash(
+        self,
+        input_file: str,
+        manifest: str,
+        sender: Optional[str] = None,
+    ) -> CommandResult:
+        """calculates hash of a contract after deployment
+
+        Args:
+            input_file (str): path to NEF file
+            sender (str): sender script hash or address
+            manifest (str): path to manifest file
+
+        Returns:
+            str: Command string
+
+        """
+        return self._execute(
+            "contract calc-hash",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            },
+        )
+
+    def add_group(
+        self,
+        manifest: str,
+        address: str,
+        wallet: Optional[str] = None,
+        wallet_config: Optional[str] = None,
+        sender: Optional[str] = None,
+        nef: Optional[str] = None,
+    ) -> CommandResult:
+        """adds group to the manifest
+
+        Args:
+            wallet (str): wallet to use to get the key for transaction signing;
+                conflicts with wallet_config
+            wallet_config (str): path to wallet config to use to get the key for transaction
+                signing; conflicts with wallet
+            sender (str): deploy transaction sender
+            address (str): account to sign group with
+            nef (str): path to the NEF file
+            manifest (str): path to the manifest
+
+
+        Returns:
+            str: Command string
+
+        """
+        return self._execute(
+            "contract manifest add-group",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            },
+        )
diff --git a/cli/go/db.py b/cli/go/db.py
new file mode 100644
index 0000000..1faf28b
--- /dev/null
+++ b/cli/go/db.py
@@ -0,0 +1,74 @@
+from typing import Optional
+
+from cli.cli_command import NeofsCliCommand
+from shell import CommandResult
+
+from .blockchain_network_type import NetworkType
+
+
+class NeoGoDb(NeofsCliCommand):
+    def dump(
+            self,
+            config_path: str,
+            out: str,
+            network: NetworkType = NetworkType.PRIVATE,
+            count: int = 0,
+            start: int = 0,
+    ) -> CommandResult:
+        """ dump blocks (starting with block #1) to the file
+
+        Args:
+            config_path (str): path to config
+            network (NetworkType): Select network type (default: private)
+            count (int): number of blocks to be processed (default or 0: all chain)
+                (default: 0)
+            start (int): block number to start from (default: 0)
+            out (str): Output file (stdout if not given)
+
+        Returns:
+            str: Command string
+
+        """
+        return self._execute(
+            "db dump",
+            **{network.value: True},
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            }
+        )
+
+    def restore(
+            self,
+            config_path: str,
+            input_file: str,
+            network: NetworkType = NetworkType.PRIVATE,
+            count: int = 0,
+            dump: Optional[str] = None,
+            incremental: bool = False,
+    ) -> CommandResult:
+        """ restore blocks (starting with block #1) from the file
+
+        Args:
+            config_path (str): path to config
+            network (NetworkType): Select network type (default: private)
+            count (int): number of blocks to be processed (default or 0: all chain)
+                (default: 0)
+            input_file (str): Input file (stdin if not given)
+            dump (str): directory for storing JSON dumps
+            incremental (bool): use if dump is incremental
+
+        Returns:
+            str: Command string
+
+        """
+        return self._execute(
+            "db restore",
+            **{network.value: True},
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            }
+        )
diff --git a/cli/go/go.py b/cli/go/go.py
new file mode 100644
index 0000000..635d42e
--- /dev/null
+++ b/cli/go/go.py
@@ -0,0 +1,44 @@
+from typing import Optional
+
+from shell import Shell
+
+from .candidate import NeoGoCandidate
+from .contract import NeoGoContract
+from .db import NeoGoDb
+from .nep17 import NeoGoNep17
+from .node import NeoGoNode
+from .query import NeoGoQuery
+from .version import NeoGoVersion
+from .wallet import NeoGoWallet
+
+
+class NeoGo:
+    neo_go_exec_path: Optional[str] = None
+    config_path: Optional[str] = None
+    candidate: Optional[NeoGoCandidate] = None
+    contract: Optional[NeoGoContract] = None
+    db: Optional[NeoGoDb] = None
+    nep17: Optional[NeoGoNep17] = None
+    node: Optional[NeoGoNode] = None
+    query: Optional[NeoGoQuery] = None
+    version: Optional[NeoGoVersion] = None
+    wallet: Optional[NeoGoWallet] = None
+
+    def __init__(
+        self,
+        shell: Shell,
+        neo_go_exec_path: Optional[str] = None,
+        config_path: Optional[str] = None,
+    ):
+        self.candidate = NeoGoCandidate(
+            shell, neo_go_exec_path, config_path=config_path
+        )
+        self.contract = NeoGoContract(
+            self.neo_go_exec_path, config_path=config_path
+        )
+        self.db = NeoGoDb(shell, neo_go_exec_path, config_path=config_path)
+        self.nep17 = NeoGoNep17(shell, neo_go_exec_path, config_path=config_path)
+        self.node = NeoGoNode(shell, neo_go_exec_path, config_path=config_path)
+        self.query = NeoGoQuery(shell, neo_go_exec_path, config_path=config_path)
+        self.version = NeoGoVersion(shell, neo_go_exec_path, config_path=config_path)
+        self.wallet = NeoGoWallet(shell, neo_go_exec_path, config_path=config_path)
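The `NeoGo` class is a facade: one shell plus one executable path fan out to all subcommand wrappers. Note that `NeoGoContract` above is constructed without the `shell` argument (and with `self.neo_go_exec_path`, which is still `None` at that point); the `[PATCH 007/363]` diff further below corrects that call. A hedged sketch of the intended usage (paths and endpoint are placeholders):

```python
from cli.go import NeoGo, NetworkType
from shell import LocalShell

neo_go = NeoGo(
    LocalShell(),
    neo_go_exec_path="/usr/local/bin/neo-go",  # placeholder path
    config_path="./protocol.privnet.yml",      # placeholder config
)

# Each attribute wraps one neo-go subcommand group.
neo_go.node.start(network=NetworkType.PRIVATE)
neo_go.db.dump(config_path="./protocol.privnet.yml", out="./chain.acc")
```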
diff --git a/cli/go/nep17.py b/cli/go/nep17.py
new file mode 100644
index 0000000..62c936d
--- /dev/null
+++ b/cli/go/nep17.py
@@ -0,0 +1,243 @@
+from typing import List, Optional
+
+from cli.cli_command import NeofsCliCommand
+from shell import CommandResult
+
+
+class NeoGoNep17(NeofsCliCommand):
+    def balance(
+        self,
+        address: str,
+        token: str,
+        rpc_endpoint: str,
+        wallet: Optional[str] = None,
+        wallet_config: Optional[str] = None,
+        timeout: int = 10,
+    ) -> CommandResult:
+        """Get address balance
+
+        Args:
+            address (str): Address to use
+            wallet (str): Target location of the wallet file ('-' to read from stdin);
+                conflicts with --wallet-config flag.
+            wallet_config (str): Target location of the wallet config file;
+                conflicts with --wallet flag.
+            token (str): Token to use (hash or name (for NEO/GAS or imported tokens))
+            rpc_endpoint (str): RPC node address
+            timeout (int): Timeout for the operation (default: 10s)
+
+        Returns:
+            str: Command string
+
+        """
+        assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
+
+        return self._execute(
+            "wallet nep17 balance",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            }
+        )
+
+    def import_token(
+        self,
+        address: str,
+        wallet: Optional[str] = None,
+        wallet_config: Optional[str] = None,
+        token: Optional[str] = None,
+        rpc_endpoint: Optional[str] = None,
+        timeout: int = 10,
+    ) -> CommandResult:
+        """import NEP-17 token to a wallet
+
+        Args:
+            address (str): Token contract address or hash in LE
+            wallet (str): Target location of the wallet file ('-' to read from stdin);
+                conflicts with --wallet-config flag.
+            wallet_config (str): Target location of the wallet config file;
+                conflicts with --wallet flag.
+            token (str): Token to use (hash or name (for NEO/GAS or imported tokens))
+            rpc_endpoint (str): RPC node address
+            timeout (int): Timeout for the operation (default: 10s)
+
+        Returns:
+            str: Command string
+
+        """
+        assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
+
+        return self._execute(
+            "wallet nep17 import",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            }
+        )
+
+    def info(
+        self,
+        token: str,
+        wallet: Optional[str] = None,
+        wallet_config: Optional[str] = None,
+    ) -> CommandResult:
+        """print imported NEP-17 token info
+
+        Args:
+            wallet (str): Target location of the wallet file ('-' to read from stdin);
+                conflicts with --wallet-config flag.
+            wallet_config (str): Target location of the wallet config file;
+                conflicts with --wallet flag.
+            token (str): Token to use (hash or name (for NEO/GAS or imported tokens))
+
+        Returns:
+            str: Command string
+
+        """
+        assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
+
+        return self._execute(
+            "wallet nep17 info",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            }
+        )
+
+    def remove(
+        self,
+        token: str,
+        wallet: Optional[str] = None,
+        wallet_config: Optional[str] = None,
+        force: bool = False,
+    ) -> CommandResult:
+        """remove NEP-17 token from the wallet
+
+        Args:
+            wallet (str): Target location of the wallet file ('-' to read from stdin);
+                conflicts with --wallet-config flag.
+            wallet_config (str): Target location of the wallet config file;
+                conflicts with --wallet flag.
+            token (str): Token to use (hash or name (for NEO/GAS or imported tokens))
+            force (bool): Do not ask for a confirmation
+
+        Returns:
+            str: Command string
+
+        """
+        return self._execute(
+            "wallet nep17 remove",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            }
+        )
+
+    def transfer(
+        self,
+        token: str,
+        to_address: str,
+        sysgas: float,
+        rpc_endpoint: str,
+        wallet: Optional[str] = None,
+        wallet_config: Optional[str] = None,
+        out: Optional[str] = None,
+        from_address: Optional[str] = None,
+        force: bool = False,
+        gas: Optional[float] = None,
+        amount: float = 0,
+        timeout: int = 10,
+    ) -> CommandResult:
+        """Transfers specified NEP-17 token amount with optional 'data' parameter and cosigners
+        list attached to the transfer. See 'contract testinvokefunction' documentation
+        for the details about 'data' parameter and cosigners syntax. If no 'data' is
+        given then default nil value will be used. If no cosigners are given then the
+        sender with CalledByEntry scope will be used as the only signer.
+
+        Args:
+            wallet (str): Target location of the wallet file ('-' to read from stdin);
+                conflicts with --wallet-config flag.
+            wallet_config (str): Target location of the wallet config file;
+                conflicts with --wallet flag.
+            out (str): file to put JSON transaction to
+            from_address (str): Address to send an asset from
+            to_address (str): Address to send an asset to
+            token (str): Token to use (hash or name (for NEO/GAS or imported tokens))
+            gas (float): network fee to add to the transaction (prioritizing it)
+            sysgas (float): system fee to add to transaction (compensating for execution)
+            force (bool): Do not ask for a confirmation
+            amount (float): Amount of asset to send
+            rpc_endpoint (str): RPC node address
+            timeout (int): Timeout for the operation (default: 10s)
+
+
+        Returns:
+            str: Command string
+
+        """
+        assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
+
+        return self._execute(
+            "wallet nep17 transfer",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            }
+        )
+
+    def multitransfer(
+        self,
+        token: str,
+        to_address: List[str],
+        sysgas: float,
+        rpc_endpoint: str,
+        wallet: Optional[str] = None,
+        wallet_config: Optional[str] = None,
+        out: Optional[str] = None,
+        from_address: Optional[str] = None,
+        force: bool = False,
+        gas: Optional[float] = None,
+        amount: float = 0,
+
+        timeout: int = 10,
+    ) -> CommandResult:
+        """transfer NEP-17 tokens to multiple recipients
+
+        Args:
+            wallet (str): Target location of the wallet file ('-' to read from stdin);
+                conflicts with --wallet-config flag.
+            wallet_config (str): Target location of the wallet config file;
+                conflicts with --wallet flag.
+            out (str): file to put JSON transaction to
+            from_address (str): Address to send an asset from
+            to_address (str): Address to send an asset to
+            token (str): Token to use (hash or name (for NEO/GAS or imported tokens))
+            gas (float): network fee to add to the transaction (prioritizing it)
+            sysgas (float): system fee to add to transaction (compensating for execution)
+            force (bool): Do not ask for a confirmation
+            amount (float): Amount of asset to send
+            rpc_endpoint (str): RPC node address
+            timeout (int): Timeout for the operation (default: 10s)
+
+
+        Returns:
+            str: Command string
+
+        """
+        assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
+
+        return self._execute(
+            "wallet nep17 multitransfer",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            }
+        )
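A hedged sketch of a NEP-17 transfer through the wrapper above (token name, addresses, endpoint and wallet path are placeholders):

```python
from cli.go import NeoGo
from shell import LocalShell

neo_go = NeoGo(LocalShell(), neo_go_exec_path="neo-go")

result = neo_go.nep17.transfer(
    token="GAS",
    to_address="NX8GreRFGFK5wpGMWetpX93HmtrezGogzk",    # placeholder
    from_address="NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP",  # placeholder
    amount=1.5,
    sysgas=0.001,
    rpc_endpoint="http://localhost:30333",
    wallet="./wallet.json",
    force=True,  # skip the interactive confirmation
)
print(result.stdout)
```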
diff --git a/cli/go/node.py b/cli/go/node.py
new file mode 100644
index 0000000..363dc9b
--- /dev/null
+++ b/cli/go/node.py
@@ -0,0 +1,18 @@
+from cli.cli_command import NeofsCliCommand
+from shell import CommandResult
+
+from .blockchain_network_type import NetworkType
+
+
+class NeoGoNode(NeofsCliCommand):
+    def start(self, network: NetworkType = NetworkType.PRIVATE) -> CommandResult:
+        """Start a NEO node
+
+        Args:
+            network (NetworkType): Select network type (default: private)
+
+        Returns:
+            str: Command string
+
+        """
+        return self._execute("start", **{network.value: True})
diff --git a/cli/go/query.py b/cli/go/query.py
new file mode 100644
index 0000000..bdcff77
--- /dev/null
+++ b/cli/go/query.py
@@ -0,0 +1,128 @@
+from typing import Optional
+
+from cli.cli_command import NeofsCliCommand
+from shell import CommandResult
+
+
+class NeoGoQuery(NeofsCliCommand):
+    def candidates(
+        self,
+        rpc_endpoint: str,
+        timeout: int = 10,
+    ) -> CommandResult:
+        """Get candidates and votes
+
+        Args:
+            rpc_endpoint (str): RPC node address
+            timeout (int): Timeout for the operation (default: 10s)
+
+        Returns:
+            str: Command string
+
+        """
+        return self._execute(
+            "query candidates",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            },
+        )
+
+    def committee(
+        self,
+        rpc_endpoint: str,
+        timeout: int = 10,
+    ) -> CommandResult:
+        """Get committee list
+
+        Args:
+            rpc_endpoint (str): RPC node address
+            timeout (int): Timeout for the operation (default: 10s)
+
+        Returns:
+            str: Command string
+
+        """
+        return self._execute(
+            "query committee",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            },
+        )
+
+    def height(
+        self,
+        rpc_endpoint: str,
+        timeout: int = 10,
+    ) -> CommandResult:
+        """Get node height
+
+        Args:
+            rpc_endpoint (str): RPC node address
+            timeout (int): Timeout for the operation (default: 10s)
+
+        Returns:
+            str: Command string
+
+        """
+        return self._execute(
+            "query height",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            },
+        )
+
+    def tx(
+        self,
+        tx_hash: str,
+        rpc_endpoint: str,
+        timeout: int = 10,
+    ) -> CommandResult:
+        """Query transaction status
+
+        Args:
+            tx_hash (str): Hash of transaction
+            rpc_endpoint (str): RPC node address
+            timeout (int): Timeout for the operation (default: 10s)
+
+        Returns:
+            str: Command string
+
+        """
+        return self._execute(
+            f"query tx {tx_hash}",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self", "hash"]
+            },
+        )
+
+    def voter(
+        self,
+        rpc_endpoint: str,
+        timeout: int = 10,
+    ) -> CommandResult:
+        """Print NEO holder account state
+
+        Args:
+            rpc_endpoint (str): RPC node address
+            timeout (int): Timeout for the operation (default: 10s)
+
+        Returns:
+            str: Command string
+
+        """
+        return self._execute(
+            "query voter",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            },
+        )
diff --git a/cli/go/version.py b/cli/go/version.py
new file mode 100644
index 0000000..beb3cfa
--- /dev/null
+++ b/cli/go/version.py
@@ -0,0 +1,13 @@
+from cli.cli_command import NeofsCliCommand
+from shell import CommandResult
+
+
+class NeoGoVersion(NeofsCliCommand):
+    def get(self) -> CommandResult:
+        """Application version
+
+        Returns:
+            str: Command string
+
+        """
+        return self._execute("", version=True)
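The query and version wrappers are read-only and need no wallet. A hedged sketch (endpoint is a placeholder; `CommandResult` carries the raw process output, so parsing is left to the caller):

```python
from cli.go import NeoGo
from shell import LocalShell

neo_go = NeoGo(LocalShell(), neo_go_exec_path="neo-go")

height = neo_go.query.height(rpc_endpoint="http://localhost:30333")
print(height.stdout)

print(neo_go.version.get().stdout)
```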
diff --git a/cli/go/wallet.py b/cli/go/wallet.py
new file mode 100644
index 0000000..3d20c25
--- /dev/null
+++ b/cli/go/wallet.py
@@ -0,0 +1,395 @@
+from typing import Optional
+
+from cli.cli_command import NeofsCliCommand
+from shell import CommandResult
+
+
+class NeoGoWallet(NeofsCliCommand):
+    def claim(
+        self,
+        address: str,
+        rpc_endpoint: str,
+        wallet: Optional[str] = None,
+        wallet_config: Optional[str] = None,
+        timeout: int = 10,
+    ) -> CommandResult:
+        """claim GAS
+
+        Args:
+            address (str): Address to claim GAS for
+            wallet (str): Target location of the wallet file ('-' to read from stdin);
+                conflicts with --wallet-config flag.
+            wallet_config (str): Target location of the wallet config file;
+                conflicts with --wallet flag.
+            rpc_endpoint (str): RPC node address
+            timeout (int): Timeout for the operation (default: 10s)
+
+        Returns:
+            str: Command string
+
+        """
+        assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
+
+        return self._execute(
+            "wallet claim",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            }
+        )
+
+    def init(
+        self,
+        wallet: Optional[str] = None,
+        wallet_config: Optional[str] = None,
+        account: bool = False,
+    ) -> CommandResult:
+        """create a new wallet
+
+        Args:
+            wallet (str): Target location of the wallet file ('-' to read from stdin);
+                conflicts with --wallet-config flag.
+            wallet_config (str): Target location of the wallet config file;
+                conflicts with --wallet flag.
+            account (bool): Create a new account
+
+        Returns:
+            str: Command string
+
+        """
+        assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
+
+        return self._execute(
+            "wallet init",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            }
+        )
+
+    def convert(
+        self,
+        out: str,
+        wallet: Optional[str] = None,
+        wallet_config: Optional[str] = None,
+    ) -> CommandResult:
+        """convert addresses from existing NEO2 NEP6-wallet to NEO3 format
+
+        Args:
+            wallet (str): Target location of the wallet file ('-' to read from stdin);
+                conflicts with --wallet-config flag.
+            wallet_config (str): Target location of the wallet config file;
+                conflicts with --wallet flag.
+            out (str): where to write converted wallet
+
+        Returns:
+            str: Command string
+
+        """
+        assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
+
+        return self._execute(
+            "wallet convert",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            }
+        )
+
+    def create(
+        self,
+        wallet: Optional[str] = None,
+        wallet_config: Optional[str] = None,
+    ) -> CommandResult:
+        """add an account to the existing wallet
+
+        Args:
+            wallet (str): Target location of the wallet file ('-' to read from stdin);
+                conflicts with --wallet-config flag.
+            wallet_config (str): Target location of the wallet config file;
+                conflicts with --wallet flag.
+
+        Returns:
+            str: Command string
+
+        """
+        assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
+
+        return self._execute(
+            "wallet create",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            }
+        )
+
+    def dump(
+        self,
+        wallet: Optional[str] = None,
+        wallet_config: Optional[str] = None,
+        decrypt: bool = False,
+    ) -> CommandResult:
+        """check and dump an existing NEO wallet
+
+        Args:
+            wallet (str): Target location of the wallet file ('-' to read from stdin);
+                conflicts with --wallet-config flag.
+            wallet_config (str): Target location of the wallet config file;
+                conflicts with --wallet flag.
+            decrypt (bool): Decrypt encrypted keys.
+
+        Returns:
+            str: Command string
+
+        """
+        assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
+
+        return self._execute(
+            "wallet dump",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            }
+        )
+
+    def dump_keys(
+        self,
+        address: str,
+        wallet: Optional[str] = None,
+        wallet_config: Optional[str] = None,
+    ) -> CommandResult:
+        """dump public keys for an address of an existing NEO wallet
+
+        Args:
+            wallet (str): Target location of the wallet file ('-' to read from stdin);
+                conflicts with --wallet-config flag.
+            wallet_config (str): Target location of the wallet config file;
+                conflicts with --wallet flag.
+            address (str): address to print public keys for
+
+        Returns:
+            str: Command string
+
+        """
+        assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
+
+        return self._execute(
+            "wallet dump-keys",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            }
+        )
+
+    def export(
+        self,
+        wallet: Optional[str] = None,
+        wallet_config: Optional[str] = None,
+        decrypt: bool = False,
+    ) -> CommandResult:
+        """export keys for address
+
+        Args:
+            wallet (str): Target location of the wallet file ('-' to read from stdin);
+                conflicts with --wallet-config flag.
+            wallet_config (str): Target location of the wallet config file;
+                conflicts with --wallet flag.
+            decrypt (bool): Decrypt encrypted keys.
+
+        Returns:
+            str: Command string
+
+        """
+        assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
+
+        return self._execute(
+            "wallet export",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            }
+        )
+    def import_wif(
+        self,
+        wif: str,
+        name: str,
+        contract: str,
+        wallet: Optional[str] = None,
+        wallet_config: Optional[str] = None,
+    ) -> CommandResult:
+        """import WIF of a standard signature contract
+
+        Args:
+            wallet (str): Target location of the wallet file ('-' to read from stdin);
+                conflicts with --wallet-config flag.
+            wallet_config (str): Target location of the wallet config file;
+                conflicts with --wallet flag.
+            wif (str): WIF to import
+            name (str): Optional account name
+            contract (str): Verification script for custom contracts
+
+        Returns:
+            str: Command string
+
+        """
+        assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
+
+        return self._execute(
+            "wallet import",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            }
+        )
+
+    def import_multisig(
+        self,
+        wif: str,
+        name: Optional[str] = None,
+        min_number: int = 0,
+        wallet: Optional[str] = None,
+        wallet_config: Optional[str] = None,
+    ) -> CommandResult:
+        """import multisig contract
+
+        Args:
+            wallet (str): Target location of the wallet file ('-' to read from stdin);
+                conflicts with --wallet-config flag.
+            wallet_config (str): Target location of the wallet config file;
+                conflicts with --wallet flag.
+            wif (str): WIF to import
+            name (str): Optional account name
+            min_number (int): Minimal number of signatures (default: 0)
+
+        Returns:
+            str: Command string
+
+        """
+        assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
+
+        return self._execute(
+            "wallet import-multisig",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            }
+        )
+
+    def import_deployed(
+        self,
+        wif: str,
+        rpc_endpoint: str,
+        name: Optional[str] = None,
+        wallet: Optional[str] = None,
+        wallet_config: Optional[str] = None,
+        contract: Optional[str] = None,
+
+        timeout: int = 10,
+    ) -> CommandResult:
+        """import deployed contract
+
+        Args:
+            wallet (str): Target location of the wallet file ('-' to read from stdin);
+                conflicts with --wallet-config flag.
+            wallet_config (str): Target location of the wallet config file;
+                conflicts with --wallet flag.
+            wif (str): WIF to import
+            name (str): Optional account name
+            contract (str): Contract hash or address
+            rpc_endpoint (str): RPC node address
+            timeout (int): Timeout for the operation (default: 10s)
+
+        Returns:
+            str: Command string
+
+        """
+        assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
+
+        return self._execute(
+            "wallet import-deployed",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            }
+        )
+
+    def remove(
+        self,
+        address: str,
+        wallet: Optional[str] = None,
+        wallet_config: Optional[str] = None,
+        force: bool = False,
+    ) -> CommandResult:
+        """remove an account from the wallet
+
+        Args:
+            wallet (str): Target location of the wallet file ('-' to read from stdin);
+                conflicts with --wallet-config flag.
+            wallet_config (str): Target location of the wallet config file;
+                conflicts with --wallet flag.
+            address (str): Account address or hash in LE form to be removed
+            force (bool): Do not ask for a confirmation
+
+        Returns:
+            str: Command string
+
+        """
+        assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
+
+        return self._execute(
+            "wallet remove",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            }
+        )
+
+    def sign(
+        self,
+        input_file: str,
+        address: str,
+        rpc_endpoint: str,
+        wallet: Optional[str] = None,
+        wallet_config: Optional[str] = None,
+        out: Optional[str] = None,
+        timeout: int = 10,
+    ) -> CommandResult:
+        """sign a transaction
+
+        Args:
+            wallet (str): Target location of the wallet file ('-' to read from stdin);
+                conflicts with --wallet-config flag.
+            wallet_config (str): Target location of the wallet config file;
+                conflicts with --wallet flag.
+            out (str): file to put JSON transaction to
+            input_file (str): file with JSON transaction
+            address (str): Address to use
+            rpc_endpoint (str): RPC node address
+            timeout (int): Timeout for the operation (default: 10s)
+
+        Returns:
+            str: Command string
+
+        """
+        assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
+
+        return self._execute(
+            "wallet sign",
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self"]
+            }
+        )
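A hedged sketch of a wallet round trip with the methods above (paths, address and endpoint are placeholders; claiming assumes the account already holds NEO):

```python
from cli.go import NeoGo
from shell import LocalShell

neo_go = NeoGo(LocalShell(), neo_go_exec_path="neo-go")

# Create a new wallet with one account, then claim GAS for it.
neo_go.wallet.init(wallet="./wallet.json", account=True)
neo_go.wallet.claim(
    address="NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP",  # placeholder
    rpc_endpoint="http://localhost:30333",
    wallet="./wallet.json",
)
```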
diff --git a/shell/__init__.py b/shell/__init__.py
index e69de29..b867f00 100644
--- a/shell/__init__.py
+++ b/shell/__init__.py
@@ -0,0 +1,3 @@
+from .interfaces import CommandResult, Shell
+from .local_shell import LocalShell
+from .ssh_shell import SSHShell

From f5cd6a195407a735e9c558d98b0194cc4121a84d Mon Sep 17 00:00:00 2001
From: Vladimir Domnich
Date: Fri, 23 Sep 2022 10:12:04 +0400
Subject: [PATCH 007/363] [#3] Move source code of testlib to src directory

Signed-off-by: Vladimir Domnich
---
 README.md                                     | 33 +++++++++++++-
 cli/__init__.py                               |  3 --
 cli/adm/__init__.py                           |  1 -
 cli/authmate/__init__.py                      |  1 -
 cli/go/__init__.py                            |  2 -
 shell/__init__.py                             |  3 --
 src/neofs_testlib/cli/__init__.py             |  3 ++
 {cli => src/neofs_testlib/cli}/cli_command.py | 14 +++---
 src/neofs_testlib/cli/neofs_adm/__init__.py   |  1 +
 .../neofs_testlib/cli/neofs_adm}/adm.py       | 14 +++---
 .../neofs_testlib/cli/neofs_adm}/config.py    |  8 ++--
 .../neofs_testlib/cli/neofs_adm}/morph.py     | 26 ++++++++---
 .../cli/neofs_adm}/storage_config.py          |  8 ++--
 .../neofs_testlib/cli/neofs_adm}/subnet.py    | 28 ++++++------
 .../neofs_testlib/cli/neofs_adm}/version.py   |  6 +--
 .../cli/neofs_authmate/__init__.py            |  1 +
 .../cli/neofs_authmate}/authmate.py           | 14 ++----
 .../cli/neofs_authmate}/secret.py             | 24 +++++------
 .../cli/neofs_authmate}/version.py            |  6 +--
 src/neofs_testlib/cli/neogo/__init__.py       |  2 +
 .../neofs_testlib/cli/neogo}/candidate.py     | 36 ++++++++--------
 .../neofs_testlib/cli/neogo}/contract.py      | 21 +++++----
 {cli/go => src/neofs_testlib/cli/neogo}/db.py | 43 +++++++++----------
 {cli/go => src/neofs_testlib/cli/neogo}/go.py | 29 +++++--------
 .../neofs_testlib/cli/neogo}/nep17.py         | 23 +++++-----
 .../neofs_testlib/cli/neogo/network_type.py   |  0
 .../neofs_testlib/cli/neogo}/node.py          |  9 ++--
 .../neofs_testlib/cli/neogo}/query.py         |  8 ++--
 .../neofs_testlib/cli/neogo}/version.py       |  6 +--
 .../neofs_testlib/cli/neogo}/wallet.py        | 31 +++++++------
 .../neofs_testlib/reporter}/__init__.py       |  6 +--
 .../reporter}/allure_reporter.py              |  2 +-
 .../neofs_testlib/reporter}/dummy_reporter.py |  2 +-
 .../neofs_testlib/reporter}/interfaces.py     |  0
 src/neofs_testlib/shell/__init__.py           |  3 ++
 .../neofs_testlib/shell}/interfaces.py        |  0
 .../neofs_testlib/shell}/local_shell.py       |  4 +-
 .../neofs_testlib/shell}/ssh_shell.py         |  4 +-
 tests/helpers.py                              |  2 +-
 tests/test_local_shell.py                     |  5 ++-
 tests/test_ssh_shell.py                       |  5 ++-
 41 files changed, 230 insertions(+), 207 deletions(-)
 delete mode 100644 cli/__init__.py
 delete mode 100644 cli/adm/__init__.py
 delete mode 100644 cli/authmate/__init__.py
 delete mode 100644 cli/go/__init__.py
 delete mode 100644 shell/__init__.py
 create mode 100644 src/neofs_testlib/cli/__init__.py
 rename {cli => src/neofs_testlib/cli}/cli_command.py (80%)
 create mode 100644 src/neofs_testlib/cli/neofs_adm/__init__.py
 rename {cli/adm => src/neofs_testlib/cli/neofs_adm}/adm.py (68%)
 rename {cli/adm => src/neofs_testlib/cli/neofs_adm}/config.py (77%)
 rename {cli/adm => src/neofs_testlib/cli/neofs_adm}/morph.py (95%)
 rename {cli/adm => src/neofs_testlib/cli/neofs_adm}/storage_config.py (76%)
 rename {cli/adm => src/neofs_testlib/cli/neofs_adm}/subnet.py (94%)
 rename {cli/authmate => src/neofs_testlib/cli/neofs_adm}/version.py (57%)
 create mode 100644 src/neofs_testlib/cli/neofs_authmate/__init__.py
 rename {cli/authmate => src/neofs_testlib/cli/neofs_authmate}/authmate.py (54%)
 rename {cli/authmate => src/neofs_testlib/cli/neofs_authmate}/secret.py (90%)
 rename {cli/adm => src/neofs_testlib/cli/neofs_authmate}/version.py (56%)
 create mode 100644 src/neofs_testlib/cli/neogo/__init__.py
 rename {cli/go => src/neofs_testlib/cli/neogo}/candidate.py (84%)
 rename {cli/go => src/neofs_testlib/cli/neogo}/contract.py (95%)
 rename {cli/go => src/neofs_testlib/cli/neogo}/db.py (69%)
 rename {cli/go => src/neofs_testlib/cli/neogo}/go.py (60%)
 rename {cli/go => src/neofs_testlib/cli/neogo}/nep17.py (96%)
 rename cli/go/blockchain_network_type.py => src/neofs_testlib/cli/neogo/network_type.py (100%)
 rename {cli/go => src/neofs_testlib/cli/neogo}/node.py (63%)
 rename {cli/go => src/neofs_testlib/cli/neogo}/query.py (95%)
 rename {cli/go => src/neofs_testlib/cli/neogo}/version.py (57%)
 rename {cli/go => src/neofs_testlib/cli/neogo}/wallet.py (97%)
 rename {reporter => src/neofs_testlib/reporter}/__init__.py (64%)
 rename {reporter => src/neofs_testlib/reporter}/allure_reporter.py (95%)
 rename {reporter => src/neofs_testlib/reporter}/dummy_reporter.py (88%)
 rename {reporter => src/neofs_testlib/reporter}/interfaces.py (100%)
 create mode 100644 src/neofs_testlib/shell/__init__.py
 rename {shell => src/neofs_testlib/shell}/interfaces.py (100%)
 rename {shell => src/neofs_testlib/shell}/local_shell.py (98%)
 rename {shell => src/neofs_testlib/shell}/ssh_shell.py (98%)
diff --git a/README.md b/README.md
index e493042..995c946 100644
--- a/README.md
+++ b/README.md
@@ -47,4 +47,35 @@ SSH_SHELL_HOST =
 SSH_SHELL_LOGIN =
 SSH_SHELL_PRIVATE_KEY_PATH =
 SSH_SHELL_PRIVATE_KEY_PASSPHRASE =
-```
\ No newline at end of file
+```
+
+### Editable installation
+If you would like to modify the library's code while integrating it with your test suite, you can use an editable installation. For that, in the virtual environment of your test suite (not in the virtual environment of the testlib itself!) run the following command (the path to the `neofs-testlib` directory might be different on your machine):
+```shell
+$ pip install -e ../neofs-testlib
+```
+
+### Building and publishing package
+To build the Python package of the library, run the following command in the library root directory:
+```shell
+$ python -m build
+```
+
+This command will put the wheel file and the source archive under the `dist` directory.
+
+To check that the package description will render correctly at PyPI, use the command:
+```shell
+$ twine check dist/*
+```
+
+To upload the package to [test PyPI](https://test.pypi.org/project/neofs-testlib/), use the command:
+```shell
+$ twine upload -r testpypi dist/*
+```
+It will prompt for your username and password. You will need to [create a test PyPI account](https://test.pypi.org/account/register/) in order to execute it.
+
+To upload the package to actual PyPI, use the command:
+```shell
+$ twine upload dist/*
+```
+It will prompt for your username and password. You will need to [create an account](https://pypi.org/account/register/) in order to execute it.
diff --git a/cli/__init__.py b/cli/__init__.py
deleted file mode 100644
index 112c15b..0000000
--- a/cli/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from .adm import NeofsAdm
-from .authmate import NeofsAuthmate
-from .go import NeoGo, NetworkType
diff --git a/cli/adm/__init__.py b/cli/adm/__init__.py
deleted file mode 100644
index 1596cb3..0000000
--- a/cli/adm/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .adm import NeofsAdm
diff --git a/cli/authmate/__init__.py b/cli/authmate/__init__.py
deleted file mode 100644
index 112b6a9..0000000
--- a/cli/authmate/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .authmate import NeofsAuthmate
diff --git a/cli/go/__init__.py b/cli/go/__init__.py
deleted file mode 100644
index d3fa193..0000000
--- a/cli/go/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from .blockchain_network_type import NetworkType
-from .go import NeoGo
diff --git a/shell/__init__.py b/shell/__init__.py
deleted file mode 100644
index b867f00..0000000
--- a/shell/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from .interfaces import CommandResult, Shell
-from .local_shell import LocalShell
-from .ssh_shell import SSHShell
diff --git a/src/neofs_testlib/cli/__init__.py b/src/neofs_testlib/cli/__init__.py
new file mode 100644
index 0000000..27ffbc2
--- /dev/null
+++ b/src/neofs_testlib/cli/__init__.py
@@ -0,0 +1,3 @@
+from neofs_testlib.cli.neofs_adm.adm import NeofsAdm
+from neofs_testlib.cli.neofs_authmate.authmate import NeofsAuthmate
+from neofs_testlib.cli.neogo.go import NeoGo
diff --git a/cli/cli_command.py b/src/neofs_testlib/cli/cli_command.py
similarity index 80%
rename from cli/cli_command.py
rename to src/neofs_testlib/cli/cli_command.py
index a622324..13268f2 100644
--- a/cli/cli_command.py
+++ b/src/neofs_testlib/cli/cli_command.py
@@ -1,13 +1,13 @@
 from typing import Optional
 
-from shell import CommandResult, Shell
+from neofs_testlib.shell import CommandResult, Shell
 
 
-class NeofsCliCommand:
+class CliCommand:
 
-    WALLET_SOURCE_ERROR_MSG = 'Provide either wallet or wallet_config to specify wallet location'
+    WALLET_SOURCE_ERROR_MSG = "Provide either wallet or wallet_config to specify wallet location"
 
-    neofs_cli_exec: Optional[str] = None
+    cli_exec_path: Optional[str] = None
     __base_params: Optional[str] = None
     map_params = {
         "json_mode": "json",
@@ -16,9 +16,9 @@ class NeofsCliCommand:
         "doc_type": "type",
     }
 
-    def __init__(self, shell: Shell, neofs_cli_exec: str, **base_params):
+    def __init__(self, shell: Shell, cli_exec_path: str, **base_params):
         self.shell = shell
-        self.neofs_cli_exec = neofs_cli_exec
+        self.cli_exec_path = cli_exec_path
         self.__base_params = " ".join(
             [f"--{param} {value}" for param, value in base_params.items() if value]
         )
@@ -52,7 +52,7 @@ class NeofsCliCommand:
 
         param_str = " ".join(param_str)
 
-        return f'{self.neofs_cli_exec} {self.__base_params} {command or ""} {param_str}'
+        return f"{self.cli_exec_path} {self.__base_params} {command or ''} {param_str}"
 
     def _execute(self, command: Optional[str], **params) -> CommandResult:
         return self.shell.exec(self._format_command(command, **params))
diff --git a/src/neofs_testlib/cli/neofs_adm/__init__.py b/src/neofs_testlib/cli/neofs_adm/__init__.py
new file mode 100644
index 0000000..dd91220
--- /dev/null
+++ b/src/neofs_testlib/cli/neofs_adm/__init__.py
@@ -0,0 +1 @@
+from neofs_testlib.cli.neofs_adm.adm import NeofsAdm
diff --git a/cli/adm/adm.py b/src/neofs_testlib/cli/neofs_adm/adm.py
similarity index 68%
rename from cli/adm/adm.py
rename to src/neofs_testlib/cli/neofs_adm/adm.py
index 0313b78..4fff981 100644
--- a/cli/adm/adm.py
+++ b/src/neofs_testlib/cli/neofs_adm/adm.py
@@ -1,16 +1,14 @@
 from typing import Optional
 
-from shell import Shell
-
-from .config import NeofsAdmConfig
-from .morph import NeofsAdmMorph
-from .subnet import NeofsAdmMorphSubnet
-from .storage_config import NeofsAdmStorageConfig
-from .version import NeofsAdmVersion
+from neofs_testlib.cli.neofs_adm.config import NeofsAdmConfig
+from neofs_testlib.cli.neofs_adm.morph import NeofsAdmMorph
+from neofs_testlib.cli.neofs_adm.storage_config import NeofsAdmStorageConfig
+from neofs_testlib.cli.neofs_adm.subnet import NeofsAdmMorphSubnet
+from neofs_testlib.cli.neofs_adm.version import NeofsAdmVersion
+from neofs_testlib.shell import Shell
 
 
 class NeofsAdm:
-
     config: Optional[NeofsAdmConfig] = None
     morph: Optional[NeofsAdmMorph] = None
     subnet: Optional[NeofsAdmMorphSubnet] = None
     storage_config: Optional[NeofsAdmStorageConfig] = None
diff --git a/cli/adm/config.py b/src/neofs_testlib/cli/neofs_adm/config.py
similarity index 77%
rename from cli/adm/config.py
rename to src/neofs_testlib/cli/neofs_adm/config.py
index 7c21bd5..f8acfd8 100644
--- a/cli/adm/config.py
+++ b/src/neofs_testlib/cli/neofs_adm/config.py
@@ -1,8 +1,8 @@
-from cli.cli_command import NeofsCliCommand
-from shell import CommandResult
+from neofs_testlib.cli.cli_command import CliCommand
+from neofs_testlib.shell import CommandResult
 
 
-class NeofsAdmConfig(NeofsCliCommand):
+class NeofsAdmConfig(CliCommand):
     def init(self, path: str = "~/.neofs/adm/config.yml") -> CommandResult:
         """Initialize basic neofs-adm configuration file.
 
@@ -20,5 +20,5 @@ class NeofsAdmConfig(NeofsCliCommand):
             param: param_value
             for param, param_value in locals().items()
             if param not in ["self"]
-            }
+            },
         )
diff --git a/cli/adm/morph.py b/src/neofs_testlib/cli/neofs_adm/morph.py
similarity index 95%
rename from cli/adm/morph.py
rename to src/neofs_testlib/cli/neofs_adm/morph.py
index 4fa2c9b..93c545b 100644
--- a/cli/adm/morph.py
+++ b/src/neofs_testlib/cli/neofs_adm/morph.py
@@ -1,10 +1,10 @@
 from typing import Optional
 
-from cli.cli_command import NeofsCliCommand
-from shell import CommandResult
+from neofs_testlib.cli.cli_command import CliCommand
+from neofs_testlib.shell import CommandResult
 
 
-class NeofsAdmMorph(NeofsCliCommand):
+class NeofsAdmMorph(CliCommand):
     def deposit_notary(
         self,
         rpc_endpoint: str,
@@ -92,7 +92,7 @@ class NeofsAdmMorph(NeofsCliCommand):
         rpc_endpoint: str,
         cid: Optional[str] = None,
         container_contract: Optional[str] = None,
-        dump: str = './testlib_dump_container',
+        dump: str = "./testlib_dump_container",
     ) -> CommandResult:
         """Dump NeoFS containers to file.
 
@@ -160,7 +160,12 @@ class NeofsAdmMorph(NeofsCliCommand):
             },
         )
 
-    def generate_alphabet(self, rpc_endpoint: str, alphabet_wallets: str, size: int = 7) -> CommandResult:
+    def generate_alphabet(
+        self,
+        rpc_endpoint: str,
+        alphabet_wallets: str,
+        size: int = 7,
+    ) -> CommandResult:
         """Generate alphabet wallets for consensus nodes of the morph network
 
         Args:
@@ -284,7 +289,11 @@ class NeofsAdmMorph(NeofsCliCommand):
         )
 
     def restore_containers(
-        self, rpc_endpoint: str, alphabet_wallets: str, cid: str, dump: str
+        self,
+        rpc_endpoint: str,
+        alphabet_wallets: str,
+        cid: str,
+        dump: str,
     ) -> CommandResult:
         """Restore NeoFS containers from file.
 
@@ -347,7 +356,10 @@ class NeofsAdmMorph(NeofsCliCommand):
         )
 
     def update_contracts(
-        self, rpc_endpoint: str, alphabet_wallets: str, contracts: Optional[str] = None
+        self,
+        rpc_endpoint: str,
+        alphabet_wallets: str,
+        contracts: Optional[str] = None,
     ) -> CommandResult:
         """Update NeoFS contracts.
 
diff --git a/cli/adm/storage_config.py b/src/neofs_testlib/cli/neofs_adm/storage_config.py
similarity index 76%
rename from cli/adm/storage_config.py
rename to src/neofs_testlib/cli/neofs_adm/storage_config.py
index 031bd0f..aa48f6d 100644
--- a/cli/adm/storage_config.py
+++ b/src/neofs_testlib/cli/neofs_adm/storage_config.py
@@ -1,8 +1,8 @@
-from cli.cli_command import NeofsCliCommand
-from shell import CommandResult
+from neofs_testlib.cli.cli_command import CliCommand
+from neofs_testlib.shell import CommandResult
 
 
-class NeofsAdmStorageConfig(NeofsCliCommand):
+class NeofsAdmStorageConfig(CliCommand):
     def set(self, account: str, wallet: str) -> CommandResult:
         """Initialize basic neofs-adm configuration file.
 
@@ -21,5 +21,5 @@ class NeofsAdmStorageConfig(NeofsCliCommand):
             param: param_value
             for param, param_value in locals().items()
             if param not in ["self"]
-            }
+            },
         )
diff --git a/cli/adm/subnet.py b/src/neofs_testlib/cli/neofs_adm/subnet.py
similarity index 94%
rename from cli/adm/subnet.py
rename to src/neofs_testlib/cli/neofs_adm/subnet.py
index e19c468..47a1757 100644
--- a/cli/adm/subnet.py
+++ b/src/neofs_testlib/cli/neofs_adm/subnet.py
@@ -1,11 +1,13 @@
 from typing import Optional
 
-from cli.cli_command import NeofsCliCommand
-from shell import CommandResult
+from neofs_testlib.cli.cli_command import CliCommand
+from neofs_testlib.shell import CommandResult
 
 
-class NeofsAdmMorphSubnet(NeofsCliCommand):
-    def create(self, rpc_endpoint: str, address: str, wallet: str, notary: bool = False) -> CommandResult:
+class NeofsAdmMorphSubnet(CliCommand):
+    def create(
+        self, rpc_endpoint: str, address: str, wallet: str, notary: bool = False
+    ) -> CommandResult:
         """Create NeoFS subnet.
 
         Args:
@@ -25,7 +27,7 @@ class NeofsAdmMorphSubnet(NeofsCliCommand):
             param: param_value
             for param, param_value in locals().items()
             if param not in ["self"]
-            }
+            },
         )
 
     def get(self, rpc_endpoint: str, subnet: str) -> CommandResult:
@@ -46,7 +48,7 @@ class NeofsAdmMorphSubnet(NeofsCliCommand):
             param: param_value
             for param, param_value in locals().items()
             if param not in ["self"]
-            }
+            },
         )
 
     def remove(
@@ -71,7 +73,7 @@ class NeofsAdmMorphSubnet(NeofsCliCommand):
             param: param_value
             for param, param_value in locals().items()
             if param not in ["self"]
-            }
+            },
         )
 
     def admin_add(
@@ -106,7 +108,7 @@ class NeofsAdmMorphSubnet(NeofsCliCommand):
             param: param_value
             for param, param_value in locals().items()
             if param not in ["self"]
-            }
+            },
         )
 
     def admin_remove(
@@ -139,7 +141,7 @@ class NeofsAdmMorphSubnet(NeofsCliCommand):
             param: param_value
             for param, param_value in locals().items()
             if param not in ["self"]
-            }
+            },
         )
 
     def client_add(
@@ -172,7 +174,7 @@ class NeofsAdmMorphSubnet(NeofsCliCommand):
             param: param_value
             for param, param_value in locals().items()
             if param not in ["self"]
-            }
+            },
         )
 
    def client_remove(
@@ -205,7 +207,7 @@ class NeofsAdmMorphSubnet(NeofsCliCommand):
             param: param_value
             for param, param_value in locals().items()
             if param not in ["self"]
-            }
+            },
         )
 
     def node_add(self, rpc_endpoint: str, wallet: str, node: str, subnet: str) -> CommandResult:
@@ -228,7 +230,7 @@ class NeofsAdmMorphSubnet(NeofsCliCommand):
             param: param_value
             for param, param_value in locals().items()
             if param not in ["self"]
-            }
+            },
         )
 
     def node_remove(self, rpc_endpoint: str, wallet: str, node: str, subnet: str) -> CommandResult:
@@ -251,5 +253,5 @@ class NeofsAdmMorphSubnet(NeofsCliCommand):
             param: param_value
             for param, param_value in locals().items()
             if param not in ["self"]
-            }
+            },
         )
diff --git a/cli/authmate/version.py b/src/neofs_testlib/cli/neofs_adm/version.py
similarity index 57%
rename from cli/authmate/version.py
rename to src/neofs_testlib/cli/neofs_adm/version.py
index 4432b2d..8d7c02d 100644
--- a/cli/authmate/version.py
+++ b/src/neofs_testlib/cli/neofs_adm/version.py
@@ -1,8 +1,8 @@
-from cli.cli_command import NeofsCliCommand
-from shell import CommandResult
+from neofs_testlib.cli.cli_command import CliCommand
+from neofs_testlib.shell import CommandResult
 
 
-class NeofsAuthmateVersion(NeofsCliCommand):
+class NeofsAdmVersion(CliCommand):
     def get(self) -> CommandResult:
         """Application version
 
diff --git a/src/neofs_testlib/cli/neofs_authmate/__init__.py b/src/neofs_testlib/cli/neofs_authmate/__init__.py
new file mode 100644
index 0000000..5d43b3e
--- /dev/null
+++ b/src/neofs_testlib/cli/neofs_authmate/__init__.py
@@ -0,0 +1 @@
+from neofs_testlib.cli.neofs_authmate.authmate import NeofsAuthmate
diff --git a/cli/authmate/authmate.py b/src/neofs_testlib/cli/neofs_authmate/authmate.py
similarity index 54%
rename from cli/authmate/authmate.py
rename to src/neofs_testlib/cli/neofs_authmate/authmate.py
index 58ee873..5f86a74 100644
--- a/cli/authmate/authmate.py
+++ b/src/neofs_testlib/cli/neofs_authmate/authmate.py
@@ -1,20 +1,14 @@
 from typing import Optional
 
-from shell import Shell
-
-from .secret import NeofsAuthmateSecret
-from .version import NeofsAuthmateVersion
+from neofs_testlib.cli.neofs_authmate.secret import NeofsAuthmateSecret
+from neofs_testlib.cli.neofs_authmate.version import NeofsAuthmateVersion
+from neofs_testlib.shell import Shell
 
 
 class NeofsAuthmate:
     secret: Optional[NeofsAuthmateSecret] = None
     version: Optional[NeofsAuthmateVersion] = None
 
-    def __init__(
-        self,
-        shell: Shell,
-        neofs_authmate_exec_path: str,
-    ):
-
+    def __init__(self, shell: Shell, neofs_authmate_exec_path: str):
         self.secret = NeofsAuthmateSecret(shell, neofs_authmate_exec_path)
         self.version = NeofsAuthmateVersion(shell, neofs_authmate_exec_path)
diff --git a/cli/authmate/secret.py b/src/neofs_testlib/cli/neofs_authmate/secret.py
similarity index 90%
rename from cli/authmate/secret.py
rename to src/neofs_testlib/cli/neofs_authmate/secret.py
index 66d7e81..12871c8 100644
--- a/cli/authmate/secret.py
+++ b/src/neofs_testlib/cli/neofs_authmate/secret.py
@@ -1,18 +1,18 @@
 from typing import Optional
 
-from cli.cli_command import NeofsCliCommand
-from shell import CommandResult
+from neofs_testlib.cli.cli_command import CliCommand
+from neofs_testlib.shell import CommandResult
 
 
-class NeofsAuthmateSecret(NeofsCliCommand):
+class NeofsAuthmateSecret(CliCommand):
     def obtain(
-            self,
-            wallet: str,
-            peer: str,
-            gate_wallet: str,
-            access_key_id: str,
-            address: Optional[str] = None,
-            gate_address: Optional[str] = None,
+        self,
+        wallet: str,
+        peer: str,
+        gate_wallet: str,
+        access_key_id: str,
+        address: Optional[str] = None,
+        gate_address: Optional[str] = None,
     ) -> CommandResult:
         """Obtain a secret from NeoFS network
 
@@ -34,7 +34,7 @@ class NeofsAuthmateSecret(NeofsCliCommand):
             param: param_value
             for param, param_value in locals().items()
             if param not in ["self"]
-            }
+            },
         )
 
     def issue(
@@ -88,5 +88,5 @@ class NeofsAuthmateSecret(NeofsCliCommand):
             param: param_value
             for param, param_value in locals().items()
             if param not in ["self"]
-            }
+            },
         )
diff --git a/cli/adm/version.py b/src/neofs_testlib/cli/neofs_authmate/version.py
similarity index 56%
rename from cli/adm/version.py
rename to src/neofs_testlib/cli/neofs_authmate/version.py
index 6a2aedd..e146f52 100644
--- a/cli/adm/version.py
+++ b/src/neofs_testlib/cli/neofs_authmate/version.py
@@ -1,8 +1,8 @@
-from cli.cli_command import NeofsCliCommand
-from shell import CommandResult
+from neofs_testlib.cli.cli_command import CliCommand
+from neofs_testlib.shell import CommandResult
 
 
-class NeofsAdmVersion(NeofsCliCommand):
+class NeofsAuthmateVersion(CliCommand):
     def get(self) -> CommandResult:
         """Application version
 
diff --git a/src/neofs_testlib/cli/neogo/__init__.py b/src/neofs_testlib/cli/neogo/__init__.py
new file mode 100644
index 0000000..585be9e
--- /dev/null
+++ b/src/neofs_testlib/cli/neogo/__init__.py
@@ -0,0 +1,2 @@
+from neofs_testlib.cli.neogo.go import NeoGo
+from neofs_testlib.cli.neogo.network_type import NetworkType
Args: @@ -103,7 +102,6 @@ class NeoGoCandidate(NeofsCliCommand): rpc_endpoint (str): RPC node address timeout (int): Timeout for the operation (default: 10s) - Returns: str: Command string @@ -116,5 +114,5 @@ class NeoGoCandidate(NeofsCliCommand): param: param_value for param, param_value in locals().items() if param not in ["self"] - } + }, ) diff --git a/cli/go/contract.py b/src/neofs_testlib/cli/neogo/contract.py similarity index 95% rename from cli/go/contract.py rename to src/neofs_testlib/cli/neogo/contract.py index 5797e06..a097e4d 100644 --- a/cli/go/contract.py +++ b/src/neofs_testlib/cli/neogo/contract.py @@ -1,10 +1,10 @@ from typing import Optional -from cli.cli_command import NeofsCliCommand -from shell import CommandResult +from neofs_testlib.cli.cli_command import CliCommand +from neofs_testlib.shell import CommandResult -class NeoGoContract(NeofsCliCommand): +class NeoGoContract(CliCommand): def compile( self, input_file: str, @@ -16,7 +16,7 @@ class NeoGoContract(NeofsCliCommand): no_permissions: bool = False, bindings: Optional[str] = None, ) -> CommandResult: - """compile a smart contract to a .nef file + """Compile a smart contract to a .nef file Args: input_file (str): Input file for the smart contract to be compiled @@ -55,9 +55,8 @@ class NeoGoContract(NeofsCliCommand): out: Optional[str] = None, force: bool = False, timeout: int = 10, - ) -> CommandResult: - """deploy a smart contract (.nef with description) + """Deploy a smart contract (.nef with description) Args: wallet (str): wallet to use to get the key for transaction signing; @@ -97,7 +96,7 @@ class NeoGoContract(NeofsCliCommand): config: Optional[str] = None, manifest: Optional[str] = None, ) -> CommandResult: - """generate wrapper to use in other contracts + """Generate wrapper to use in other contracts Args: config (str): Configuration file to use @@ -253,7 +252,7 @@ class NeoGoContract(NeofsCliCommand): name: str, skip_details: bool = False, ) -> CommandResult: - """initialize a new smart-contract in a directory with boiler plate code + """Initialize a new smart-contract in a directory with boiler plate code Args: name (str): name of the smart-contract to be initialized @@ -277,7 +276,7 @@ class NeoGoContract(NeofsCliCommand): input_file: Optional[str] = None, compile: Optional[str] = None, ) -> CommandResult: - """creates a user readable dump of the program instructions + """Creates a user readable dump of the program instructions Args: input_file (str): input file of the program (either .go or .nef) @@ -302,7 +301,7 @@ class NeoGoContract(NeofsCliCommand): manifest: str, sender: Optional[str] = None, ) -> CommandResult: - """calculates hash of a contract after deployment + """Calculates hash of a contract after deployment Args: input_file (str): path to NEF file @@ -331,7 +330,7 @@ class NeoGoContract(NeofsCliCommand): sender: Optional[str] = None, nef: Optional[str] = None, ) -> CommandResult: - """adds group to the manifest + """Adds group to the manifest Args: wallet (str): wallet to use to get the key for transaction signing; diff --git a/cli/go/db.py b/src/neofs_testlib/cli/neogo/db.py similarity index 69% rename from cli/go/db.py rename to src/neofs_testlib/cli/neogo/db.py index 1faf28b..05bece2 100644 --- a/cli/go/db.py +++ b/src/neofs_testlib/cli/neogo/db.py @@ -1,21 +1,20 @@ from typing import Optional -from cli.cli_command import NeofsCliCommand -from shell import CommandResult - -from .blockchain_network_type import NetworkType +from neofs_testlib.cli.cli_command import CliCommand +from 
neofs_testlib.cli.neogo.network_type import NetworkType +from neofs_testlib.shell import CommandResult -class NeoGoDb(NeofsCliCommand): +class NeoGoDb(CliCommand): def dump( - self, - config_path: str, - out: str, - network: NetworkType = NetworkType.PRIVATE, - count: int = 0, - start: int = 0, + self, + config_path: str, + out: str, + network: NetworkType = NetworkType.PRIVATE, + count: int = 0, + start: int = 0, ) -> CommandResult: - """ dump blocks (starting with block #1) to the file + """Dump blocks (starting with block #1) to the file Args: config_path (str): path to config @@ -36,19 +35,19 @@ class NeoGoDb(NeofsCliCommand): param: param_value for param, param_value in locals().items() if param not in ["self"] - } + }, ) def restore( - self, - config_path: str, - input_file: str, - network: NetworkType = NetworkType.PRIVATE, - count: int = 0, - dump: Optional[str] = None, - incremental: bool = False, + self, + config_path: str, + input_file: str, + network: NetworkType = NetworkType.PRIVATE, + count: int = 0, + dump: Optional[str] = None, + incremental: bool = False, ) -> CommandResult: - """ dump blocks (starting with block #1) to the file + """Restore blocks (starting with block #1) from the file Args: config_path (str): path to config @@ -70,5 +69,5 @@ class NeoGoDb(NeofsCliCommand): param: param_value for param, param_value in locals().items() if param not in ["self"] - } + }, ) diff --git a/cli/go/go.py b/src/neofs_testlib/cli/neogo/go.py similarity index 60% rename from cli/go/go.py rename to src/neofs_testlib/cli/neogo/go.py index 635d42e..02aac73 100644 --- a/cli/go/go.py +++ b/src/neofs_testlib/cli/neogo/go.py @@ -1,20 +1,17 @@ from typing import Optional -from shell import Shell - -from .candidate import NeoGoCandidate -from .contract import NeoGoContract -from .db import NeoGoDb -from .nep17 import NeoGoNep17 -from .node import NeoGoNode -from .query import NeoGoQuery -from .version import NeoGoVersion -from .wallet import NeoGoWallet +from neofs_testlib.cli.neogo.candidate import NeoGoCandidate +from neofs_testlib.cli.neogo.contract import NeoGoContract +from neofs_testlib.cli.neogo.db import NeoGoDb +from neofs_testlib.cli.neogo.nep17 import NeoGoNep17 +from neofs_testlib.cli.neogo.node import NeoGoNode +from neofs_testlib.cli.neogo.query import NeoGoQuery +from neofs_testlib.cli.neogo.version import NeoGoVersion +from neofs_testlib.cli.neogo.wallet import NeoGoWallet +from neofs_testlib.shell import Shell class NeoGo: - neo_go_exec_path: Optional[str] = None - config_path: Optional[str] = None candidate: Optional[NeoGoCandidate] = None contract: Optional[NeoGoContract] = None db: Optional[NeoGoDb] = None @@ -30,12 +27,8 @@ class NeoGo: def __init__( self, neo_go_exec_path: Optional[str] = None, config_path: Optional[str] = None, ): - self.candidate = NeoGoCandidate( - shell, neo_go_exec_path, config_path=config_path - ) - self.contract = NeoGoContract( - self.neo_go_exec_path, config_path=config_path - ) + self.candidate = NeoGoCandidate(shell, neo_go_exec_path, config_path=config_path) + self.contract = NeoGoContract(shell, neo_go_exec_path, config_path=config_path) self.db = NeoGoDb(shell, neo_go_exec_path, config_path=config_path) self.nep17 = NeoGoNep17(shell, neo_go_exec_path, config_path=config_path) self.node = NeoGoNode(shell, neo_go_exec_path, config_path=config_path) diff --git a/cli/go/nep17.py b/src/neofs_testlib/cli/neogo/nep17.py similarity index 96% rename from cli/go/nep17.py rename to src/neofs_testlib/cli/neogo/nep17.py index 62c936d..8d89c25 100644 --- a/cli/go/nep17.py +++
b/src/neofs_testlib/cli/neogo/nep17.py @@ -1,10 +1,10 @@ -from typing import List, Optional +from typing import Optional -from cli.cli_command import NeofsCliCommand -from shell import CommandResult +from neofs_testlib.cli.cli_command import CliCommand +from neofs_testlib.shell import CommandResult -class NeoGoNep17(NeofsCliCommand): +class NeoGoNep17(CliCommand): def balance( self, address: str, @@ -38,7 +38,7 @@ class NeoGoNep17(NeofsCliCommand): param: param_value for param, param_value in locals().items() if param not in ["self"] - } + }, ) def import_token( @@ -74,7 +74,7 @@ class NeoGoNep17(NeofsCliCommand): param: param_value for param, param_value in locals().items() if param not in ["self"] - } + }, ) def info( @@ -104,7 +104,7 @@ class NeoGoNep17(NeofsCliCommand): param: param_value for param, param_value in locals().items() if param not in ["self"] - } + }, ) def remove( @@ -134,7 +134,7 @@ class NeoGoNep17(NeofsCliCommand): param: param_value for param, param_value in locals().items() if param not in ["self"] - } + }, ) def transfer( @@ -188,13 +188,13 @@ class NeoGoNep17(NeofsCliCommand): param: param_value for param, param_value in locals().items() if param not in ["self"] - } + }, ) def multitransfer( self, token: str, - to_address: List[str], + to_address: list[str], sysgas: float, rpc_endpoint: str, wallet: Optional[str] = None, @@ -204,7 +204,6 @@ class NeoGoNep17(NeofsCliCommand): force: bool = False, gas: Optional[float] = None, amount: float = 0, - timeout: int = 10, ) -> CommandResult: """transfer NEP-17 tokens to multiple recipients @@ -239,5 +238,5 @@ class NeoGoNep17(NeofsCliCommand): param: param_value for param, param_value in locals().items() if param not in ["self"] - } + }, ) diff --git a/cli/go/blockchain_network_type.py b/src/neofs_testlib/cli/neogo/network_type.py similarity index 100% rename from cli/go/blockchain_network_type.py rename to src/neofs_testlib/cli/neogo/network_type.py diff --git a/cli/go/node.py b/src/neofs_testlib/cli/neogo/node.py similarity index 63% rename from cli/go/node.py rename to src/neofs_testlib/cli/neogo/node.py index 363dc9b..0d79561 100644 --- a/cli/go/node.py +++ b/src/neofs_testlib/cli/neogo/node.py @@ -1,10 +1,9 @@ -from cli.cli_command import NeofsCliCommand -from shell import CommandResult - -from .blockchain_network_type import NetworkType +from neofs_testlib.cli.cli_command import CliCommand +from neofs_testlib.cli.neogo.network_type import NetworkType +from neofs_testlib.shell import CommandResult -class NeoGoNode(NeofsCliCommand): +class NeoGoNode(CliCommand): def start(self, network: NetworkType = NetworkType.PRIVATE) -> CommandResult: """Start a NEO node diff --git a/cli/go/query.py b/src/neofs_testlib/cli/neogo/query.py similarity index 95% rename from cli/go/query.py rename to src/neofs_testlib/cli/neogo/query.py index bdcff77..1567026 100644 --- a/cli/go/query.py +++ b/src/neofs_testlib/cli/neogo/query.py @@ -1,10 +1,8 @@ -from typing import Optional - -from cli.cli_command import NeofsCliCommand -from shell import CommandResult +from neofs_testlib.cli.cli_command import CliCommand +from neofs_testlib.shell import CommandResult -class NeoGoQuery(NeofsCliCommand): +class NeoGoQuery(CliCommand): def candidates( self, rpc_endpoint: str, diff --git a/cli/go/version.py b/src/neofs_testlib/cli/neogo/version.py similarity index 57% rename from cli/go/version.py rename to src/neofs_testlib/cli/neogo/version.py index beb3cfa..18f52bd 100644 --- a/cli/go/version.py +++ b/src/neofs_testlib/cli/neogo/version.py @@ -1,8 +1,8 
@@ -from cli.cli_command import NeofsCliCommand -from shell import CommandResult +from neofs_testlib.cli.cli_command import CliCommand +from neofs_testlib.shell import CommandResult -class NeoGoVersion(NeofsCliCommand): +class NeoGoVersion(CliCommand): def get(self) -> CommandResult: """Application version diff --git a/cli/go/wallet.py b/src/neofs_testlib/cli/neogo/wallet.py similarity index 97% rename from cli/go/wallet.py rename to src/neofs_testlib/cli/neogo/wallet.py index 3d20c25..c5cf012 100644 --- a/cli/go/wallet.py +++ b/src/neofs_testlib/cli/neogo/wallet.py @@ -1,10 +1,10 @@ from typing import Optional -from cli.cli_command import NeofsCliCommand -from shell import CommandResult +from neofs_testlib.cli.cli_command import CliCommand +from neofs_testlib.shell import CommandResult -class NeoGoWallet(NeofsCliCommand): +class NeoGoWallet(CliCommand): def claim( self, address: str, @@ -36,7 +36,7 @@ class NeoGoWallet(NeofsCliCommand): param: param_value for param, param_value in locals().items() if param not in ["self"] - } + }, ) def init( @@ -66,7 +66,7 @@ class NeoGoWallet(NeofsCliCommand): param: param_value for param, param_value in locals().items() if param not in ["self"] - } + }, ) def convert( @@ -96,7 +96,7 @@ class NeoGoWallet(NeofsCliCommand): param: param_value for param, param_value in locals().items() if param not in ["self"] - } + }, ) def create( @@ -124,7 +124,7 @@ class NeoGoWallet(NeofsCliCommand): param: param_value for param, param_value in locals().items() if param not in ["self"] - } + }, ) def dump( @@ -154,7 +154,7 @@ class NeoGoWallet(NeofsCliCommand): param: param_value for param, param_value in locals().items() if param not in ["self"] - } + }, ) def dump_keys( @@ -184,7 +184,7 @@ class NeoGoWallet(NeofsCliCommand): param: param_value for param, param_value in locals().items() if param not in ["self"] - } + }, ) def export( @@ -214,7 +214,7 @@ class NeoGoWallet(NeofsCliCommand): param: param_value for param, param_value in locals().items() if param not in ["self"] - } + }, ) def import_wif( @@ -248,7 +248,7 @@ class NeoGoWallet(NeofsCliCommand): param: param_value for param, param_value in locals().items() if param not in ["self"] - } + }, ) def import_multisig( @@ -282,7 +282,7 @@ class NeoGoWallet(NeofsCliCommand): param: param_value for param, param_value in locals().items() if param not in ["self"] - } + }, ) def import_deployed( @@ -293,7 +293,6 @@ class NeoGoWallet(NeofsCliCommand): wallet: Optional[str] = None, wallet_config: Optional[str] = None, contract: Optional[str] = None, - timeout: int = 10, ) -> CommandResult: """import multisig contract @@ -321,7 +320,7 @@ class NeoGoWallet(NeofsCliCommand): param: param_value for param, param_value in locals().items() if param not in ["self"] - } + }, ) def remove( @@ -353,7 +352,7 @@ class NeoGoWallet(NeofsCliCommand): param: param_value for param, param_value in locals().items() if param not in ["self"] - } + }, ) def sign( @@ -391,5 +390,5 @@ class NeoGoWallet(NeofsCliCommand): param: param_value for param, param_value in locals().items() if param not in ["self"] - } + }, ) diff --git a/reporter/__init__.py b/src/neofs_testlib/reporter/__init__.py similarity index 64% rename from reporter/__init__.py rename to src/neofs_testlib/reporter/__init__.py index 31bbdf7..5e3c5fc 100644 --- a/reporter/__init__.py +++ b/src/neofs_testlib/reporter/__init__.py @@ -1,8 +1,8 @@ import os -from reporter.allure_reporter import AllureReporter -from reporter.dummy_reporter import DummyReporter -from reporter.interfaces 
import Reporter +from neofs_testlib.reporter.allure_reporter import AllureReporter +from neofs_testlib.reporter.dummy_reporter import DummyReporter +from neofs_testlib.reporter.interfaces import Reporter def get_reporter() -> Reporter: diff --git a/reporter/allure_reporter.py b/src/neofs_testlib/reporter/allure_reporter.py similarity index 95% rename from reporter/allure_reporter.py rename to src/neofs_testlib/reporter/allure_reporter.py index 0277214..2d99527 100644 --- a/reporter/allure_reporter.py +++ b/src/neofs_testlib/reporter/allure_reporter.py @@ -6,7 +6,7 @@ from typing import Any import allure from allure import attachment_type -from reporter.interfaces import Reporter +from neofs_testlib.reporter.interfaces import Reporter class AllureReporter(Reporter): diff --git a/reporter/dummy_reporter.py b/src/neofs_testlib/reporter/dummy_reporter.py similarity index 88% rename from reporter/dummy_reporter.py rename to src/neofs_testlib/reporter/dummy_reporter.py index 9061101..1d8cfde 100644 --- a/reporter/dummy_reporter.py +++ b/src/neofs_testlib/reporter/dummy_reporter.py @@ -1,7 +1,7 @@ from contextlib import AbstractContextManager, contextmanager from typing import Any -from reporter.interfaces import Reporter +from neofs_testlib.reporter.interfaces import Reporter @contextmanager diff --git a/reporter/interfaces.py b/src/neofs_testlib/reporter/interfaces.py similarity index 100% rename from reporter/interfaces.py rename to src/neofs_testlib/reporter/interfaces.py diff --git a/src/neofs_testlib/shell/__init__.py b/src/neofs_testlib/shell/__init__.py new file mode 100644 index 0000000..c51f3b9 --- /dev/null +++ b/src/neofs_testlib/shell/__init__.py @@ -0,0 +1,3 @@ +from neofs_testlib.shell.interfaces import CommandResult, Shell +from neofs_testlib.shell.local_shell import LocalShell +from neofs_testlib.shell.ssh_shell import SSHShell diff --git a/shell/interfaces.py b/src/neofs_testlib/shell/interfaces.py similarity index 100% rename from shell/interfaces.py rename to src/neofs_testlib/shell/interfaces.py diff --git a/shell/local_shell.py b/src/neofs_testlib/shell/local_shell.py similarity index 98% rename from shell/local_shell.py rename to src/neofs_testlib/shell/local_shell.py index f542cc4..0b8681a 100644 --- a/shell/local_shell.py +++ b/src/neofs_testlib/shell/local_shell.py @@ -6,8 +6,8 @@ from typing import IO, Optional import pexpect -from reporter import get_reporter -from shell.interfaces import CommandOptions, CommandResult, Shell +from neofs_testlib.reporter import get_reporter +from neofs_testlib.shell.interfaces import CommandOptions, CommandResult, Shell logger = logging.getLogger("neofs.testlib.shell") reporter = get_reporter() diff --git a/shell/ssh_shell.py b/src/neofs_testlib/shell/ssh_shell.py similarity index 98% rename from shell/ssh_shell.py rename to src/neofs_testlib/shell/ssh_shell.py index 5a272a3..6ed4f7a 100644 --- a/shell/ssh_shell.py +++ b/src/neofs_testlib/shell/ssh_shell.py @@ -18,8 +18,8 @@ from paramiko import ( ) from paramiko.ssh_exception import AuthenticationException -from reporter import get_reporter -from shell.interfaces import CommandOptions, CommandResult, Shell +from neofs_testlib.reporter import get_reporter +from neofs_testlib.shell.interfaces import CommandOptions, CommandResult, Shell logger = logging.getLogger("neofs.testlib.shell") reporter = get_reporter() diff --git a/tests/helpers.py b/tests/helpers.py index b80be61..1cb393a 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -1,6 +1,6 @@ import traceback -from shell.interfaces 
import CommandResult +from neofs_testlib.shell.interfaces import CommandResult def format_error_details(error: Exception) -> str: diff --git a/tests/test_local_shell.py b/tests/test_local_shell.py index 52e3861..f9c2d89 100644 --- a/tests/test_local_shell.py +++ b/tests/test_local_shell.py @@ -1,7 +1,8 @@ from unittest import TestCase -from shell.interfaces import CommandOptions, InteractiveInput -from shell.local_shell import LocalShell +from neofs_testlib.shell.interfaces import CommandOptions, InteractiveInput +from neofs_testlib.shell.local_shell import LocalShell + from tests.helpers import format_error_details, get_output_lines diff --git a/tests/test_ssh_shell.py b/tests/test_ssh_shell.py index 213b7cf..849a2fd 100644 --- a/tests/test_ssh_shell.py +++ b/tests/test_ssh_shell.py @@ -1,8 +1,9 @@ import os from unittest import SkipTest, TestCase -from shell.interfaces import CommandOptions, InteractiveInput -from shell.ssh_shell import SSHShell +from neofs_testlib.shell.interfaces import CommandOptions, InteractiveInput +from neofs_testlib.shell.ssh_shell import SSHShell + from tests.helpers import format_error_details, get_output_lines From 3df12e286963e01b82633a02cb5e816727d7970f Mon Sep 17 00:00:00 2001 From: Vladimir Domnich Date: Fri, 23 Sep 2022 13:01:38 +0400 Subject: [PATCH 008/363] [#3] Add tools required to build PyPI package Signed-off-by: Vladimir Domnich --- .gitignore | 7 +++++- README.md | 28 +++++++++++++--------- pyproject.toml | 44 ++++++++++++++++++++++++++++++++++- requirements.txt | 12 ++++++++-- src/neofs_testlib/__init__.py | 1 + 5 files changed, 77 insertions(+), 15 deletions(-) create mode 100644 src/neofs_testlib/__init__.py diff --git a/.gitignore b/.gitignore index 743b23b..8a7034d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,10 @@ # ignore IDE files .vscode -# ignore caches under any path +# ignore temp files under any path +.DS_Store **/__pycache__ + +# ignore build artifacts +/dist +*.egg-info diff --git a/README.md b/README.md index 995c946..bd92d6e 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,17 @@ # neofs-testlib This library provides building blocks and utilities to facilitate development of automated tests for NeoFS system. -## Repository structure -TODO - ## Installation -TODO +The library can be installed via pip: +```shell +$ pip install neofs-testlib +``` + +## Library structure +The library provides the following primary components: + * `cli` - wrappers on top of NeoFS command-line tools. These wrappers execute via a shell and provide a type-safe interface for interacting with the tools. + * `reporter` - an abstraction on top of test reporting tools like Allure. Components of the library will report their steps and attach artifacts to the configured reporter instance. + * `shell` - shells that can be used to execute commands. Currently library provides local shell (on machine that runs the code) or SSH shell that connects to a remote machine via SSH. ## Contributing Any contributions to the library should conform to the [contribution guideline](https://github.com/nspcc-dev/neofs-node/blob/master/CONTRIBUTING.md). @@ -14,21 +14,21 @@ Any contributions to the library should conform to the [contribution guideline]( To setup development environment for `neofs-testlib`, please, take the following steps: 1. Prepare virtualenv -``` +```shell $ virtualenv --python=python3.9 venv $ source venv/bin/activate ``` 2. Install all dependencies: -``` +```shell $ pip install -r requirements.txt ``` 3.
Setup pre-commit hooks to run code formatters on staged files before you run a `git commit` command: -``` -pre-commit install +```shell +$ pre-commit install ``` Optionally you might want to integrate code formatters with your code editor to apply formatters to code files as you go: @@ -37,8 +43,8 @@ Optionally you might want to integrate code formatters with your code editor to ### Unit Tests Before submitting any changes to the library, please, make sure that all unit tests are passing. To run the tests, please, use the following command: -``` -python -m unittest discover --start-directory tests +```shell +$ python -m unittest discover --start-directory tests ``` To enable tests that interact with SSH server, please, setup SSH server and set the following environment variables before running the tests: @@ -78,4 +84,4 @@ To upload package to actual PyPI, please, use command: ```shell $ twine upload dist/* ``` -It will prompt for your username and password. You would need to [create account](https://pypi.org/account/register/) in order to execute it. +It will prompt for your username and password. You would need to [create PyPI account](https://pypi.org/account/register/) in order to execute it. diff --git a/pyproject.toml b/pyproject.toml index bd0087b..7d5b913 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,8 +1,50 @@ +[build-system] +requires = ["setuptools>=63.0.0", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "neofs-testlib" +version = "0.1.0" +description = "Building blocks and utilities to facilitate development of automated tests for NeoFS system" +readme = "README.md" +authors = [{ name = "NSPCC", email = "info@nspcc.ru" }] +license = { text = "GNU General Public License v3 (GPLv3)" } +classifiers = [ + "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", + "Programming Language :: Python", + "Programming Language :: Python :: 3", +] +keywords = ["neofs", "test"] +dependencies = [ + "allure-python-commons>=2.9.45", + "paramiko>=2.10.3", + "pexpect>=4.8.0", +] +requires-python = ">=3.9" + +[project.optional-dependencies] +dev = ["black", "bumpver", "isort", "pre-commit"] + +[project.urls] +Homepage = "https://github.com/nspcc-dev/neofs-testlib" + [tool.isort] profile = "black" -src_paths = ["reporter", "shell", "tests"] +src_paths = ["src", "tests"] line_length = 100 [tool.black] line-length = 100 target-version = ["py39"] + +[tool.bumpver] +current_version = "0.1.0" +version_pattern = "MAJOR.MINOR.PATCH" +commit_message = "Bump version {old_version} -> {new_version}" +commit = false +tag = false +push = false + +[tool.bumpver.file_patterns] +"pyproject.toml" = ['current_version = "{version}"', 'version = "{version}"'] +"src/neofs_testlib/__init__.py" = ["{version}"] diff --git a/requirements.txt b/requirements.txt index 5e62371..9b7968c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,14 @@ allure-python-commons==2.9.45 -black==22.8.0 -isort==5.10.1 paramiko==2.10.3 pexpect==4.8.0 + +# Dev dependencies +black==22.8.0 +bumpver==2022.1118 +isort==5.10.1 pre-commit==2.20.0 + +# Packaging dependencies +build==0.8.0 +setuptools==63.2.0 +twine==4.0.1 diff --git a/src/neofs_testlib/__init__.py b/src/neofs_testlib/__init__.py new file mode 100644 index 0000000..3dc1f76 --- /dev/null +++ b/src/neofs_testlib/__init__.py @@ -0,0 +1 @@ +__version__ = "0.1.0" From 2112665844f3baf4be81bea0884dba4b3aa099ac Mon Sep 17 00:00:00 2001 From: anastasia prasolova Date: Tue, 4 Oct 2022 12:36:54 +0300 Subject: [PATCH 009/363] added DCO check 
Signed-off-by: anastasia prasolova --- .github/workflows/dco.yml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 .github/workflows/dco.yml diff --git a/.github/workflows/dco.yml b/.github/workflows/dco.yml new file mode 100644 index 0000000..40ed8fc --- /dev/null +++ b/.github/workflows/dco.yml @@ -0,0 +1,21 @@ +name: DCO check + +on: + pull_request: + branches: + - master + +jobs: + commits_check_job: + runs-on: ubuntu-latest + name: Commits Check + steps: + - name: Get PR Commits + id: 'get-pr-commits' + uses: tim-actions/get-pr-commits@master + with: + token: ${{ secrets.GITHUB_TOKEN }} + - name: DCO Check + uses: tim-actions/dco@master + with: + commits: ${{ steps.get-pr-commits.outputs.commits }} From d3f51ee398ca6302a2a87f61a14f7b47198f616a Mon Sep 17 00:00:00 2001 From: Vladimir Domnich Date: Wed, 5 Oct 2022 13:14:51 +0400 Subject: [PATCH 010/363] [#7] Add contribution guideline with code style Signed-off-by: Vladimir Domnich --- CONTRIBUTING.md | 214 +++++++++++++++++ README.md | 72 +----- src/neofs_testlib/cli/neofs_adm/config.py | 6 +- src/neofs_testlib/cli/neofs_adm/morph.py | 170 ++++++-------- .../cli/neofs_adm/storage_config.py | 8 +- src/neofs_testlib/cli/neofs_adm/subnet.py | 122 +++++----- src/neofs_testlib/cli/neofs_adm/version.py | 3 +- .../cli/neofs_authmate/secret.py | 59 +++-- .../cli/neofs_authmate/version.py | 3 +- src/neofs_testlib/cli/neogo/candidate.py | 65 +++--- src/neofs_testlib/cli/neogo/contract.py | 218 ++++++++---------- src/neofs_testlib/cli/neogo/db.py | 34 ++- src/neofs_testlib/cli/neogo/nep17.py | 150 ++++++------ src/neofs_testlib/cli/neogo/node.py | 7 +- src/neofs_testlib/cli/neogo/query.py | 78 +++---- src/neofs_testlib/cli/neogo/version.py | 5 +- src/neofs_testlib/cli/neogo/wallet.py | 203 ++++++++-------- src/neofs_testlib/reporter/dummy_reporter.py | 4 +- src/neofs_testlib/reporter/interfaces.py | 24 +- src/neofs_testlib/shell/interfaces.py | 49 ++-- src/neofs_testlib/shell/local_shell.py | 7 +- src/neofs_testlib/shell/ssh_shell.py | 9 +- tests/helpers.py | 26 ++- 23 files changed, 770 insertions(+), 766 deletions(-) create mode 100644 CONTRIBUTING.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..da8ea41 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,214 @@ +# Contribution guide + +First, thank you for contributing! We love and encourage pull requests from +everyone. Please follow the guidelines: + +- Check the open [issues](https://github.com/nspcc-dev/neofs-testlib/issues) and + [pull requests](https://github.com/nspcc-dev/neofs-testlib/pulls) for existing + discussions. + +- Open an issue first, to discuss a new feature or enhancement. + +- Write tests, and make sure the test suite passes locally. + +- Open a pull request, and reference the relevant issue(s). + +- Make sure your commits are logically separated and have good comments + explaining the details of your change. + +- After receiving feedback, amend your commits or add new ones as appropriate. + +- **Have fun!** + +## Development Workflow + +Start by forking the `neofs-testlib` repository, make changes in a branch and then +send a pull request. We encourage pull requests to discuss code changes. Here +are the steps in details: + +### Set up your GitHub Repository +Fork [NeoFS testlib upstream](https://github.com/nspcc-dev/neofs-testlib/fork) source +repository to your own personal repository. 
Copy the URL of your fork and clone it: + +```shell +$ git clone +``` + +### Set up git remote as ``upstream`` +```shell +$ cd neofs-testlib +$ git remote add upstream https://github.com/nspcc-dev/neofs-testlib +$ git fetch upstream +``` + +### Set up development environment +To setup development environment for `neofs-testlib`, please, take the following steps: +1. Prepare virtualenv + +```shell +$ virtualenv --python=python3.9 venv +$ source venv/bin/activate +``` + +2. Install all dependencies: + +```shell +$ pip install -r requirements.txt +``` + +3. Setup pre-commit hooks to run code formatters on staged files before you run a `git commit` command: + +```shell +$ pre-commit install +``` + +Optionally you might want to integrate code formatters with your code editor to apply formatters to code files as you go: +* isort is supported by [PyCharm](https://plugins.jetbrains.com/plugin/15434-isortconnect), [VS Code](https://cereblanco.medium.com/setup-black-and-isort-in-vscode-514804590bf9). Plugins exist for other IDEs/editors as well. +* black can be integrated with multiple editors, please, instructions are available [here](https://black.readthedocs.io/en/stable/integrations/editors.html). + +### Create your feature branch +Before making code changes, make sure you create a separate branch for these +changes. Maybe you will find it convenient to name branch in +`/-` format. + +```shell +$ git checkout -b feature/123-something_awesome +``` + +### Test your changes +Before submitting any changes to the library, please, make sure that all unit tests are passing. To run the tests, please, use the following command: +```shell +$ python -m unittest discover --start-directory tests +``` + +To enable tests that interact with SSH server, please, setup SSH server and set the following environment variables before running the tests: +``` +SSH_SHELL_HOST =
+SSH_SHELL_LOGIN = 
+SSH_SHELL_PRIVATE_KEY_PATH = 
+SSH_SHELL_PRIVATE_KEY_PASSPHRASE = 
+```
+
+### Commit changes
+After verification, commit your changes. There is a [great
+post](https://chris.beams.io/posts/git-commit/) on how to write useful commit
+messages. Try following this template:
+
+```
+[#Issue] Summary
+Description
+
+
+```
+
+```shell
+$ git commit -am '[#123] Add some feature'
+```
+
+### Push to the branch
+Push your locally committed changes to the remote origin (your fork):
+```shell
+$ git push origin feature/123-something_awesome
+```
+
+### Create a Pull Request
+Pull requests can be created via GitHub. Refer to [this
+document](https://help.github.com/articles/creating-a-pull-request/) for
+detailed steps on how to create a pull request. After a Pull Request gets
+peer-reviewed and approved, it will be merged.
+
+## DCO Sign off
+
+All authors to the project retain copyright to their work. However, to ensure
+that they are only submitting work that they have rights to, we are requiring
+everyone to acknowledge this by signing their work.
+
+Any copyright notices in this repository should specify the authors as "the
+contributors".
+
+To sign your work, just add a line like this at the end of your commit message:
+
+```
+Signed-off-by: Samii Sakisaka 
+```
+
+This can easily be done with the `--signoff` option to `git commit`.
+
+By doing this you state that you can certify the following (from [The Developer
+Certificate of Origin](https://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+1 Letterman Drive
+Suite D4700
+San Francisco, CA, 94129
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+Developer's Certificate of Origin 1.1
+By making a contribution to this project, I certify that:
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+```
+
+## Code Style
+We use `black` and `isort` for code formatting. Please, refer to [Black code style](https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html) for details.
+
+Type hints are mandatory for the library's code:
+ - class attributes;
+ - function or method's parameters;
+ - function or method's return type.
+
+The only exception is the return type of test functions or methods - there's not much use in specifying `None` as the return type for each test function.
+
+Do not use relative imports. Even if the module is in the same package, use the full package name.
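+
+For example, here is a minimal sketch that follows these rules (the helper itself is hypothetical, and it assumes `CommandResult` exposes the command output as `stdout`):
+
+```python
+from typing import Optional
+
+# Absolute import of a sibling module by its full package name:
+from neofs_testlib.shell.interfaces import CommandResult
+
+
+def first_output_line(result: CommandResult, strip: bool = True) -> Optional[str]:
+    """Return the first line of command output.
+
+    Args:
+        result: Result of a previously executed command.
+        strip: Strip leading and trailing whitespace from the line.
+
+    Returns:
+        First line of the output, or None if the command produced no output.
+    """
+    lines = result.stdout.splitlines()
+    if not lines:
+        return None
+    return lines[0].strip() if strip else lines[0]
+```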
+ +To format docstrings, please, use [Google Style Docstrings](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html). Type annotations should be specified in the code and not in docstrings (please, refer to [this sample](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/index.html#type-annotations)). + +## Editable installation +If you would like to modify code of the library in the integration with your test suite, you can use editable installation. For that, in virtual environment of your test suite (not in the virtual environment of the testlib itself!) run the following command (path to `neofs-testlib` directory might be different on your machine): +```shell +$ pip install -e ../neofs-testlib +``` + +## Building and publishing package +To build Python package of the library, please run the following command in the library root directory: +```shell +$ python -m build +``` + +This command will put wheel file and source archive under `dist` directory. + +To check that package description will be correctly rendered at PyPI, please, use command: +```shell +$ twine check dist/* +``` + +To upload package to [test PyPI](https://test.pypi.org/project/neofs-testlib/), please, use command: +```shell +$ twine upload -r testpypi dist/* +``` +It will prompt for your username and password. You would need to [create test PyPI account](https://test.pypi.org/account/register/) in order to execute it. + +To upload package to actual PyPI, please, use command: +```shell +$ twine upload dist/* +``` +It will prompt for your username and password. You would need to [create PyPI account](https://pypi.org/account/register/) in order to execute it. diff --git a/README.md b/README.md index bd92d6e..f097eee 100644 --- a/README.md +++ b/README.md @@ -14,74 +14,4 @@ The library provides the following primary components: * `shell` - shells that can be used to execute commands. Currently library provides local shell (on machine that runs the code) or SSH shell that connects to a remote machine via SSH. ## Contributing -Any contributions to the library should conform to the [contribution guideline](https://github.com/nspcc-dev/neofs-node/blob/master/CONTRIBUTING.md). - -### Development Environment -To setup development environment for `neofs-testlib`, please, take the following steps: -1. Prepare virtualenv - -```shell -$ virtualenv --python=python3.9 venv -$ source venv/bin/activate -``` - -2. Install all dependencies: - -```shell -$ pip install -r requirements.txt -``` - -3. Setup pre-commit hooks to run code formatters on staged files before you run a `git commit` command: - -```shell -$ pre-commit install -``` - -Optionally you might want to integrate code formatters with your code editor to apply formatters to code files as you go: -* isort is supported by [PyCharm](https://plugins.jetbrains.com/plugin/15434-isortconnect), [VS Code](https://cereblanco.medium.com/setup-black-and-isort-in-vscode-514804590bf9). Plugins exist for other IDEs/editors as well. -* black can be integrated with multiple editors, please, instructions are available [here](https://black.readthedocs.io/en/stable/integrations/editors.html). - -### Unit Tests -Before submitting any changes to the library, please, make sure that all unit tests are passing. 
To run the tests, please, use the following command: -```shell -$ python -m unittest discover --start-directory tests -``` - -To enable tests that interact with SSH server, please, setup SSH server and set the following environment variables before running the tests: -``` -SSH_SHELL_HOST =
-SSH_SHELL_LOGIN = -SSH_SHELL_PRIVATE_KEY_PATH = -SSH_SHELL_PRIVATE_KEY_PASSPHRASE = -``` - -### Editable installation -If you would like to modify code of the library in the integration with your test suite, you can use editable installation. For that, in virtual environment of your test suite (not in the virtual environment of the testlib itself!) run the following command (path to `neofs-testlib` directory might be different on your machine): -```shell -$ pip install -e ../neofs-testlib -``` - -### Building and publishing package -To build Python package of the library, please run the following command in the library root directory: -```shell -$ python -m build -``` - -This command will put wheel file and source archive under `dist` directory. - -To check that package description will be correctly rendered at PyPI, please, use command: -```shell -$ twine check dist/* -``` - -To upload package to [test PyPI](https://test.pypi.org/project/neofs-testlib/), please, use command: -```shell -$ twine upload -r testpypi dist/* -``` -It will prompt for your username and password. You would need to [create test PyPI account](https://test.pypi.org/account/register/) in order to execute it. - -To upload package to actual PyPI, please, use command: -```shell -$ twine upload dist/* -``` -It will prompt for your username and password. You would need to [create PyPI account](https://pypi.org/account/register/) in order to execute it. +Any contributions to the library should conform to the [contribution guideline](https://github.com/nspcc-dev/neofs-testlib/blob/master/CONTRIBUTING.md). diff --git a/src/neofs_testlib/cli/neofs_adm/config.py b/src/neofs_testlib/cli/neofs_adm/config.py index f8acfd8..86d684b 100644 --- a/src/neofs_testlib/cli/neofs_adm/config.py +++ b/src/neofs_testlib/cli/neofs_adm/config.py @@ -7,12 +7,10 @@ class NeofsAdmConfig(CliCommand): """Initialize basic neofs-adm configuration file. Args: - path (str): path to config (default ~/.neofs/adm/config.yml) - + path: Path to config (default ~/.neofs/adm/config.yml). Returns: - str: Command string - + Command's result. """ return self._execute( "config init", diff --git a/src/neofs_testlib/cli/neofs_adm/morph.py b/src/neofs_testlib/cli/neofs_adm/morph.py index 93c545b..6c67d79 100644 --- a/src/neofs_testlib/cli/neofs_adm/morph.py +++ b/src/neofs_testlib/cli/neofs_adm/morph.py @@ -16,16 +16,14 @@ class NeofsAdmMorph(CliCommand): """Deposit GAS for notary service. Args: - account (str): wallet account address - gas (str): amount of GAS to deposit - rpc_endpoint (str): N3 RPC node endpoint - storage_wallet (str): path to storage node wallet - till (str): notary deposit duration in blocks - + account: Wallet account address. + gas: Amount of GAS to deposit. + rpc_endpoint: N3 RPC node endpoint. + storage_wallet: Path to storage node wallet. + till: Notary deposit duration in blocks. Returns: - str: Command string - + Command's result. """ return self._execute( "morph deposit-notary", @@ -44,19 +42,17 @@ class NeofsAdmMorph(CliCommand): script_hash: Optional[str] = None, storage: Optional[str] = None, ) -> CommandResult: - """Dump GAS balances + """Dump GAS balances. Args: - alphabet (str): dump balances of alphabet contracts - proxy (str): dump balances of the proxy contract - rpc_endpoint (str): N3 RPC node endpoint - script_hash (str): use script-hash format for addresses - storage (str): dump balances of storage nodes from the current netmap - + alphabet: Dump balances of alphabet contracts. + proxy: Dump balances of the proxy contract. 
+ rpc_endpoint: N3 RPC node endpoint. + script_hash: Use script-hash format for addresses. + storage: Dump balances of storage nodes from the current netmap. Returns: - str: Command string - + Command's result. """ return self._execute( "morph dump-balances", @@ -71,12 +67,10 @@ class NeofsAdmMorph(CliCommand): """Section for morph network configuration commands. Args: - rpc_endpoint (str): N3 RPC node endpoint - + rpc_endpoint: N3 RPC node endpoint Returns: - str: Command string - + Command's result. """ return self._execute( "morph dump-config", @@ -97,16 +91,13 @@ class NeofsAdmMorph(CliCommand): """Dump NeoFS containers to file. Args: - cid (str): containers to dump - container_contract (str): container contract hash (for networks without NNS) - dump (str): file where to save dumped containers - (default: ./testlib_dump_container) - rpc_endpoint (str): N3 RPC node endpoint - + cid: Containers to dump. + container_contract: Container contract hash (for networks without NNS). + dump: File where to save dumped containers (default: ./testlib_dump_container). + rpc_endpoint: N3 RPC node endpoint. Returns: - str: Command string - + Command's result. """ return self._execute( "morph dump-containers", @@ -121,12 +112,10 @@ class NeofsAdmMorph(CliCommand): """Dump deployed contract hashes. Args: - rpc_endpoint (str): N3 RPC node endpoint - + rpc_endpoint: N3 RPC node endpoint. Returns: - str: Command string - + Command's result. """ return self._execute( "morph dump-hashes", @@ -140,16 +129,14 @@ class NeofsAdmMorph(CliCommand): def force_new_epoch( self, rpc_endpoint: Optional[str] = None, alphabet: Optional[str] = None ) -> CommandResult: - """Create new NeoFS epoch event in the side chain + """Create new NeoFS epoch event in the side chain. Args: - alphabet (str): path to alphabet wallets dir - rpc_endpoint (str): N3 RPC node endpoint - + alphabet: Path to alphabet wallets dir. + rpc_endpoint: N3 RPC node endpoint. Returns: - str: Command string - + Command's result. """ return self._execute( "morph force-new-epoch", @@ -166,17 +153,15 @@ class NeofsAdmMorph(CliCommand): alphabet_wallets: str, size: int = 7, ) -> CommandResult: - """Generate alphabet wallets for consensus nodes of the morph network + """Generate alphabet wallets for consensus nodes of the morph network. Args: - alphabet_wallets (str): path to alphabet wallets dir - size (int): amount of alphabet wallets to generate (default 7) - rpc_endpoint (str): N3 RPC node endpoint - + alphabet_wallets: Path to alphabet wallets dir. + size: Amount of alphabet wallets to generate (default 7). + rpc_endpoint: N3 RPC node endpoint. Returns: - str: Command string - + Command's result. """ return self._execute( "morph generate-alphabet", @@ -194,18 +179,16 @@ class NeofsAdmMorph(CliCommand): storage_wallet: str, initial_gas: Optional[str] = None, ) -> CommandResult: - """Generate storage node wallet for the morph network + """Generate storage node wallet for the morph network. Args: - alphabet_wallets (str): path to alphabet wallets dir - initial_gas (str): initial amount of GAS to transfer - rpc_endpoint (str): N3 RPC node endpoint - storage_wallet (str): path to new storage node wallet - + alphabet_wallets: Path to alphabet wallets dir. + initial_gas: Initial amount of GAS to transfer. + rpc_endpoint: N3 RPC node endpoint. + storage_wallet: Path to new storage node wallet. Returns: - str: Command string - + Command's result. 
""" return self._execute( "morph generate-storage-wallet", @@ -232,23 +215,20 @@ class NeofsAdmMorph(CliCommand): """Section for morph network configuration commands. Args: - alphabet_wallets (str): path to alphabet wallets dir - container_alias_fee (int): container alias fee (default 500) - container_fee (int): container registration fee (default 1000) - contracts (str): path to archive with compiled NeoFS contracts - (default fetched from latest github release) - epoch_duration (int): amount of side chain blocks in one NeoFS epoch - (default 240) - homomorphic_disabled: (bool): disable object homomorphic hashing - local_dump (str): path to the blocks dump file - max_object_size (int): max single object size in bytes (default 67108864) - protocol (str): path to the consensus node configuration - rpc_endpoint (str): N3 RPC node endpoint - + alphabet_wallets: Path to alphabet wallets dir. + container_alias_fee: Container alias fee (default 500). + container_fee: Container registration fee (default 1000). + contracts: Path to archive with compiled NeoFS contracts + (default fetched from latest github release). + epoch_duration: Amount of side chain blocks in one NeoFS epoch (default 240). + homomorphic_disabled: Disable object homomorphic hashing. + local_dump: Path to the blocks dump file. + max_object_size: Max single object size in bytes (default 67108864). + protocol: Path to the consensus node configuration. + rpc_endpoint: N3 RPC node endpoint. Returns: - str: Command string - + Command's result. """ return self._execute( "morph init", @@ -269,15 +249,13 @@ class NeofsAdmMorph(CliCommand): """Refill GAS of storage node's wallet in the morph network Args: - alphabet_wallets (str): path to alphabet wallets dir - gas (str): additional amount of GAS to transfer - rpc_endpoint (str): N3 RPC node endpoint - storage_wallet (str): path to new storage node wallet - + alphabet_wallets: Path to alphabet wallets dir. + gas: Additional amount of GAS to transfer. + rpc_endpoint: N3 RPC node endpoint. + storage_wallet: Path to new storage node wallet. Returns: - str: Command string - + Command's result. """ return self._execute( "morph refill-gas", @@ -298,15 +276,13 @@ class NeofsAdmMorph(CliCommand): """Restore NeoFS containers from file. Args: - alphabet_wallets (str): path to alphabet wallets dir - cid (str): containers to restore - dump (str): file to restore containers from - rpc_endpoint (str): N3 RPC node endpoint - + alphabet_wallets: Path to alphabet wallets dir. + cid: Containers to restore. + dump: File to restore containers from. + rpc_endpoint: N3 RPC node endpoint. Returns: - str: Command string - + Command's result. """ return self._execute( "morph restore-containers", @@ -325,19 +301,17 @@ class NeofsAdmMorph(CliCommand): storage_price: Optional[int] = None, fee_per_byte: Optional[int] = None, ) -> CommandResult: - """Set global policy values + """Set global policy values. Args: - alphabet_wallets (str): path to alphabet wallets dir - exec_fee_factor (int): ExecFeeFactor= - storage_price (int): StoragePrice= - fee_per_byte (int): FeePerByte= - rpc_endpoint (str): N3 RPC node endpoint - + alphabet_wallets: Path to alphabet wallets dir. + exec_fee_factor: ExecFeeFactor=. + storage_price: StoragePrice=. + fee_per_byte: FeePerByte=. + rpc_endpoint: N3 RPC node endpoint. Returns: - str: Command string - + Command's result. """ non_param_attribute = "" if exec_fee_factor: @@ -364,15 +338,13 @@ class NeofsAdmMorph(CliCommand): """Update NeoFS contracts. 
Args: - alphabet_wallets (str): path to alphabet wallets dir - contracts (str): path to archive with compiled NeoFS contracts - (default fetched from latest github release) - rpc_endpoint (str): N3 RPC node endpoint - + alphabet_wallets: Path to alphabet wallets dir. + contracts: Path to archive with compiled NeoFS contracts + (default fetched from latest github release). + rpc_endpoint: N3 RPC node endpoint. Returns: - str: Command string - + Command's result. """ return self._execute( "morph update-contracts", diff --git a/src/neofs_testlib/cli/neofs_adm/storage_config.py b/src/neofs_testlib/cli/neofs_adm/storage_config.py index aa48f6d..75e3e06 100644 --- a/src/neofs_testlib/cli/neofs_adm/storage_config.py +++ b/src/neofs_testlib/cli/neofs_adm/storage_config.py @@ -7,13 +7,11 @@ class NeofsAdmStorageConfig(CliCommand): """Initialize basic neofs-adm configuration file. Args: - account (str): wallet account - wallet (str): path to wallet - + account: Wallet account. + wallet: Path to wallet. Returns: - str: Command string - + Command's result. """ return self._execute( "storage-config", diff --git a/src/neofs_testlib/cli/neofs_adm/subnet.py b/src/neofs_testlib/cli/neofs_adm/subnet.py index 47a1757..127136d 100644 --- a/src/neofs_testlib/cli/neofs_adm/subnet.py +++ b/src/neofs_testlib/cli/neofs_adm/subnet.py @@ -11,15 +11,13 @@ class NeofsAdmMorphSubnet(CliCommand): """Create NeoFS subnet. Args: - address (str): Address in the wallet, optional - notary (bool): Flag to create subnet in notary environment - rpc_endpoint (str): N3 RPC node endpoint - wallet (str): Path to file with wallet - + address: Address in the wallet, optional. + notary: Flag to create subnet in notary environment. + rpc_endpoint: N3 RPC node endpoint. + wallet: Path to file with wallet. Returns: - str: Command string - + Command's result. """ return self._execute( "morph subnet create", @@ -34,13 +32,11 @@ class NeofsAdmMorphSubnet(CliCommand): """Read information about the NeoFS subnet. Args: - rpc_endpoint (str): N3 RPC node endpoint - subnet (str): ID of the subnet to read - + rpc_endpoint: N3 RPC node endpoint. + subnet: ID of the subnet to read. Returns: - str: Command string - + Command's result. """ return self._execute( "morph subnet get", @@ -57,15 +53,13 @@ class NeofsAdmMorphSubnet(CliCommand): """Remove NeoFS subnet. Args: - address (str): Address in the wallet, optional - rpc_endpoint (str): N3 RPC node endpoint - subnet (str): ID of the subnet to read - wallet (str): Path to file with wallet - + address: Address in the wallet, optional. + rpc_endpoint: N3 RPC node endpoint. + subnet: ID of the subnet to read. + wallet: Path to file with wallet. Returns: - str: Command string - + Command's result. """ return self._execute( "morph subnet remove", @@ -89,18 +83,16 @@ class NeofsAdmMorphSubnet(CliCommand): """Add admin to the NeoFS subnet. Args: - address (str): Address in the wallet, optional - admin (str): Hex-encoded public key of the admin - client (str): Add client admin instead of node one - group (str): Client group ID in text format (needed with --client only) - rpc_endpoint (str): N3 RPC node endpoint - subnet (str): ID of the subnet to read - wallet (str): Path to file with wallet - + address: Address in the wallet, optional. + admin: Hex-encoded public key of the admin. + client: Add client admin instead of node one. + group: Client group ID in text format (needed with --client only). + rpc_endpoint: N3 RPC node endpoint. + subnet: ID of the subnet to read. + wallet: Path to file with wallet. 
Returns: - str: Command string - + Command's result. """ return self._execute( "morph subnet admin add", @@ -123,17 +115,15 @@ class NeofsAdmMorphSubnet(CliCommand): """Remove admin of the NeoFS subnet. Args: - address (str): Address in the wallet, optional - admin (str): Hex-encoded public key of the admin - client (str): Remove client admin instead of node one - rpc_endpoint (str): N3 RPC node endpoint - subnet (str): ID of the subnet to read - wallet (str): Path to file with wallet - + address: Address in the wallet, optional. + admin: Hex-encoded public key of the admin. + client: Remove client admin instead of node one. + rpc_endpoint: N3 RPC node endpoint. + subnet: ID of the subnet to read. + wallet: Path to file with wallet. Returns: - str: Command string - + Command's result. """ return self._execute( "morph subnet admin remove", @@ -156,17 +146,15 @@ class NeofsAdmMorphSubnet(CliCommand): """Add client to the NeoFS subnet. Args: - address (str): Address in the wallet, optional - client (str): Add client admin instead of node one - group (str): Client group ID in text format (needed with --client only) - rpc_endpoint (str): N3 RPC node endpoint - subnet (str): ID of the subnet to read - wallet (str): Path to file with wallet - + address: Address in the wallet, optional. + client: Add client admin instead of node one. + group: Client group ID in text format (needed with --client only). + rpc_endpoint: N3 RPC node endpoint. + subnet: ID of the subnet to read. + wallet: Path to file with wallet. Returns: - str: Command string - + Command's result. """ return self._execute( "morph subnet client add", @@ -189,17 +177,15 @@ class NeofsAdmMorphSubnet(CliCommand): """Remove client of the NeoFS subnet. Args: - address (str): Address in the wallet, optional - client (str): Remove client admin instead of node one - group (str): ID of the client group to work with - rpc_endpoint (str): N3 RPC node endpoint - subnet (str): ID of the subnet to read - wallet (str): Path to file with wallet - + address: Address in the wallet, optional. + client: Remove client admin instead of node one. + group: ID of the client group to work with. + rpc_endpoint: N3 RPC node endpoint. + subnet: ID of the subnet to read. + wallet: Path to file with wallet. Returns: - str: Command string - + Command's result. """ return self._execute( "morph subnet client remove", @@ -214,15 +200,13 @@ class NeofsAdmMorphSubnet(CliCommand): """Add node to the NeoFS subnet. Args: - node (str): Hex-encoded public key of the node - rpc_endpoint (str): N3 RPC node endpoint - subnet (str): ID of the subnet to read - wallet (str): Path to file with wallet - + node: Hex-encoded public key of the node. + rpc_endpoint: N3 RPC node endpoint. + subnet: ID of the subnet to read. + wallet: Path to file with wallet. Returns: - str: Command string - + Command's result. """ return self._execute( "morph subnet node add", @@ -237,15 +221,13 @@ class NeofsAdmMorphSubnet(CliCommand): """Remove node from the NeoFS subnet. Args: - node (str): Hex-encoded public key of the node - rpc_endpoint (str): N3 RPC node endpoint - subnet (str): ID of the subnet to read - wallet (str): Path to file with wallet - + node: Hex-encoded public key of the node. + rpc_endpoint: N3 RPC node endpoint. + subnet: ID of the subnet to read. + wallet: Path to file with wallet. Returns: - str: Command string - + Command's result. 
""" return self._execute( "morph subnet node remove", diff --git a/src/neofs_testlib/cli/neofs_adm/version.py b/src/neofs_testlib/cli/neofs_adm/version.py index 8d7c02d..502d578 100644 --- a/src/neofs_testlib/cli/neofs_adm/version.py +++ b/src/neofs_testlib/cli/neofs_adm/version.py @@ -7,7 +7,6 @@ class NeofsAdmVersion(CliCommand): """Application version Returns: - str: Command string - + Command's result. """ return self._execute("", version=True) diff --git a/src/neofs_testlib/cli/neofs_authmate/secret.py b/src/neofs_testlib/cli/neofs_authmate/secret.py index 12871c8..8826249 100644 --- a/src/neofs_testlib/cli/neofs_authmate/secret.py +++ b/src/neofs_testlib/cli/neofs_authmate/secret.py @@ -14,19 +14,18 @@ class NeofsAuthmateSecret(CliCommand): address: Optional[str] = None, gate_address: Optional[str] = None, ) -> CommandResult: - """Obtain a secret from NeoFS network + """Obtain a secret from NeoFS network. Args: - wallet (str): path to the wallet - address (str): address of wallet account - peer (str): address of neofs peer to connect to - gate_wallet (str): path to the wallet - gate_address (str): address of wallet account - access_key_id (str): access key id for s3 + wallet: Path to the wallet. + address: Address of wallet account. + peer: Address of neofs peer to connect to. + gate_wallet: Path to the wallet. + gate_address: Address of wallet account. + access_key_id: Access key id for s3. Returns: - str: Command string - + Command's result. """ return self._execute( "obtain-secret", @@ -55,32 +54,26 @@ class NeofsAuthmateSecret(CliCommand): """Obtain a secret from NeoFS network Args: - wallet (str): path to the wallet - address (str): address of wallet account - peer (str): address of a neofs peer to connect to - bearer_rules (str): rules for bearer token as plain json string - gate_public_key (str): public 256r1 key of a gate (use flags repeatedly for - multiple gates) - container_id (str): auth container id to put the secret into - container_friendly_name (str): friendly name of auth container to put the - secret into - container_placement_policy (str): placement policy of auth container to put the - secret into - (default: "REP 2 IN X CBF 3 SELECT 2 FROM * AS X") - session_tokens (str): create session tokens with rules, if the rules are - set as 'none', no session tokens will be created - lifetime (str): Lifetime of tokens. For example 50h30m - (note: max time unit is an hour so to set a day you - should use 24h). It will be ceil rounded to the - nearest amount of epoch. (default: 720h0m0s) - container_policy (str): mapping AWS storage class to NeoFS storage policy as - plain json string or path to json file - aws_cli_credentials (str): path to the aws cli credential file - + wallet: Path to the wallet. + address: Address of wallet account. + peer: Address of a neofs peer to connect to. + bearer_rules: Rules for bearer token as plain json string. + gate_public_key: Public 256r1 key of a gate (use flags repeatedly for multiple gates). + container_id: Auth container id to put the secret into. + container_friendly_name: Friendly name of auth container to put the secret into. + container_placement_policy: Placement policy of auth container to put the secret into + (default: "REP 2 IN X CBF 3 SELECT 2 FROM * AS X"). + session_tokens: Create session tokens with rules, if the rules are set as 'none', no + session tokens will be created. + lifetime: Lifetime of tokens. For example 50h30m (note: max time unit is an hour so to + set a day you should use 24h). 
It will be ceil rounded to the nearest amount of + epoch. (default: 720h0m0s). + container_policy: Mapping AWS storage class to NeoFS storage policy as plain json string + or path to json file. + aws_cli_credentials: Path to the aws cli credential file. Returns: - str: Command string - + Command's result. """ return self._execute( "issue-secret", diff --git a/src/neofs_testlib/cli/neofs_authmate/version.py b/src/neofs_testlib/cli/neofs_authmate/version.py index e146f52..ec336dc 100644 --- a/src/neofs_testlib/cli/neofs_authmate/version.py +++ b/src/neofs_testlib/cli/neofs_authmate/version.py @@ -7,7 +7,6 @@ class NeofsAuthmateVersion(CliCommand): """Application version Returns: - str: Command string - + Command's result. """ return self._execute("", version=True) diff --git a/src/neofs_testlib/cli/neogo/candidate.py b/src/neofs_testlib/cli/neogo/candidate.py index 4e796cc..50200bb 100644 --- a/src/neofs_testlib/cli/neogo/candidate.py +++ b/src/neofs_testlib/cli/neogo/candidate.py @@ -14,22 +14,19 @@ class NeoGoCandidate(CliCommand): gas: Optional[float] = None, timeout: int = 10, ) -> CommandResult: - """Register as a new candidate + """Register as a new candidate. Args: - address (str): Address to register - wallet (str): Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config (str): Target location of the wallet config file; - conflicts with --wallet flag. - gas (float): network fee to add to the transaction (prioritizing it) - rpc_endpoint (str): RPC node address - timeout (int): Timeout for the operation (default: 10s) - + address: Address to register. + wallet: Target location of the wallet file ('-' to read from stdin); + conflicts with --wallet-config flag. + wallet_config: Target location of the wallet config file; conflicts with --wallet flag. + gas: Network fee to add to the transaction (prioritizing it). + rpc_endpoint: RPC node address. + timeout: Timeout for the operation (default: 10s). Returns: - str: Command string - + Command's result. """ assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG @@ -51,22 +48,19 @@ class NeoGoCandidate(CliCommand): gas: Optional[float] = None, timeout: int = 10, ) -> CommandResult: - """Unregister self as a candidate + """Unregister self as a candidate. Args: - address (str): Address to unregister - wallet (str): Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config (str): Target location of the wallet config file; - conflicts with --wallet flag. - gas (float): network fee to add to the transaction (prioritizing it) - rpc_endpoint (str): RPC node address - timeout (int): Timeout for the operation (default: 10s) - + address: Address to unregister. + wallet: Target location of the wallet file ('-' to read from stdin); + conflicts with --wallet-config flag. + wallet_config: Target location of the wallet config file; conflicts with --wallet flag. + gas: Network fee to add to the transaction (prioritizing it). + rpc_endpoint: RPC node address. + timeout: Timeout for the operation (default: 10s). Returns: - str: Command string - + Command's result. """ assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG @@ -88,23 +82,22 @@ class NeoGoCandidate(CliCommand): gas: Optional[float] = None, timeout: int = 10, ) -> CommandResult: - """Votes for a validator by calling "vote" method of a NEO native - contract. Do not provide candidate argument to perform unvoting. + """Votes for a validator. 
+ Voting happens by calling "vote" method of a NEO native contract. Do not provide + candidate argument to perform unvoting. Args: - candidate (str): Public key of candidate to vote for - wallet (str): Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config (str): Target location of the wallet config file; - conflicts with --wallet flag. - gas (float): network fee to add to the transaction (prioritizing it) - rpc_endpoint (str): RPC node address - timeout (int): Timeout for the operation (default: 10s) + candidate: Public key of candidate to vote for. + wallet: Target location of the wallet file ('-' to read from stdin); + conflicts with --wallet-config flag. + wallet_config: Target location of the wallet config file; conflicts with --wallet flag. + gas: Network fee to add to the transaction (prioritizing it). + rpc_endpoint: RPC node address. + timeout: Timeout for the operation (default: 10s). Returns: - str: Command string - + Command's result. """ assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG diff --git a/src/neofs_testlib/cli/neogo/contract.py b/src/neofs_testlib/cli/neogo/contract.py index a097e4d..5329978 100644 --- a/src/neofs_testlib/cli/neogo/contract.py +++ b/src/neofs_testlib/cli/neogo/contract.py @@ -16,22 +16,21 @@ class NeoGoContract(CliCommand): no_permissions: bool = False, bindings: Optional[str] = None, ) -> CommandResult: - """Compile a smart contract to a .nef file + """Compile a smart contract to a .nef file. Args: - input_file (str): Input file for the smart contract to be compiled - out (str): Output of the compiled contract - manifest (str): Emit contract manifest (*.manifest.json) file into separate - file using configuration input file (*.yml) - config (str): Configuration input file (*.yml) - no_standards (bool): do not check compliance with supported standards - no_events (bool): do not check emitted events with the manifest - no_permissions (bool): do not check if invoked contracts are allowed in manifest - bindings (str): output file for smart-contract bindings configuration + input_file: Input file for the smart contract to be compiled. + out: Output of the compiled contract. + manifest: Emit contract manifest (*.manifest.json) file into separate file using + configuration input file (*.yml). + config: Configuration input file (*.yml). + no_standards: Do not check compliance with supported standards. + no_events: Do not check emitted events with the manifest. + no_permissions: Do not check if invoked contracts are allowed in manifest. + bindings: Output file for smart-contract bindings configuration. Returns: - str: Command string - + Command's result. 
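+
+        Example:
+            Illustrative sketch only; the paths are placeholders and ``neogo`` is
+            assumed to be an already configured ``NeoGo`` instance::
+
+                neogo.contract.compile(
+                    input_file="contract.go",
+                    out="contract.nef",
+                    manifest="contract.manifest.json",
+                    config="contract.yml",
+                )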
""" return self._execute( "contract compile", @@ -59,24 +58,23 @@ class NeoGoContract(CliCommand): """Deploy a smart contract (.nef with description) Args: - wallet (str): wallet to use to get the key for transaction signing; - conflicts with wallet_config - wallet_config (str): path to wallet config to use to get the key for transaction - signing; conflicts with wallet - address (str): address to use as transaction signee (and gas source) - gas (float): network fee to add to the transaction (prioritizing it) - sysgas (float): system fee to add to transaction (compensating for execution) - out (str): file to put JSON transaction to - force (bool): Do not ask for a confirmation - rpc_endpoint (str): RPC node address - timeout (int): Timeout for the operation (default: 10s) - input_file (str): Input file for the smart contract (*.nef) - manifest (str): Emit contract manifest (*.manifest.json) file into separate - file using configuration input file (*.yml) + wallet: Wallet to use to get the key for transaction signing; + conflicts with wallet_config. + wallet_config: Path to wallet config to use to get the key for transaction signing; + conflicts with wallet. + address: Address to use as transaction signee (and gas source). + gas: Network fee to add to the transaction (prioritizing it). + sysgas: System fee to add to transaction (compensating for execution). + out: File to put JSON transaction to. + force: Do not ask for a confirmation. + rpc_endpoint: RPC node address. + timeout: Timeout for the operation (default: 10s). + input_file: Input file for the smart contract (*.nef). + manifest: Emit contract manifest (*.manifest.json) file into separate file using + configuration input file (*.yml). Returns: - str: Command string - + Command's result. """ assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG @@ -96,17 +94,16 @@ class NeoGoContract(CliCommand): config: Optional[str] = None, manifest: Optional[str] = None, ) -> CommandResult: - """Generate wrapper to use in other contracts + """Generate wrapper to use in other contracts. Args: - config (str): Configuration file to use - manifest (str): Read contract manifest (*.manifest.json) file - out (str): Output of the compiled contract - hash (str): Smart-contract hash + config: Configuration file to use. + manifest: Read contract manifest (*.manifest.json) file. + out: Output of the compiled contract. + hash: Smart-contract hash. Returns: - str: Command string - + Command's result. """ return self._execute( "contract generate-wrapper", @@ -133,34 +130,33 @@ class NeoGoContract(CliCommand): rpc_endpoint: Optional[str] = None, timeout: int = 10, ) -> CommandResult: - """Executes given (as a script hash) deployed script with the given method, - arguments and signers. Sender is included in the list of signers by default - with None witness scope. If you'd like to change default sender's scope, - specify it via signers parameter. See testinvokefunction documentation for - the details about parameters. It differs from testinvokefunction in that this - command sends an invocation transaction to the network. + """Executes given (as a script hash) deployed script. + + Script is executed with the given method, arguments and signers. Sender is included in + the list of signers by default with None witness scope. If you'd like to change default + sender's scope, specify it via signers parameter. See testinvokefunction documentation + for the details about parameters. 
It differs from testinvokefunction in that this command + sends an invocation transaction to the network. Args: - scripthash (str): Function hash - method (str): Call method - arguments (str): Method arguments - multisig_hash (str): Multisig hash - wallet (str): wallet to use to get the key for transaction signing; - conflicts with wallet_config - wallet_config (str): path to wallet config to use to get the key for transaction - signing; conflicts with wallet - address (str): address to use as transaction signee (and gas source) - gas (float): network fee to add to the transaction (prioritizing it) - sysgas (float): system fee to add to transaction (compensating for execution) - out (str): file to put JSON transaction to - force (bool): force-push the transaction in case of bad VM state after - test script invocation - rpc_endpoint (str): RPC node address - timeout (int): Timeout for the operation (default: 10s) + scripthash: Function hash. + method: Call method. + arguments: Method arguments. + multisig_hash: Multisig hash. + wallet: Wallet to use to get the key for transaction signing; + conflicts with wallet_config. + wallet_config: Path to wallet config to use to get the key for transaction signing; + conflicts with wallet. + address: Address to use as transaction signee (and gas source). + gas: Network fee to add to the transaction (prioritizing it). + sysgas: System fee to add to transaction (compensating for execution). + out: File to put JSON transaction to. + force: Force-push the transaction in case of bad VM state after test script invocation. + rpc_endpoint: RPC node address. + timeout: Timeout for the operation (default: 10s). Returns: - str: Command string - + Command's result. """ multisig_hash = f"-- {multisig_hash}" or "" return self._execute( @@ -183,28 +179,27 @@ class NeoGoContract(CliCommand): rpc_endpoint: Optional[str] = None, timeout: int = 10, ) -> CommandResult: - """Executes given (as a script hash) deployed script with the given method, - arguments and signers (sender is not included by default). If no method is given - "" is passed to the script, if no arguments are given, an empty array is - passed, if no signers are given no array is passed. If signers are specified, - the first one of them is treated as a sender. All of the given arguments are - encapsulated into array before invoking the script. The script thus should - follow the regular convention of smart contract arguments (method string and - an array of other arguments). + """Executes given (as a script hash) deployed script. - See more information and samples in `neo-go contract testinvokefunction --help` + Script is executed with the given method, arguments and signers (sender is not included + by default). If no method is given "" is passed to the script, if no arguments are given, + an empty array is passed, if no signers are given no array is passed. If signers are + specified, the first one of them is treated as a sender. All of the given arguments are + encapsulated into array before invoking the script. The script thus should follow the + regular convention of smart contract arguments (method string and an array of other + arguments). + See more information and samples in `neo-go contract testinvokefunction --help`. Args: - scripthash (str): Function hash - method (str): Call method - arguments (str): Method arguments - multisig_hash (str): Multisig hash - rpc_endpoint (str): RPC node address - timeout (int): Timeout for the operation (default: 10s) + scripthash: Function hash. 
+ method: Call method. + arguments: Method arguments. + multisig_hash: Multisig hash. + rpc_endpoint: RPC node address. + timeout: Timeout for the operation (default: 10s). Returns: - str: Command string - + Command's result. """ multisig_hash = f"-- {multisig_hash}" or "" return self._execute( @@ -223,20 +218,18 @@ class NeoGoContract(CliCommand): rpc_endpoint: Optional[str] = None, timeout: int = 10, ) -> CommandResult: - """Executes given compiled AVM instructions in NEF format with the given set of - signers not included sender by default. See testinvokefunction documentation - for the details about parameters. + """Executes given compiled AVM instructions in NEF format. + Instructions are executed with the given set of signers not including sender by default. + See testinvokefunction documentation for the details about parameters. Args: - input_file (str): Input location of the .nef file that needs to be invoked - conflicts with wallet_config - rpc_endpoint (str): RPC node address - timeout (int): Timeout for the operation (default: 10s) + input_file: Input location of the .nef file that needs to be invoked. + rpc_endpoint: RPC node address. + timeout: Timeout for the operation (default: 10s). Returns: - str: Command string - + Command's result. """ return self._execute( f"contract testinvokescript", @@ -247,20 +240,15 @@ class NeoGoContract(CliCommand): }, ) - def init( - self, - name: str, - skip_details: bool = False, - ) -> CommandResult: - """Initialize a new smart-contract in a directory with boiler plate code + def init(self, name: str, skip_details: bool = False) -> CommandResult: + """Initialize a new smart-contract in a directory with boiler plate code. Args: - name (str): name of the smart-contract to be initialized - skip_details (bool): skip filling in the projects and contract details + name: Name of the smart-contract to be initialized. + skip_details: Skip filling in the projects and contract details. Returns: - str: Command string - + Command's result. """ return self._execute( "contract init", @@ -276,15 +264,14 @@ class NeoGoContract(CliCommand): input_file: Optional[str] = None, compile: Optional[str] = None, ) -> CommandResult: - """Creates a user readable dump of the program instructions + """Creates a user readable dump of the program instructions. Args: - input_file (str): input file of the program (either .go or .nef) - compile (str): compile input file (it should be go code then) + input_file: Input file of the program (either .go or .nef). + compile: Compile input file (it should be go code then). Returns: - str: Command string - + Command's result. """ return self._execute( "contract inspect", @@ -301,16 +288,15 @@ class NeoGoContract(CliCommand): manifest: str, sender: Optional[str] = None, ) -> CommandResult: - """Calculates hash of a contract after deployment + """Calculates hash of a contract after deployment. Args: - input_file (str): path to NEF file - sender (str): sender script hash or address - manifest (str): path to manifest file + input_file: Path to NEF file. + sender: Sender script hash or address. + manifest: Path to manifest file. Returns: - str: Command string - + Command's result. """ return self._execute( "contract calc-hash", @@ -330,22 +316,18 @@ class NeoGoContract(CliCommand): sender: Optional[str] = None, nef: Optional[str] = None, ) -> CommandResult: - """Adds group to the manifest + """Adds group to the manifest. 
        Args:
-            wallet (str): wallet to use to get the key for transaction signing;
-                    conflicts with wallet_config
-            wallet_config (str): path to wallet config to use to get the key for transaction
-                    signing; conflicts with wallet
-            sender (str): deploy transaction sender
-            address (str): account to sign group with
-            nef (str): path to the NEF file
-            manifest (str): path to the manifest
-
+            wallet: Wallet to use to get the key for transaction signing; conflicts with wallet_config.
+            wallet_config: Path to wallet config to use to get the key for transaction signing; conflicts with wallet.
+            sender: Deploy transaction sender.
+            address: Account to sign group with.
+            nef: Path to the NEF file.
+            manifest: Path to the manifest.
        Returns:
-            str: Command string
-
+            Command's result.
        """
        return self._execute(
            "contract manifest add-group",
diff --git a/src/neofs_testlib/cli/neogo/db.py b/src/neofs_testlib/cli/neogo/db.py
index 05bece2..ae3185d 100644
--- a/src/neofs_testlib/cli/neogo/db.py
+++ b/src/neofs_testlib/cli/neogo/db.py
@@ -14,19 +14,17 @@ class NeoGoDb(CliCommand):
        count: int = 0,
        start: int = 0,
    ) -> CommandResult:
-        """Dump blocks (starting with block #1) to the file
+        """Dump blocks (starting with block #1) to the file.

        Args:
-            config_path (str): path to config
-            network (NetworkType): Select network type (default: private)
-            count (int): number of blocks to be processed (default or 0: all chain)
-                    (default: 0)
-            start (int): block number to start from (default: 0) (default: 0)
-            out (srt): Output file (stdout if not given)
+            config_path: Path to config.
+            network: Select network type (default: private).
+            count: Number of blocks to be processed (default or 0: all chain) (default: 0).
+            start: Block number to start from (default: 0).
+            out: Output file (stdout if not given).

        Returns:
-            str: Command string
-
+            Command's result.
        """
        return self._execute(
            "db dump",
@@ -47,20 +45,18 @@
        dump: Optional[str] = None,
        incremental: bool = False,
    ) -> CommandResult:
-        """Dump blocks (starting with block #1) to the file
+        """Restore blocks from the file.

        Args:
-            config_path (str): path to config
-            network (NetworkType): Select network type (default: private)
-            count (int): number of blocks to be processed (default or 0: all chain)
-                    (default: 0)
-            input_file (str): Input file (stdin if not given)
-            dump (str): directory for storing JSON dumps
-            incremental (bool): use if dump is incremental
+            config_path: Path to config.
+            network: Select network type (default: private).
+            count: Number of blocks to be processed (default or 0: all chain) (default: 0).
+            input_file: Input file (stdin if not given).
+            dump: Directory for storing JSON dumps.
+            incremental: Use if dump is incremental.

        Returns:
-            str: Command string
-
+            Command's result.
        """
        return self._execute(
            "db restore",
diff --git a/src/neofs_testlib/cli/neogo/nep17.py b/src/neofs_testlib/cli/neogo/nep17.py
index 8d89c25..edd65eb 100644
--- a/src/neofs_testlib/cli/neogo/nep17.py
+++ b/src/neofs_testlib/cli/neogo/nep17.py
@@ -14,21 +14,19 @@ class NeoGoNep17(CliCommand):
        wallet_config: Optional[str] = None,
        timeout: int = 10,
    ) -> CommandResult:
-        """Get address balance
+        """Get address balance.

        Args:
-            address (str): Address to use
-            wallet (str): Target location of the wallet file ('-' to read from stdin);
-                    conflicts with --wallet-config flag.
-            wallet_config (str): Target location of the wallet config file;
-                    conflicts with --wallet flag.
- token (str): Token to use (hash or name (for NEO/GAS or imported tokens)) - rpc_endpoint (str): RPC node address - timeout (int): Timeout for the operation (default: 10s) + address: Address to use. + wallet: Target location of the wallet file ('-' to read from stdin); + conflicts with --wallet-config flag. + wallet_config: Target location of the wallet config file; conflicts with --wallet flag. + token: Token to use (hash or name (for NEO/GAS or imported tokens)). + rpc_endpoint: RPC node address. + timeout: Timeout for the operation (default: 10s). Returns: - str: Command string - + Command's result. """ assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG @@ -50,21 +48,19 @@ class NeoGoNep17(CliCommand): rpc_endpoint: Optional[str] = None, timeout: int = 10, ) -> CommandResult: - """import NEP-17 token to a wallet + """Import NEP-17 token to a wallet. Args: - address (str): Token contract address or hash in LE - wallet (str): Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config (str): Target location of the wallet config file; - conflicts with --wallet flag. - token (str): Token to use (hash or name (for NEO/GAS or imported tokens)) - rpc_endpoint (str): RPC node address - timeout (int): Timeout for the operation (default: 10s) + address: Token contract address or hash in LE. + wallet: Target location of the wallet file ('-' to read from stdin); + conflicts with --wallet-config flag. + wallet_config: Target location of the wallet config file; conflicts with --wallet flag. + token: Token to use (hash or name (for NEO/GAS or imported tokens)). + rpc_endpoint: RPC node address. + timeout: Timeout for the operation (default: 10s). Returns: - str: Command string - + Command's result. """ assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG @@ -83,18 +79,16 @@ class NeoGoNep17(CliCommand): wallet: Optional[str] = None, wallet_config: Optional[str] = None, ) -> CommandResult: - """print imported NEP-17 token info + """Print imported NEP-17 token info. Args: - wallet (str): Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config (str): Target location of the wallet config file; - conflicts with --wallet flag. - token (str): Token to use (hash or name (for NEO/GAS or imported tokens)) + wallet: Target location of the wallet file ('-' to read from stdin); + conflicts with --wallet-config flag. + wallet_config: Target location of the wallet config file; conflicts with --wallet flag. + token: Token to use (hash or name (for NEO/GAS or imported tokens)). Returns: - str: Command string - + Command's result. """ assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG @@ -114,19 +108,17 @@ class NeoGoNep17(CliCommand): wallet_config: Optional[str] = None, force: bool = False, ) -> CommandResult: - """remove NEP-17 token from the wallet + """Remove NEP-17 token from the wallet. Args: - wallet (str): Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config (str): Target location of the wallet config file; - conflicts with --wallet flag. - token (str): Token to use (hash or name (for NEO/GAS or imported tokens)) - force (bool): Do not ask for a confirmation + wallet: Target location of the wallet file ('-' to read from stdin); + conflicts with --wallet-config flag. + wallet_config: Target location of the wallet config file; conflicts with --wallet flag. 
+            token: Token to use (hash or name (for NEO/GAS or imported tokens)).
+            force: Do not ask for a confirmation.

        Returns:
-            str: Command string
-
+            Command's result.
        """
        return self._execute(
            "wallet nep17 remove",
@@ -152,33 +144,32 @@ class NeoGoNep17(CliCommand):
        amount: float = 0,
        timeout: int = 10,
    ) -> CommandResult:
-        """Transfers specified NEP-17 token amount with optional 'data' parameter and cosigners
-        list attached to the transfer. See 'contract testinvokefunction' documentation
-        for the details about 'data' parameter and cosigners syntax. If no 'data' is
-        given then default nil value will be used. If no cosigners are given then the
-        sender with CalledByEntry scope will be used as the only signer.
+        """Transfers specified NEP-17 token amount.
+
+        Transfer is executed with optional 'data' parameter and cosigners list attached to the
+        transfer. See 'contract testinvokefunction' documentation for the details about 'data'
+        parameter and cosigners syntax. If no 'data' is given then default nil value will be used.
+        If no cosigners are given then the sender with CalledByEntry scope will be used as the only
+        signer.

        Args:
-            wallet (str): Target location of the wallet file ('-' to read from stdin);
-                    conflicts with --wallet-config flag.
-            wallet_config (str): Target location of the wallet config file;
-                    conflicts with --wallet flag.
-            out (str): file to put JSON transaction to
-            from_address (str): Address to send an asset from
-            to_address (str): Address to send an asset to
-            token (str): Token to use (hash or name (for NEO/GAS or imported tokens))
-            force (bool): Do not ask for a confirmation
-            gas (float): network fee to add to the transaction (prioritizing it)
-            sysgas (float): system fee to add to transaction (compensating for execution)
-            force (bool): Do not ask for a confirmation
-            amount (float) Amount of asset to send
-            rpc_endpoint (str): RPC node address
-            timeout (int): Timeout for the operation (default: 10s)
-
+            wallet: Target location of the wallet file ('-' to read from stdin);
+                    conflicts with --wallet-config flag.
+            wallet_config: Target location of the wallet config file; conflicts with --wallet flag.
+            out: File to put JSON transaction to.
+            from_address: Address to send an asset from.
+            to_address: Address to send an asset to.
+            token: Token to use (hash or name (for NEO/GAS or imported tokens)).
+            force: Do not ask for a confirmation.
+            gas: Network fee to add to the transaction (prioritizing it).
+            sysgas: System fee to add to transaction (compensating for execution).
+            amount: Amount of asset to send.
+            rpc_endpoint: RPC node address.
+            timeout: Timeout for the operation (default: 10s).
        Returns:
-            str: Command string
-
+            Command's result.
        """
        assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG

@@ -206,29 +197,26 @@ class NeoGoNep17(CliCommand):
        amount: float = 0,
        timeout: int = 10,
    ) -> CommandResult:
-        """transfer NEP-17 tokens to multiple recipients
+        """Transfer NEP-17 tokens to multiple recipients.

        Args:
-            wallet (str): Target location of the wallet file ('-' to read from stdin);
-                    conflicts with --wallet-config flag.
-            wallet_config (str): Target location of the wallet config file;
-                    conflicts with --wallet flag.
-            out (str): file to put JSON transaction to
-            from_address (str): Address to send an asset from
-            to_address (str): Address to send an asset to
-            token (str): Token to use (hash or name (for NEO/GAS or imported tokens))
-            force (bool): Do not ask for a confirmation
-            gas (float): network fee to add to the transaction (prioritizing it)
-            sysgas (float): system fee to add to transaction (compensating for execution)
-            force (bool): Do not ask for a confirmation
-            amount (float) Amount of asset to send
-            rpc_endpoint (str): RPC node address
-            timeout (int): Timeout for the operation (default: 10s)
-
+            wallet: Target location of the wallet file ('-' to read from stdin);
+                    conflicts with --wallet-config flag.
+            wallet_config: Target location of the wallet config file; conflicts with --wallet flag.
+            out: File to put JSON transaction to.
+            from_address: Address to send an asset from.
+            to_address: Address to send an asset to.
+            token: Token to use (hash or name (for NEO/GAS or imported tokens)).
+            force: Do not ask for a confirmation.
+            gas: Network fee to add to the transaction (prioritizing it).
+            sysgas: System fee to add to transaction (compensating for execution).
+            amount: Amount of asset to send.
+            rpc_endpoint: RPC node address.
+            timeout: Timeout for the operation (default: 10s).
        Returns:
-            str: Command string
-
+            Command's result.
        """
        assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG
diff --git a/src/neofs_testlib/cli/neogo/node.py b/src/neofs_testlib/cli/neogo/node.py
index 0d79561..8fe4d28 100644
--- a/src/neofs_testlib/cli/neogo/node.py
+++ b/src/neofs_testlib/cli/neogo/node.py
@@ -5,13 +5,12 @@ from neofs_testlib.shell import CommandResult

class NeoGoNode(CliCommand):
    def start(self, network: NetworkType = NetworkType.PRIVATE) -> CommandResult:
-        """Start a NEO node
+        """Start a NEO node.

        Args:
-            network (NetworkType): Select network type (default: private)
+            network: Select network type (default: private).

        Returns:
-            str: Command string
-
+            Command's result.
        """
        return self._execute("start", **{network.value: True})
diff --git a/src/neofs_testlib/cli/neogo/query.py b/src/neofs_testlib/cli/neogo/query.py
index 1567026..6d93799 100644
--- a/src/neofs_testlib/cli/neogo/query.py
+++ b/src/neofs_testlib/cli/neogo/query.py
@@ -3,20 +3,15 @@ from neofs_testlib.shell import CommandResult


class NeoGoQuery(CliCommand):
-    def candidates(
-        self,
-        rpc_endpoint: str,
-        timeout: int = 10,
-    ) -> CommandResult:
-        """Get candidates and votes
+    def candidates(self, rpc_endpoint: str, timeout: int = 10) -> CommandResult:
+        """Get candidates and votes.

        Args:
-            rpc_endpoint (str): RPC node address
-            timeout (int): Timeout for the operation (default: 10s)
+            rpc_endpoint: RPC node address.
+            timeout: Timeout for the operation (default: 10s).

        Returns:
-            str: Command string
-
+            Command's result.
        """
        return self._execute(
            "query candidates",
            **{
                "rpc-endpoint": rpc_endpoint,
                "timeout": timeout,
            },
        )

-    def committee(
-        self,
-        rpc_endpoint: str,
-        timeout: int = 10,
-    ) -> CommandResult:
-        """Get committee list
+    def committee(self, rpc_endpoint: str, timeout: int = 10) -> CommandResult:
+        """Get committee list.

        Args:
-            rpc_endpoint (str): RPC node address
-            timeout (int): Timeout for the operation (default: 10s)
+            rpc_endpoint: RPC node address.
+            timeout: Timeout for the operation (default: 10s).

        Returns:
-            str: Command string
-
+            Command's result.
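+
+        Example:
+            Illustrative sketch only; ``neogo`` is an assumed, already configured
+            ``NeoGo`` instance and the endpoint is a placeholder::
+
+                neogo.query.committee(rpc_endpoint="http://localhost:30333")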
""" return self._execute( "query committee", @@ -51,20 +41,15 @@ class NeoGoQuery(CliCommand): }, ) - def height( - self, - rpc_endpoint: str, - timeout: int = 10, - ) -> CommandResult: - """Get node height + def height(self, rpc_endpoint: str, timeout: int = 10) -> CommandResult: + """Get node height. Args: - rpc_endpoint (str): RPC node address - timeout (int): Timeout for the operation (default: 10s) + rpc_endpoint: RPC node address. + timeout: Timeout for the operation (default: 10s). Returns: - str: Command string - + Command's result. """ return self._execute( "query height", @@ -75,22 +60,16 @@ class NeoGoQuery(CliCommand): }, ) - def tx( - self, - tx_hash: str, - rpc_endpoint: str, - timeout: int = 10, - ) -> CommandResult: - """Query transaction status + def tx(self, tx_hash: str, rpc_endpoint: str, timeout: int = 10) -> CommandResult: + """Query transaction status. Args: - tx_hash (str): Hash of transaction - rpc_endpoint (str): RPC node address - timeout (int): Timeout for the operation (default: 10s) + tx_hash: Hash of transaction. + rpc_endpoint: RPC node address. + timeout: Timeout for the operation (default: 10s). Returns: - str: Command string - + Command's result. """ return self._execute( f"query tx {tx_hash}", @@ -101,20 +80,15 @@ class NeoGoQuery(CliCommand): }, ) - def voter( - self, - rpc_endpoint: str, - timeout: int = 10, - ) -> CommandResult: - """Print NEO holder account state + def voter(self, rpc_endpoint: str, timeout: int = 10) -> CommandResult: + """Print NEO holder account state. Args: - rpc_endpoint (str): RPC node address - timeout (int): Timeout for the operation (default: 10s) + rpc_endpoint: RPC node address. + timeout: Timeout for the operation (default: 10s). Returns: - str: Command string - + Command's result. """ return self._execute( "query voter", diff --git a/src/neofs_testlib/cli/neogo/version.py b/src/neofs_testlib/cli/neogo/version.py index 18f52bd..0d56bdb 100644 --- a/src/neofs_testlib/cli/neogo/version.py +++ b/src/neofs_testlib/cli/neogo/version.py @@ -4,10 +4,9 @@ from neofs_testlib.shell import CommandResult class NeoGoVersion(CliCommand): def get(self) -> CommandResult: - """Application version + """Application version. Returns: - str: Command string - + Command's result. """ return self._execute("", version=True) diff --git a/src/neofs_testlib/cli/neogo/wallet.py b/src/neofs_testlib/cli/neogo/wallet.py index c5cf012..e327fb5 100644 --- a/src/neofs_testlib/cli/neogo/wallet.py +++ b/src/neofs_testlib/cli/neogo/wallet.py @@ -13,20 +13,18 @@ class NeoGoWallet(CliCommand): wallet_config: Optional[str] = None, timeout: int = 10, ) -> CommandResult: - """claim GAS + """Claim GAS. Args: - address (str): Address to claim GAS for - wallet (str): Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config (str): Target location of the wallet config file; - conflicts with --wallet flag. - rpc_endpoint (str): RPC node address - timeout (int): Timeout for the operation (default: 10s) + address: Address to claim GAS for. + wallet: Target location of the wallet file ('-' to read from stdin); + conflicts with --wallet-config flag. + wallet_config: Target location of the wallet config file; conflicts with --wallet flag. + rpc_endpoint: RPC node address. + timeout: Timeout for the operation (default: 10s). Returns: - str: Command string - + Command's result. 
""" assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG @@ -45,18 +43,16 @@ class NeoGoWallet(CliCommand): wallet_config: Optional[str] = None, account: bool = False, ) -> CommandResult: - """create a new wallet + """Create a new wallet. Args: - wallet (str): Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config (str): Target location of the wallet config file; - conflicts with --wallet flag. - account (bool): Create a new account + wallet: Target location of the wallet file ('-' to read from stdin); + conflicts with --wallet-config flag. + wallet_config: Target location of the wallet config file; conflicts with --wallet flag. + account: Create a new account. Returns: - str: Command string - + Command's result. """ assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG @@ -75,18 +71,16 @@ class NeoGoWallet(CliCommand): wallet: Optional[str] = None, wallet_config: Optional[str] = None, ) -> CommandResult: - """convert addresses from existing NEO2 NEP6-wallet to NEO3 format + """Convert addresses from existing NEO2 NEP6-wallet to NEO3 format. Args: - wallet (str): Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config (str): Target location of the wallet config file; - conflicts with --wallet flag. - out (str): where to write converted wallet + wallet: Target location of the wallet file ('-' to read from stdin); + conflicts with --wallet-config flag. + wallet_config: Target location of the wallet config file; conflicts with --wallet flag. + out: Where to write converted wallet. Returns: - str: Command string - + Command's result. """ assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG @@ -104,17 +98,15 @@ class NeoGoWallet(CliCommand): wallet: Optional[str] = None, wallet_config: Optional[str] = None, ) -> CommandResult: - """add an account to the existing wallet + """Add an account to the existing wallet. Args: - wallet (str): Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config (str): Target location of the wallet config file; - conflicts with --wallet flag. + wallet: Target location of the wallet file ('-' to read from stdin); + conflicts with --wallet-config flag. + wallet_config: Target location of the wallet config file; conflicts with --wallet flag. Returns: - str: Command string - + Command's result. """ assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG @@ -133,18 +125,16 @@ class NeoGoWallet(CliCommand): wallet_config: Optional[str] = None, decrypt: bool = False, ) -> CommandResult: - """check and dump an existing NEO wallet + """Check and dump an existing NEO wallet. Args: - wallet (str): Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config (str): Target location of the wallet config file; - conflicts with --wallet flag. - decrypt (bool): Decrypt encrypted keys. + wallet: Target location of the wallet file ('-' to read from stdin); + conflicts with --wallet-config flag. + wallet_config: Target location of the wallet config file; conflicts with --wallet flag. + decrypt: Decrypt encrypted keys. Returns: - str: Command string - + Command's result. 
""" assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG @@ -163,18 +153,16 @@ class NeoGoWallet(CliCommand): wallet: Optional[str] = None, wallet_config: Optional[str] = None, ) -> CommandResult: - """check and dump an existing NEO wallet + """Check and dump an existing NEO wallet. Args: - wallet (str): Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config (str): Target location of the wallet config file; - conflicts with --wallet flag. - address (str): address to print public keys for + wallet: Target location of the wallet file ('-' to read from stdin); + conflicts with --wallet-config flag. + wallet_config: Target location of the wallet config file; conflicts with --wallet flag. + address: Address to print public keys for. Returns: - str: Command string - + Command's result. """ assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG @@ -193,18 +181,16 @@ class NeoGoWallet(CliCommand): wallet_config: Optional[str] = None, decrypt: bool = False, ) -> CommandResult: - """export keys for address + """Export keys for address. Args: - wallet (str): Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config (str): Target location of the wallet config file; - conflicts with --wallet flag. - decrypt (bool): Decrypt encrypted keys. + wallet: Target location of the wallet file ('-' to read from stdin); + conflicts with --wallet-config flag. + wallet_config: Target location of the wallet config file; conflicts with --wallet flag. + decrypt: Decrypt encrypted keys. Returns: - str: Command string - + Command's result. """ assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG @@ -225,20 +211,18 @@ class NeoGoWallet(CliCommand): wallet: Optional[str] = None, wallet_config: Optional[str] = None, ) -> CommandResult: - """import WIF of a standard signature contract + """Import WIF of a standard signature contract. Args: - wallet (str): Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config (str): Target location of the wallet config file; - conflicts with --wallet flag. - wif (str): WIF to import - name (str): Optional account name - contract (str): Verification script for custom contracts + wallet: Target location of the wallet file ('-' to read from stdin); + conflicts with --wallet-config flag. + wallet_config: Target location of the wallet config file; conflicts with --wallet flag. + wif: WIF to import. + name: Optional account name. + contract: Verification script for custom contracts. Returns: - str: Command string - + Command's result. """ assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG @@ -259,20 +243,18 @@ class NeoGoWallet(CliCommand): wallet: Optional[str] = None, wallet_config: Optional[str] = None, ) -> CommandResult: - """import multisig contract + """Import multisig contract. Args: - wallet (str): Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config (str): Target location of the wallet config file; - conflicts with --wallet flag. - wif (str): WIF to import - name (str): Optional account name - min_number (int): Minimal number of signatures (default: 0) + wallet: Target location of the wallet file ('-' to read from stdin); + conflicts with --wallet-config flag. + wallet_config: Target location of the wallet config file; conflicts with --wallet flag. + wif: WIF to import. 
+ name: Optional account name. + min_number: Minimal number of signatures (default: 0). Returns: - str: Command string - + Command's result. """ assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG @@ -295,22 +277,20 @@ class NeoGoWallet(CliCommand): contract: Optional[str] = None, timeout: int = 10, ) -> CommandResult: - """import multisig contract + """Import deployed contract. Args: - wallet (str): Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config (str): Target location of the wallet config file; - conflicts with --wallet flag. - wif (str): WIF to import - name (str): Optional account name - contract (str): Contract hash or address - rpc_endpoint (str): RPC node address - timeout (int): Timeout for the operation (default: 10s) + wallet: Target location of the wallet file ('-' to read from stdin); + conflicts with --wallet-config flag. + wallet_config: Target location of the wallet config file; conflicts with --wallet flag. + wif: WIF to import. + name: Optional account name. + contract: Contract hash or address. + rpc_endpoint: RPC node address. + timeout: Timeout for the operation (default: 10s). Returns: - str: Command string - + Command's result. """ assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG @@ -330,19 +310,17 @@ class NeoGoWallet(CliCommand): wallet_config: Optional[str] = None, force: bool = False, ) -> CommandResult: - """check and dump an existing NEO wallet + """Remove an account from the wallet. Args: - wallet (str): Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config (str): Target location of the wallet config file; - conflicts with --wallet flag. - address (str): Account address or hash in LE form to be removed - force (bool): Do not ask for a confirmation + wallet: Target location of the wallet file ('-' to read from stdin); + conflicts with --wallet-config flag. + wallet_config: Target location of the wallet config file; conflicts with --wallet flag. + address: Account address or hash in LE form to be removed. + force: Do not ask for a confirmation. Returns: - str: Command string - + Command's result. """ assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG @@ -365,22 +343,27 @@ class NeoGoWallet(CliCommand): out: Optional[str] = None, timeout: int = 10, ) -> CommandResult: - """import multisig contract + """Cosign transaction with multisig/contract/additional account. + + Signs the given (in the input file) context (which must be a transaction signing context) + for the given address using the given wallet. This command can output the resulting JSON + (with additional signature added) right to the console (if no output file and no RPC + endpoint specified) or into a file (which can be the same as input one). If an RPC endpoint + is given it'll also try to construct a complete transaction and send it via RPC (printing + its hash if everything is OK). Args: - wallet (str): Target location of the wallet file ('-' to read from stdin); - conflicts with --wallet-config flag. - wallet_config (str): Target location of the wallet config file; - conflicts with --wallet flag. 
- out (str): file to put JSON transaction to - input_file (str): file with JSON transaction - address (str): Address to use - rpc_endpoint (str): RPC node address - timeout (int): Timeout for the operation (default: 10s) + wallet: Target location of the wallet file ('-' to read from stdin); + conflicts with --wallet-config flag. + wallet_config: Target location of the wallet config file; conflicts with --wallet flag. + out: File to put JSON transaction to. + input_file: File with JSON transaction. + address: Address to use. + rpc_endpoint: RPC node address. + timeout: Timeout for the operation (default: 10s). Returns: - str: Command string - + Command's result. """ assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG diff --git a/src/neofs_testlib/reporter/dummy_reporter.py b/src/neofs_testlib/reporter/dummy_reporter.py index 1d8cfde..9ca206b 100644 --- a/src/neofs_testlib/reporter/dummy_reporter.py +++ b/src/neofs_testlib/reporter/dummy_reporter.py @@ -10,9 +10,7 @@ def _dummy_step(): class DummyReporter(Reporter): - """ - Dummy implementation of reporter, does not store artifacts anywhere. - """ + """Dummy implementation of reporter, does not store artifacts anywhere.""" def step(self, name: str) -> AbstractContextManager: return _dummy_step() diff --git a/src/neofs_testlib/reporter/interfaces.py b/src/neofs_testlib/reporter/interfaces.py index 347f71f..5343678 100644 --- a/src/neofs_testlib/reporter/interfaces.py +++ b/src/neofs_testlib/reporter/interfaces.py @@ -4,25 +4,25 @@ from typing import Any class Reporter(ABC): - """ - Interface that supports storage of test artifacts in some reporting tool. - """ + """Interface that supports storage of test artifacts in some reporting tool.""" @abstractmethod def step(self, name: str) -> AbstractContextManager: - """ - Register a new step in test execution. + """Register a new step in test execution. - :param str name: Name of the step - :return: step context + Args: + name: Name of the step. + + Returns: + Step context. """ @abstractmethod def attach(self, content: Any, file_name: str) -> None: - """ - Attach specified content with given file name to the test report. + """Attach specified content with given file name to the test report. - :param any content: content to attach. If content value is not a string, it will be - converted to a string. - :param str file_name: file name of attachment. + Args: + content: Content to attach. If content value is not a string, it will be + converted to a string. + file_name: File name of attachment. """ diff --git a/src/neofs_testlib/shell/interfaces.py b/src/neofs_testlib/shell/interfaces.py index 97ba7cc..4d6e8ac 100644 --- a/src/neofs_testlib/shell/interfaces.py +++ b/src/neofs_testlib/shell/interfaces.py @@ -5,11 +5,11 @@ from typing import Optional @dataclass class InteractiveInput: - """ - Interactive input for a shell command. + """Interactive input for a shell command. - :attr str prompt_pattern: regular expression that defines expected prompt from the command. - :attr str input: user input that should be supplied to the command in response to the prompt. + Attributes: + prompt_pattern: regular expression that defines expected prompt from the command. + input: user input that should be supplied to the command in response to the prompt. """ prompt_pattern: str @@ -18,14 +18,14 @@ class InteractiveInput: @dataclass class CommandOptions: - """ - Options that control command execution. + """Options that control command execution. 
- :attr list interactive_inputs: user inputs that should be interactively supplied to - the command during execution. - :attr int timeout: timeout for command execution (in seconds). - :attr bool check: controls whether to check return code of the command. Set to False to - ignore non-zero return codes. + Attributes: + interactive_inputs: user inputs that should be interactively supplied to + the command during execution. + timeout: timeout for command execution (in seconds). + check: controls whether to check return code of the command. Set to False to + ignore non-zero return codes. """ interactive_inputs: Optional[list[InteractiveInput]] = None @@ -35,8 +35,12 @@ class CommandOptions: @dataclass class CommandResult: - """ - Represents a result of a command executed via shell. + """Represents a result of a command executed via shell. + + Attributes: + stdout: complete content of stdout stream. + stderr: complete content of stderr stream. + return_code: return code (or exit code) of the command's process. """ stdout: str @@ -45,17 +49,18 @@ class CommandResult: class Shell(ABC): - """ - Interface of a command shell on some system (local or remote). - """ + """Interface of a command shell on some system (local or remote).""" @abstractmethod def exec(self, command: str, options: Optional[CommandOptions] = None) -> CommandResult: - """ - Executes specified command on this shell. To execute interactive command, user inputs - should be specified in *options*. + """Executes specified command on this shell. - :param str command: command to execute on the shell. - :param CommandOptions options: options that control command execution. - :return command result. + To execute interactive command, user inputs should be specified in *options*. + + Args: + command: Command to execute on the shell. + options: Options that control command execution. + + Returns: + Command's result. """ diff --git a/src/neofs_testlib/shell/local_shell.py b/src/neofs_testlib/shell/local_shell.py index 0b8681a..a329990 100644 --- a/src/neofs_testlib/shell/local_shell.py +++ b/src/neofs_testlib/shell/local_shell.py @@ -14,9 +14,7 @@ reporter = get_reporter() class LocalShell(Shell): - """ - Implements command shell on a local machine. - """ + """Implements command shell on a local machine.""" def exec(self, command: str, options: Optional[CommandOptions] = None) -> CommandResult: # If no options were provided, use default options @@ -122,7 +120,8 @@ class LocalShell(Shell): def _get_pexpect_process_result( self, command_process: Optional[pexpect.spawn], command: str ) -> CommandResult: - """ + """Captures output of the process. + If command process is not None, captures output of this process. If command process is None, then command fails when we attempt to start it, in this case we use regular non-interactive process to get it's output. diff --git a/src/neofs_testlib/shell/ssh_shell.py b/src/neofs_testlib/shell/ssh_shell.py index 6ed4f7a..967fbbf 100644 --- a/src/neofs_testlib/shell/ssh_shell.py +++ b/src/neofs_testlib/shell/ssh_shell.py @@ -26,7 +26,7 @@ reporter = get_reporter() class HostIsNotAvailable(Exception): - """Raised when host is not reachable via SSH connection""" + """Raised when host is not reachable via SSH connection.""" def __init__(self, host: str = None): msg = f"Host {host} is not available" @@ -63,8 +63,7 @@ def log_command(func): @lru_cache def _load_private_key(file_path: str, password: Optional[str]) -> PKey: - """ - Loads private key from specified file. + """Loads private key from specified file. 
We support several type formats, however paramiko doesn't provide functionality to determine key type in advance. So we attempt to load file with each of the supported formats and then @@ -81,9 +80,7 @@ def _load_private_key(file_path: str, password: Optional[str]) -> PKey: class SSHShell(Shell): - """ - Implements command shell on a remote machine via SSH connection. - """ + """Implements command shell on a remote machine via SSH connection.""" # Time in seconds to delay after remote command has completed. The delay is required # to allow remote command to flush its output buffer diff --git a/tests/helpers.py b/tests/helpers.py index 1cb393a..8ee11b0 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -4,12 +4,15 @@ from neofs_testlib.shell.interfaces import CommandResult def format_error_details(error: Exception) -> str: - """ - Converts specified exception instance into a string that includes error message - and full stack trace. + """Converts specified exception instance into a string. - :param Exception error: exception to convert. - :return: string containing exception details. + The resulting string includes error message and the full stack trace. + + Args: + error: Exception to convert. + + Returns: + String containing exception details. """ detail_lines = traceback.format_exception( etype=type(error), @@ -20,11 +23,14 @@ def format_error_details(error: Exception) -> str: def get_output_lines(result: CommandResult) -> list[str]: - """ - Converts output of specified command result into separate lines trimmed from whitespaces. - Empty lines are excluded. + """Converts output of specified command result into separate lines. - :param CommandResult result: result which output should be converted. - :return: list of lines extracted from the output. + Whitespaces are trimmed, empty lines are excluded. + + Args: + result: Command result which output should be converted. + + Returns: + List of lines extracted from the output. 
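+
+    Example:
+        Illustrative doctest-style sketch::
+
+            >>> result = CommandResult(stdout="line1\n\nline2\n", stderr="", return_code=0)
+            >>> get_output_lines(result)
+            ['line1', 'line2']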
""" return [line.strip() for line in result.stdout.split("\n") if line.strip()] From 655a86a5b06c390d398a3e1b94eb84e819e77be3 Mon Sep 17 00:00:00 2001 From: Vladimir Avdeev Date: Thu, 6 Oct 2022 10:38:48 +0300 Subject: [PATCH 011/363] Implement neofs-cli lib Signed-off-by: Vladimir Avdeev --- src/neofs_testlib/cli/__init__.py | 7 +- src/neofs_testlib/cli/neofs_cli/__init__.py | 1 + src/neofs_testlib/cli/neofs_cli/accounting.py | 30 ++ src/neofs_testlib/cli/neofs_cli/acl.py | 52 +++ src/neofs_testlib/cli/neofs_cli/cli.py | 26 ++ src/neofs_testlib/cli/neofs_cli/container.py | 250 +++++++++++++ src/neofs_testlib/cli/neofs_cli/netmap.py | 120 +++++++ src/neofs_testlib/cli/neofs_cli/object.py | 331 ++++++++++++++++++ src/neofs_testlib/cli/neofs_cli/version.py | 13 + 9 files changed, 827 insertions(+), 3 deletions(-) create mode 100644 src/neofs_testlib/cli/neofs_cli/__init__.py create mode 100644 src/neofs_testlib/cli/neofs_cli/accounting.py create mode 100644 src/neofs_testlib/cli/neofs_cli/acl.py create mode 100644 src/neofs_testlib/cli/neofs_cli/cli.py create mode 100644 src/neofs_testlib/cli/neofs_cli/container.py create mode 100644 src/neofs_testlib/cli/neofs_cli/netmap.py create mode 100644 src/neofs_testlib/cli/neofs_cli/object.py create mode 100644 src/neofs_testlib/cli/neofs_cli/version.py diff --git a/src/neofs_testlib/cli/__init__.py b/src/neofs_testlib/cli/__init__.py index 27ffbc2..63cd5bc 100644 --- a/src/neofs_testlib/cli/__init__.py +++ b/src/neofs_testlib/cli/__init__.py @@ -1,3 +1,4 @@ -from neofs_testlib.cli.neofs_adm.adm import NeofsAdm -from neofs_testlib.cli.neofs_authmate.authmate import NeofsAuthmate -from neofs_testlib.cli.neogo.go import NeoGo +from neofs_testlib.cli.neofs_adm import NeofsAdm +from neofs_testlib.cli.neofs_authmate import NeofsAuthmate +from neofs_testlib.cli.neofs_cli import NeofsCli +from neofs_testlib.cli.neogo import NeoGo, NetworkType diff --git a/src/neofs_testlib/cli/neofs_cli/__init__.py b/src/neofs_testlib/cli/neofs_cli/__init__.py new file mode 100644 index 0000000..9911fe2 --- /dev/null +++ b/src/neofs_testlib/cli/neofs_cli/__init__.py @@ -0,0 +1 @@ +from neofs_testlib.cli.neofs_cli.cli import NeofsCli diff --git a/src/neofs_testlib/cli/neofs_cli/accounting.py b/src/neofs_testlib/cli/neofs_cli/accounting.py new file mode 100644 index 0000000..b8bdcc2 --- /dev/null +++ b/src/neofs_testlib/cli/neofs_cli/accounting.py @@ -0,0 +1,30 @@ +from typing import Optional + +from neofs_testlib.cli.cli_command import CliCommand +from neofs_testlib.shell import CommandResult + + +class NeofsCliAccounting(CliCommand): + def balance( + self, + wallet: Optional[str] = None, + rpc_endpoint: Optional[str] = None, + address: Optional[str] = None, + owner: Optional[str] = None, + ) -> CommandResult: + """Get internal balance of NeoFS account + + Args: + address: Address of wallet account. + owner: Owner of balance account (omit to use owner from private key). + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + + Returns: + Command's result. 
+
+        """
+        return self._execute(
+            "accounting balance",
+            **{param: value for param, value in locals().items() if param not in ["self"]},
+        )
diff --git a/src/neofs_testlib/cli/neofs_cli/acl.py b/src/neofs_testlib/cli/neofs_cli/acl.py
new file mode 100644
index 0000000..47f86cb
--- /dev/null
+++ b/src/neofs_testlib/cli/neofs_cli/acl.py
@@ -0,0 +1,52 @@
+from typing import Optional
+
+from neofs_testlib.cli.cli_command import CliCommand
+from neofs_testlib.shell import CommandResult
+
+
+class NeofsCliACL(CliCommand):
+    def extended_create(
+        self, cid: str, out: str, file: Optional[str] = None, rule: Optional[list] = None
+    ) -> CommandResult:
+
+        """Create extended ACL from the text representation.
+
+        Rule consists of these blocks: <action> <operation> [<filter1> ...] [<target1> ...]
+        Action is 'allow' or 'deny'.
+        Operation is an object service verb: 'get', 'head', 'put', 'search', 'delete', 'getrange',
+        or 'getrangehash'.
+
+        Filter consists of <typ>:<key><match><value>
+        Typ is 'obj' for object applied filter or 'req' for request applied filter.
+        Key is a valid unicode string corresponding to object or request header key.
+        Well-known system object headers start with '$Object:' prefix.
+        User defined headers start without prefix.
+        Read more about filter keys at:
+        http://github.com/nspcc-dev/neofs-api/blob/master/proto-docs/acl.md#message-eaclrecordfilter
+        Match is '=' for matching and '!=' for non-matching filter.
+        Value is a valid unicode string corresponding to object or request header value.
+
+        Target is
+        'user' for container owner,
+        'system' for Storage nodes in container and Inner Ring nodes,
+        'others' for all other request senders,
+        'pubkey:<key1>,<key2>,...' for exact request sender, where <key> is a hex-encoded 33-byte
+        public key.
+
+        When both '--rule' and '--file' arguments are used, '--rule' records will be placed higher
+        in resulting extended ACL table.
+
+        Args:
+            cid: Container ID.
+            file: Read list of extended ACL table records from text file.
+            out: Save JSON formatted extended ACL table in file.
+            rule: Extended ACL table record to apply.
+
+        Returns:
+            Command's result.
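+
+        Example:
+            Illustrative sketch only; the container ID is a placeholder and the rule
+            follows the grammar described above::
+
+                cli.acl.extended_create(
+                    cid="<container-id>",
+                    out="eacl.json",
+                    rule=["deny get others"],
+                )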
+
+        """
+        return self._execute(
+            "acl extended create",
+            **{param: value for param, value in locals().items() if param not in ["self"]},
+        )
diff --git a/src/neofs_testlib/cli/neofs_cli/cli.py b/src/neofs_testlib/cli/neofs_cli/cli.py
new file mode 100644
index 0000000..c65d86f
--- /dev/null
+++ b/src/neofs_testlib/cli/neofs_cli/cli.py
@@ -0,0 +1,26 @@
+from typing import Optional
+
+from neofs_testlib.cli.neofs_cli.accounting import NeofsCliAccounting
+from neofs_testlib.cli.neofs_cli.acl import NeofsCliACL
+from neofs_testlib.cli.neofs_cli.container import NeofsCliContainer
+from neofs_testlib.cli.neofs_cli.netmap import NeofsCliNetmap
+from neofs_testlib.cli.neofs_cli.object import NeofsCliObject
+from neofs_testlib.cli.neofs_cli.version import NeofsCliVersion
+from neofs_testlib.shell import Shell
+
+
+class NeofsCli:
+    accounting: Optional[NeofsCliAccounting] = None
+    acl: Optional[NeofsCliACL] = None
+    container: Optional[NeofsCliContainer] = None
+    netmap: Optional[NeofsCliNetmap] = None
+    object: Optional[NeofsCliObject] = None
+    version: Optional[NeofsCliVersion] = None
+
+    def __init__(self, shell: Shell, neofs_cli_exec_path: str, config_file: Optional[str] = None):
+        self.accounting = NeofsCliAccounting(shell, neofs_cli_exec_path, config=config_file)
+        self.acl = NeofsCliACL(shell, neofs_cli_exec_path, config=config_file)
+        self.container = NeofsCliContainer(shell, neofs_cli_exec_path, config=config_file)
+        self.netmap = NeofsCliNetmap(shell, neofs_cli_exec_path, config=config_file)
+        self.object = NeofsCliObject(shell, neofs_cli_exec_path, config=config_file)
+        self.version = NeofsCliVersion(shell, neofs_cli_exec_path, config=config_file)
diff --git a/src/neofs_testlib/cli/neofs_cli/container.py b/src/neofs_testlib/cli/neofs_cli/container.py
new file mode 100644
index 0000000..270d820
--- /dev/null
+++ b/src/neofs_testlib/cli/neofs_cli/container.py
@@ -0,0 +1,250 @@
+from typing import Optional
+
+from neofs_testlib.cli.cli_command import CliCommand
+from neofs_testlib.shell import CommandResult
+
+
+class NeofsCliContainer(CliCommand):
+    def create(
+        self,
+        rpc_endpoint: str,
+        wallet: str,
+        address: Optional[str] = None,
+        attributes: Optional[dict] = None,
+        basic_acl: Optional[str] = None,
+        await_mode: bool = False,
+        disable_timestamp: bool = False,
+        name: Optional[str] = None,
+        nonce: Optional[str] = None,
+        policy: Optional[str] = None,
+        session: Optional[str] = None,
+        subnet: Optional[str] = None,
+        ttl: Optional[int] = None,
+        xhdr: Optional[dict] = None,
+    ) -> CommandResult:
+        """
+        Create a new container and register it in the NeoFS.
+        It will be stored in the sidechain when the Inner Ring accepts it.
+
+        Args:
+            address: Address of wallet account.
+            attributes: Comma separated pairs of container attributes in form of
+                Key1=Value1,Key2=Value2.
+            await_mode: Block execution until container is persisted.
+            basic_acl: Hex encoded basic ACL value or keywords like 'public-read-write',
+                'private', 'eacl-public-read' (default "private").
+            disable_timestamp: Disable timestamp container attribute.
+            name: Container name attribute.
+            nonce: UUIDv4 nonce value for container.
+            policy: QL-encoded or JSON-encoded placement policy or path to file with it.
+            rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
+            session: Path to a JSON-encoded container session token.
+            subnet: String representation of container subnetwork.
+            ttl: TTL value in request meta header (default 2).
+            wallet: WIF (NEP-2) string or path to the wallet or binary key.
+ xhdr: Request X-Headers in form of Key=Value. + + Returns: + Command's result. + """ + return self._execute( + "container create", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def delete( + self, + rpc_endpoint: str, + wallet: str, + cid: str, + address: Optional[str] = None, + await_mode: bool = False, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + force: bool = False, + ) -> CommandResult: + """ + Delete an existing container. + Only the owner of the container has permission to remove the container. + + Args: + address: Address of wallet account. + await_mode: Block execution until container is removed. + cid: Container ID. + force: Do not check whether container contains locks and remove immediately. + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + session: Path to a JSON-encoded container session token. + ttl: TTL value in request meta header (default 2). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Request X-Headers in form of Key=Value. + + Returns: + Command's result. + """ + + return self._execute( + "container delete", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def get( + self, + rpc_endpoint: str, + wallet: str, + cid: str, + address: Optional[str] = None, + await_mode: bool = False, + to: Optional[str] = None, + json_mode: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + ) -> CommandResult: + """ + Get container field info. + + Args: + address: Address of wallet account. + await_mode: Block execution until container is removed. + cid: Container ID. + json_mode: Print or dump container in JSON format. + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + to: Path to dump encoded container. + ttl: TTL value in request meta header (default 2). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Request X-Headers in form of Key=Value. + + Returns: + Command's result. + """ + return self._execute( + "container get", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def get_eacl( + self, + rpc_endpoint: str, + wallet: str, + cid: str, + address: Optional[str] = None, + await_mode: bool = False, + to: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + ) -> CommandResult: + """ + Get extended ACL table of container. + + Args: + address: Address of wallet account. + await_mode: Block execution until container is removed. + cid: Container ID. + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + to: Path to dump encoded container. + session: Path to a JSON-encoded container session token. + ttl: TTL value in request meta header (default 2). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Request X-Headers in form of Key=Value. + + Returns: + Command's result. + + """ + return self._execute( + "container get-eacl", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def list( + self, + rpc_endpoint: str, + wallet: str, + address: Optional[str] = None, + owner: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + **params, + ) -> CommandResult: + """ + List all created containers. + + Args: + address: Address of wallet account. + owner: Owner of containers (omit to use owner from private key). 
+ rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + ttl: TTL value in request meta header (default 2). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Request X-Headers in form of Key=Value. + + Returns: + Command's result. + """ + return self._execute( + "container list", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def list_objects( + self, + rpc_endpoint: str, + wallet: str, + cid: str, + address: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + ) -> CommandResult: + """ + List existing objects in container. + + Args: + address: Address of wallet account. + cid: Container ID. + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + ttl: TTL value in request meta header (default 2). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Request X-Headers in form of Key=Value. + + Returns: + Command's result. + """ + return self._execute( + "container list-objects", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def set_eacl( + self, + rpc_endpoint: str, + wallet: str, + cid: str, + address: Optional[str] = None, + await_mode: bool = False, + table: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + ) -> CommandResult: + """ + Set a new extended ACL table for the container. + Container ID in the EACL table will be substituted with the ID from the CLI. + + Args: + address: Address of wallet account. + await_mode: Block execution until container is removed. + cid: Container ID. + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + session: Path to a JSON-encoded container session token. + table: Path to file with JSON or binary encoded EACL table. + ttl: TTL value in request meta header (default 2). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Request X-Headers in form of Key=Value. + + Returns: + Command's result. + """ + return self._execute( + "container set-eacl", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/neofs_testlib/cli/neofs_cli/netmap.py b/src/neofs_testlib/cli/neofs_cli/netmap.py new file mode 100644 index 0000000..7144b8f --- /dev/null +++ b/src/neofs_testlib/cli/neofs_cli/netmap.py @@ -0,0 +1,120 @@ +from typing import Optional + +from neofs_testlib.cli.cli_command import CliCommand +from neofs_testlib.shell import CommandResult + + +class NeofsCliNetmap(CliCommand): + def epoch( + self, + rpc_endpoint: str, + wallet: str, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + ) -> CommandResult: + """ + Get current epoch number. + + Args: + address: Address of wallet account. + generate_key: Generate new private key. + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + ttl: TTL value in request meta header (default 2). + wallet: Path to the wallet or binary key. + xhdr: Request X-Headers in form of Key=Value. + + Returns: + Command's result. + """ + return self._execute( + "netmap epoch", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def netinfo( + self, + rpc_endpoint: str, + wallet: str, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + ) -> CommandResult: + """ + Get information about NeoFS network. 
+
+        Args:
+            address: Address of wallet account.
+            generate_key: Generate new private key.
+            rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
+            ttl: TTL value in request meta header (default 2).
+            wallet: Path to the wallet or binary key.
+            xhdr: Request X-Headers in form of Key=Value.
+
+        Returns:
+            Command's result.
+        """
+        return self._execute(
+            "netmap netinfo",
+            **{param: value for param, value in locals().items() if param not in ["self"]},
+        )
+
+    def nodeinfo(
+        self,
+        rpc_endpoint: str,
+        wallet: str,
+        address: Optional[str] = None,
+        generate_key: bool = False,
+        json: bool = False,
+        ttl: Optional[int] = None,
+        xhdr: Optional[dict] = None,
+    ) -> CommandResult:
+        """
+        Get target node info.
+
+        Args:
+            address: Address of wallet account.
+            generate_key: Generate new private key.
+            json: Print node info in JSON format.
+            rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
+            ttl: TTL value in request meta header (default 2).
+            wallet: Path to the wallet or binary key.
+            xhdr: Request X-Headers in form of Key=Value.
+
+        Returns:
+            Command's result.
+        """
+        return self._execute(
+            "netmap nodeinfo",
+            **{param: value for param, value in locals().items() if param not in ["self"]},
+        )
+
+    def snapshot(
+        self,
+        rpc_endpoint: str,
+        wallet: str,
+        address: Optional[str] = None,
+        generate_key: bool = False,
+        ttl: Optional[int] = None,
+        xhdr: Optional[dict] = None,
+    ) -> CommandResult:
+        """
+        Request current local snapshot of the network map.
+
+        Args:
+            address: Address of wallet account.
+            generate_key: Generate new private key.
+            rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
+            ttl: TTL value in request meta header (default 2).
+            wallet: Path to the wallet or binary key.
+            xhdr: Request X-Headers in form of Key=Value.
+
+        Returns:
+            Command's result.
+        """
+        return self._execute(
+            "netmap snapshot",
+            **{param: value for param, value in locals().items() if param not in ["self"]},
+        )
diff --git a/src/neofs_testlib/cli/neofs_cli/object.py b/src/neofs_testlib/cli/neofs_cli/object.py
new file mode 100644
index 0000000..ba2cbce
--- /dev/null
+++ b/src/neofs_testlib/cli/neofs_cli/object.py
@@ -0,0 +1,331 @@
+from typing import Optional
+
+from neofs_testlib.cli.cli_command import CliCommand
+from neofs_testlib.shell import CommandResult
+
+
+class NeofsCliObject(CliCommand):
+    def delete(
+        self,
+        rpc_endpoint: str,
+        wallet: str,
+        cid: str,
+        oid: str,
+        address: Optional[str] = None,
+        bearer: Optional[str] = None,
+        session: Optional[str] = None,
+        ttl: Optional[int] = None,
+        xhdr: Optional[dict] = None,
+    ) -> CommandResult:
+        """
+        Delete object from NeoFS.
+
+        Args:
+            address: Address of wallet account.
+            bearer: File with signed JSON or binary encoded bearer token.
+            cid: Container ID.
+            oid: Object ID.
+            rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
+            session: Path to a JSON-encoded container session token.
+            ttl: TTL value in request meta header (default 2).
+            wallet: WIF (NEP-2) string or path to the wallet or binary key.
+            xhdr: Request X-Headers in form of Key=Value.
+
+        Returns:
+            Command's result.
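+
+        Example:
+            A sketch of a typical call; `cli` is assumed to be a `NeofsCli`
+            facade instance, and the endpoint, wallet and IDs below are
+            placeholder values:
+
+            >>> cli.object.delete(
+            ...     rpc_endpoint="node.example.com:8080",
+            ...     wallet="wallet.json",
+            ...     cid="ExampleContainerID",
+            ...     oid="ExampleObjectID",
+            ... )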
+ """ + return self._execute( + "object delete", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def get( + self, + rpc_endpoint: str, + wallet: str, + cid: str, + oid: str, + address: Optional[str] = None, + bearer: Optional[str] = None, + file: Optional[str] = None, + header: Optional[str] = None, + no_progress: bool = False, + raw: bool = False, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + ) -> CommandResult: + """ + Get object from NeoFS. + + Args: + address: Address of wallet account. + bearer: File with signed JSON or binary encoded bearer token. + cid: Container ID. + file: File to write object payload to. Default: stdout. + header: File to write header to. Default: stdout. + no_progress: Do not show progress bar. + oid: Object ID. + raw: Set raw request option. + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + session: Path to a JSON-encoded container session token. + ttl: TTL value in request meta header (default 2). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Request X-Headers in form of Key=Value. + + Returns: + Command's result. + """ + return self._execute( + "object get", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def hash( + self, + rpc_endpoint: str, + wallet: str, + cid: str, + oid: str, + address: Optional[str] = None, + bearer: Optional[str] = None, + range: Optional[str] = None, + salt: Optional[str] = None, + ttl: Optional[int] = None, + hash_type: Optional[str] = None, + xhdr: Optional[dict] = None, + ) -> CommandResult: + """ + Get object hash. + + Args: + address: Address of wallet account. + bearer: File with signed JSON or binary encoded bearer token. + cid: Container ID. + oid: Object ID. + range: Range to take hash from in the form offset1:length1,... + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + salt: Salt in hex format. + ttl: TTL value in request meta header (default 2). + hash_type: Hash type. Either 'sha256' or 'tz' (default "sha256"). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Request X-Headers in form of Key=Value. + + Returns: + Command's result. + """ + return self._execute( + "object hash", + **{ + param: value for param, value in locals().items() if param not in ["self", "params"] + }, + ) + + def head( + self, + rpc_endpoint: str, + wallet: str, + cid: str, + oid: str, + address: Optional[str] = None, + bearer: Optional[str] = None, + file: Optional[str] = None, + json_mode: bool = False, + main_only: bool = False, + proto: bool = False, + raw: bool = False, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + ) -> CommandResult: + """ + Get object header. + + Args: + address: Address of wallet account. + bearer: File with signed JSON or binary encoded bearer token. + cid: Container ID. + file: File to write object payload to. Default: stdout. + json_mode: Marshal output in JSON. + main_only: Return only main fields. + oid: Object ID. + proto: Marshal output in Protobuf. + raw: Set raw request option. + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + session: Path to a JSON-encoded container session token. + ttl: TTL value in request meta header (default 2). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Request X-Headers in form of Key=Value. + + Returns: + Command's result. 
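+
+        Example:
+            A sketch of a typical call; `cli` is assumed to be a `NeofsCli`
+            facade instance, and the endpoint, wallet and IDs below are
+            placeholder values:
+
+            >>> cli.object.head(
+            ...     rpc_endpoint="node.example.com:8080",
+            ...     wallet="wallet.json",
+            ...     cid="ExampleContainerID",
+            ...     oid="ExampleObjectID",
+            ...     json_mode=True,
+            ... )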
+ """ + return self._execute( + "object head", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def lock( + self, + rpc_endpoint: str, + wallet: str, + cid: str, + oid: str, + lifetime: int, + address: Optional[str] = None, + bearer: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + ) -> CommandResult: + """ + Lock object in container. + + Args: + address: Address of wallet account. + bearer: File with signed JSON or binary encoded bearer token. + cid: Container ID. + oid: Object ID. + lifetime: Object lifetime. + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + session: Path to a JSON-encoded container session token. + ttl: TTL value in request meta header (default 2). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Request X-Headers in form of Key=Value. + + Returns: + Command's result. + """ + return self._execute( + "object lock", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def put( + self, + rpc_endpoint: str, + wallet: str, + cid: str, + file: str, + address: Optional[str] = None, + attributes: Optional[dict] = None, + bearer: Optional[str] = None, + disable_filename: bool = False, + disable_timestamp: bool = False, + expire_at: Optional[int] = None, + no_progress: bool = False, + notify: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + ) -> CommandResult: + """ + Put object to NeoFS. + + Args: + address: Address of wallet account. + attributes: User attributes in form of Key1=Value1,Key2=Value2. + bearer: File with signed JSON or binary encoded bearer token. + cid: Container ID. + disable_filename: Do not set well-known filename attribute. + disable_timestamp: Do not set well-known timestamp attribute. + expire_at: Last epoch in the life of the object. + file: File with object payload. + no_progress: Do not show progress bar. + notify: Object notification in the form of *epoch*:*topic*; '-' + topic means using default. + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + session: Path to a JSON-encoded container session token. + ttl: TTL value in request meta header (default 2). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Request X-Headers in form of Key=Value. + + Returns: + Command's result. + """ + return self._execute( + "object put", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def range( + self, + rpc_endpoint: str, + wallet: str, + cid: str, + oid: str, + range: str, + address: Optional[str] = None, + bearer: Optional[str] = None, + file: Optional[str] = None, + json_mode: bool = False, + raw: bool = False, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + ) -> CommandResult: + """ + Get payload range data of an object. + + Args: + address: Address of wallet account. + bearer: File with signed JSON or binary encoded bearer token. + cid: Container ID. + file: File to write object payload to. Default: stdout. + json_mode: Marshal output in JSON. + oid: Object ID. + range: Range to take data from in the form offset:length. + raw: Set raw request option. + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + session: Path to a JSON-encoded container session token. + ttl: TTL value in request meta header (default 2). + wallet: WIF (NEP-2) string or path to the wallet or binary key. 
+ xhdr: Request X-Headers in form of Key=Value. + + Returns: + Command's result. + """ + return self._execute( + "object range", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def search( + self, + rpc_endpoint: str, + wallet: str, + cid: str, + address: Optional[str] = None, + bearer: Optional[str] = None, + filters: Optional[list] = None, + oid: Optional[str] = None, + phy: bool = False, + root: bool = False, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + ) -> CommandResult: + """ + Search object. + + Args: + address: Address of wallet account. + bearer: File with signed JSON or binary encoded bearer token. + cid: Container ID. + filters: Repeated filter expressions or files with protobuf JSON. + oid: Object ID. + phy: Search physically stored objects. + root: Search for user objects. + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + session: Path to a JSON-encoded container session token. + ttl: TTL value in request meta header (default 2). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Request X-Headers in form of Key=Value. + + Returns: + Command's result. + """ + return self._execute( + "object search", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/neofs_testlib/cli/neofs_cli/version.py b/src/neofs_testlib/cli/neofs_cli/version.py new file mode 100644 index 0000000..6f22613 --- /dev/null +++ b/src/neofs_testlib/cli/neofs_cli/version.py @@ -0,0 +1,13 @@ +from neofs_testlib.cli.cli_command import CliCommand +from neofs_testlib.shell import CommandResult + + +class NeofsCliVersion(CliCommand): + def get(self) -> CommandResult: + """ + Application version and NeoFS API compatibility. + + Returns: + Command's result. + """ + return self._execute("", version=True) From c5ff64b3fd6d315743638e5259d8ee58700a1067 Mon Sep 17 00:00:00 2001 From: Vladimir Domnich Date: Tue, 4 Oct 2022 00:03:24 +0400 Subject: [PATCH 012/363] [#5] Implement plugin-based system for reporter Signed-off-by: Vladimir Domnich --- README.md | 61 ++++++++++++++++++++++++++ pyproject.toml | 8 +++- requirements.txt | 4 +- src/neofs_testlib/__init__.py | 59 +++++++++++++++++++++++++ src/neofs_testlib/reporter/__init__.py | 26 +++++++---- 5 files changed, 148 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index f097eee..6539d02 100644 --- a/README.md +++ b/README.md @@ -7,6 +7,67 @@ Library can be installed via pip: $ pip install neofs-testlib ``` +## Configuration +Library components can be configured explicitly via code or implicitly via configuration file that supports plugin-based extensions. + +By default testlib uses configuration from file `.neofs-testlib.yaml` that must be located next to the process entry point. Path to the file can be customized via environment variable `NEOFS_TESTLIB_CONFIG`. Config file should have either YAML or JSON format. + +### Reporter Configuration +Currently only reporter component can be configured. Function `set_reporter` assigns current reporter that should be used in the library: + +```python +from neofs_testlib.reporter import AllureReporter, set_reporter + +reporter = AllureReporter() +set_reporter(reporter) +``` + +Assignment of reporter must happen before any testlib modules were imported. Otherwise, testlib code will bind to default dummy reporter. It is not convenient to call utility functions at specific time, so alternative approach is to set reporter in configuration file. 
To do that, specify the name of the reporter plugin in the configuration parameter `reporter`:
+```yaml
+reporter: allure
+```
+
+Testlib provides two built-in reporters: `allure` and `dummy`. However, you can use any custom reporter via [plugins](#plugins).
+
+## Plugins
+Testlib uses [entrypoint specification](https://docs.python.org/3/library/importlib.metadata.html) for plugins. Testlib supports the following entrypoint groups for plugins:
+ - `neofs.testlib.reporter` - group for reporter plugins. Plugin should be a class that implements interface `neofs_testlib.reporter.interfaces.Reporter`.
+
+### Example reporter plugin
+In this example we will consider two Python projects:
+ - Project "my_neofs_plugins" where we will build a plugin that extends testlib functionality.
+ - Project "my_neofs_tests" that uses "neofs_testlib" and "my_neofs_plugins" to build some tests.
+
+Let's say we want to implement some custom reporter that can be used as a plugin for testlib. Pseudo-code of implementation can look like this:
+```python
+# my_neofs_plugins/src/x/y/z/custom_reporter.py
+from contextlib import AbstractContextManager
+from neofs_testlib.reporter.interfaces import Reporter
+
+
+class CustomReporter(Reporter):
+    def step(self, name: str) -> AbstractContextManager:
+        ... some implementation ...
+
+    def attach(self, content: Any, file_name: str) -> None:
+        ... some implementation ...
+```
+
+Then in `pyproject.toml` of "my_neofs_plugins" we should register an entrypoint for this plugin. The entrypoint must belong to the group `neofs.testlib.reporter`:
+```toml
+# my_neofs_plugins/pyproject.toml
+[project.entry-points."neofs.testlib.reporter"]
+my_custom_reporter = "x.y.z.custom_reporter:CustomReporter"
+```
+
+Finally, to use this reporter in our test project "my_neofs_tests", we should specify its entrypoint name in testlib config:
+```yaml
+# my_neofs_tests/.neofs-testlib.yaml
+reporter: my_custom_reporter
+```
+
+Detailed information on registering entrypoints can be found at [setuptools docs](https://setuptools.pypa.io/en/latest/userguide/entry_point.html).
+
 ## Library structure
 The library provides the following primary components:
 * `cli` - wrappers on top of neoFS command-line tools. These wrappers execute on a shell and provide type-safe interface for interacting with the tools.
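
For illustration, a minimal sketch of driving `neofs-cli` through these wrappers; the import path mirrors the file layout introduced above, and the executable path is an assumption that must point to a real `neofs-cli` binary:

```python
from neofs_testlib.cli.neofs_cli.cli import NeofsCli
from neofs_testlib.shell import LocalShell

# Run commands on the local machine; "neofs-cli" is assumed to be on PATH.
cli = NeofsCli(LocalShell(), neofs_cli_exec_path="neofs-cli")
result = cli.version.get()  # executes the tool's version command
```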
diff --git a/pyproject.toml b/pyproject.toml index 7d5b913..9355d1c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,5 @@ [build-system] -requires = ["setuptools>=63.0.0", "wheel"] +requires = ["setuptools>=65.0.0", "wheel"] build-backend = "setuptools.build_meta" [project] @@ -17,8 +17,10 @@ classifiers = [ keywords = ["neofs", "test"] dependencies = [ "allure-python-commons>=2.9.45", + "importlib_metadata>=5.0; python_version < '3.10'", "paramiko>=2.10.3", "pexpect>=4.8.0", + "pyyaml>=6.0", ] requires-python = ">=3.9" @@ -28,6 +30,10 @@ dev = ["black", "bumpver", "isort", "pre-commit"] [project.urls] Homepage = "https://github.com/nspcc-dev/neofs-testlib" +[project.entry-points."neofs.testlib.reporter"] +allure = "neofs_testlib.reporter.allure_reporter:AllureReporter" +dummy = "neofs_testlib.reporter.dummy_reporter:DummyReporter" + [tool.isort] profile = "black" src_paths = ["src", "tests"] diff --git a/requirements.txt b/requirements.txt index 9b7968c..988cbe7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,8 @@ allure-python-commons==2.9.45 +importlib_metadata==5.0.0 paramiko==2.10.3 pexpect==4.8.0 +pyyaml==6.0 # Dev dependencies black==22.8.0 @@ -10,5 +12,5 @@ pre-commit==2.20.0 # Packaging dependencies build==0.8.0 -setuptools==63.2.0 +setuptools==65.3.0 twine==4.0.1 diff --git a/src/neofs_testlib/__init__.py b/src/neofs_testlib/__init__.py index 3dc1f76..4827efe 100644 --- a/src/neofs_testlib/__init__.py +++ b/src/neofs_testlib/__init__.py @@ -1 +1,60 @@ +import json +import os +import sys +from typing import Any, Optional + +import yaml + +from neofs_testlib.reporter import set_reporter + +if sys.version_info < (3, 10): + from importlib_metadata import entry_points +else: + from importlib.metadata import entry_points + + __version__ = "0.1.0" + + +def __read_config() -> dict[str, Any]: + """ + Loads configuration of library from default file .neofs-testlib.yaml or from + the file configured via environment variable NEOFS_TESTLIB_CONFIG. + """ + file_path = os.getenv("NEOFS_TESTLIB_CONFIG", ".neofs-testlib.yaml") + if os.path.exists(file_path): + _, extension = os.path.splitext(file_path) + if extension == ".yaml": + with open(file_path, "r") as file: + return yaml.full_load(file) + if extension == ".json": + with open(file_path, "r") as file: + return json.load(file) + return {} + + +def __load_plugin(group: str, name: Optional[str]) -> Any: + """ + Loads plugin using entry point specification. + """ + if not name: + return None + plugins = entry_points(group=group) + if name not in plugins.names: + return None + plugin = plugins[name] + return plugin.load() + + +def __init_lib(): + """ + Initializes singleton components in the library. 
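+
+    Currently this loads the reporter plugin named in the config (if any)
+    and installs it via set_reporter.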
+ """ + config = __read_config() + + reporter = __load_plugin("neofs.testlib.reporter", config.get("reporter")) + if reporter: + set_reporter(reporter) + + +__init_lib() diff --git a/src/neofs_testlib/reporter/__init__.py b/src/neofs_testlib/reporter/__init__.py index 5e3c5fc..4ffbc29 100644 --- a/src/neofs_testlib/reporter/__init__.py +++ b/src/neofs_testlib/reporter/__init__.py @@ -1,14 +1,24 @@ -import os - from neofs_testlib.reporter.allure_reporter import AllureReporter from neofs_testlib.reporter.dummy_reporter import DummyReporter from neofs_testlib.reporter.interfaces import Reporter +__reporter = DummyReporter() + def get_reporter() -> Reporter: - # TODO: in scope of reporter implementation task here we will have extendable - # solution for configuring and providing reporter for the library - if os.getenv("TESTLIB_REPORTER_TYPE", "DUMMY") == "DUMMY": - return DummyReporter() - else: - return AllureReporter() + """ + Returns reporter that library should use for storing artifacts. + """ + return __reporter + + +def set_reporter(reporter: Reporter) -> None: + """ + Assigns specified reporter for storing test artifacts produced by the library. + + This function must be called before any testlib modules are imported. + Recommended way to assign reporter is via configuration file; please, refer to + testlib documentation for details. + """ + global __reporter + __reporter = reporter From 834ddede36df0dd510355fe650f03bc8e1f07575 Mon Sep 17 00:00:00 2001 From: Vladimir Domnich Date: Thu, 6 Oct 2022 14:17:19 +0400 Subject: [PATCH 013/363] [#5] Remove testlib config file support In order to make library as flexible as possible we will try to use configuration methods similar to function `logging.dictConfig` from the standard library. So, we won't support configuration file `.neofs-testlib.yaml`, but will allow users to call `configure` method that will load plugins and initialize library components. Signed-off-by: Vladimir Domnich --- README.md | 53 ++++----- pyproject.toml | 4 +- requirements.txt | 1 - src/neofs_testlib/__init__.py | 59 ---------- src/neofs_testlib/plugins/__init__.py | 25 +++++ src/neofs_testlib/reporter/__init__.py | 29 ++--- .../{allure_reporter.py => allure_handler.py} | 14 ++- src/neofs_testlib/reporter/dummy_reporter.py | 19 ---- src/neofs_testlib/reporter/interfaces.py | 4 +- src/neofs_testlib/reporter/reporter.py | 102 ++++++++++++++++++ tests/test_reporter.py | 73 +++++++++++++ 11 files changed, 248 insertions(+), 135 deletions(-) create mode 100644 src/neofs_testlib/plugins/__init__.py rename src/neofs_testlib/reporter/{allure_reporter.py => allure_handler.py} (74%) delete mode 100644 src/neofs_testlib/reporter/dummy_reporter.py create mode 100644 src/neofs_testlib/reporter/reporter.py create mode 100644 tests/test_reporter.py diff --git a/README.md b/README.md index 6539d02..cd4593c 100644 --- a/README.md +++ b/README.md @@ -8,44 +8,44 @@ $ pip install neofs-testlib ``` ## Configuration -Library components can be configured explicitly via code or implicitly via configuration file that supports plugin-based extensions. - -By default testlib uses configuration from file `.neofs-testlib.yaml` that must be located next to the process entry point. Path to the file can be customized via environment variable `NEOFS_TESTLIB_CONFIG`. Config file should have either YAML or JSON format. +Some library components support configuration that allows dynamic loading of extensions via plugins. Configuration of such components is described in this section. 
 ### Reporter Configuration
-Currently only reporter component can be configured. Function `set_reporter` assigns current reporter that should be used in the library:
+Reporter is a singleton component that is used by the library to store test artifacts.
+
+Reporter sends artifacts to handlers that are responsible for actually storing them in a particular system. By default the reporter is initialized without any handlers and won't take any actions to store the artifacts. To add handlers directly via code you can use the method `register_handler`:
 
 ```python
-from neofs_testlib.reporter import AllureReporter, set_reporter
+from neofs_testlib.reporter import AllureHandler, get_reporter
 
-reporter = AllureReporter()
-set_reporter(reporter)
+get_reporter().register_handler(AllureHandler())
 ```
 
-Assignment of reporter must happen before any testlib modules were imported. Otherwise, testlib code will bind to default dummy reporter. It is not convenient to call utility functions at specific time, so alternative approach is to set reporter in configuration file. To do that, specify the name of the reporter plugin in the configuration parameter `reporter`:
-```yaml
-reporter: allure
-```
-
-Testlib provides two built-in reporters: `allure` and `dummy`. However, you can use any custom reporter via [plugins](#plugins).
+This registration should happen early in the test session, because any artifacts produced before a handler is registered won't be stored anywhere.
+
+An alternative approach for registering handlers is to use the method `configure`. It is similar to the method [dictConfig](https://docs.python.org/3/library/logging.config.html#logging.config.dictConfig) in the sense that it receives a config structure that describes handlers that should be registered in the reporter. Each handler is defined by its plugin name; for example, to register the built-in Allure handler, we can use the following config:
+
+```python
+get_reporter().configure({ "handlers": [{"plugin_name": "allure"}] })
+```
 
 ## Plugins
 Testlib uses [entrypoint specification](https://docs.python.org/3/library/importlib.metadata.html) for plugins. Testlib supports the following entrypoint groups for plugins:
- - `neofs.testlib.reporter` - group for reporter plugins. Plugin should be a class that implements interface `neofs_testlib.reporter.interfaces.Reporter`.
+ - `neofs.testlib.reporter` - group for reporter handler plugins. Plugin should be a class that implements interface `neofs_testlib.reporter.interfaces.ReporterHandler`.
 
 ### Example reporter plugin
 In this example we will consider two Python projects:
  - Project "my_neofs_plugins" where we will build a plugin that extends testlib functionality.
  - Project "my_neofs_tests" that uses "neofs_testlib" and "my_neofs_plugins" to build some tests.
 
-Let's say we want to implement some custom reporter that can be used as a plugin for testlib. Pseudo-code of implementation can look like this:
+Let's say we want to implement some custom reporter handler that can be used as a plugin for testlib. Pseudo-code of implementation can look like this:
 ```python
-# my_neofs_plugins/src/x/y/z/custom_reporter.py
+# File my_neofs_plugins/src/foo/bar/custom_handler.py
 from contextlib import AbstractContextManager
-from neofs_testlib.reporter.interfaces import Reporter
+from neofs_testlib.reporter import ReporterHandler
 
 
-class CustomReporter(Reporter):
+class CustomHandler(ReporterHandler):
     def step(self, name: str) -> AbstractContextManager:
         ... some implementation ...
 
     def attach(self, content: Any, file_name: str) -> None:
         ... some implementation ...
 ```
 
-Then in `pyproject.toml` of "my_neofs_plugins" we should register an entrypoint for this plugin. The entrypoint must belong to the group `neofs.testlib.reporter`:
+Then in the file `pyproject.toml` of "my_neofs_plugins" we should register an entrypoint for this plugin. The entrypoint must belong to the group `neofs.testlib.reporter`:
 ```toml
-# my_neofs_plugins/pyproject.toml
+# File my_neofs_plugins/pyproject.toml
 [project.entry-points."neofs.testlib.reporter"]
-my_custom_reporter = "x.y.z.custom_reporter:CustomReporter"
+my_custom_handler = "foo.bar.custom_handler:CustomHandler"
 ```
 
-Finally, to use this reporter in our test project "my_neofs_tests", we should specify its entrypoint name in testlib config:
-```yaml
-# my_neofs_tests/.neofs-testlib.yaml
-reporter: my_custom_reporter
+Finally, to use this handler in our test project "my_neofs_tests", we should configure the reporter with the name of the handler plugin:
+
+```python
+# File my_neofs_tests/src/conftest.py
+from neofs_testlib.reporter import get_reporter
+
+get_reporter().configure({ "handlers": [{"plugin_name": "my_custom_handler"}] })
 ```
 
-Detailed information on registering entrypoints can be found at [setuptools docs](https://setuptools.pypa.io/en/latest/userguide/entry_point.html).
+Detailed information about registering entrypoints can be found at [setuptools docs](https://setuptools.pypa.io/en/latest/userguide/entry_point.html).
 
 ## Library structure
 The library provides the following primary components:
diff --git a/pyproject.toml b/pyproject.toml
index 9355d1c..d4b3eec 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -20,7 +20,6 @@ dependencies = [
     "importlib_metadata>=5.0; python_version < '3.10'",
     "paramiko>=2.10.3",
     "pexpect>=4.8.0",
-    "pyyaml>=6.0",
 ]
 
 requires-python = ">=3.9"
@@ -31,8 +30,7 @@ dev = ["black", "bumpver", "isort", "pre-commit"]
 Homepage = "https://github.com/nspcc-dev/neofs-testlib"
 
 [project.entry-points."neofs.testlib.reporter"]
-allure = "neofs_testlib.reporter.allure_reporter:AllureReporter"
-dummy = "neofs_testlib.reporter.dummy_reporter:DummyReporter"
+allure = "neofs_testlib.reporter.allure_handler:AllureHandler"
 
 [tool.isort]
 profile = "black"
 src_paths = ["src", "tests"]
diff --git a/requirements.txt b/requirements.txt
index 988cbe7..39b6bd3 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,7 +2,6 @@ allure-python-commons==2.9.45
 importlib_metadata==5.0.0
 paramiko==2.10.3
 pexpect==4.8.0
-pyyaml==6.0
 
 # Dev dependencies
 black==22.8.0
diff --git a/src/neofs_testlib/__init__.py b/src/neofs_testlib/__init__.py
index 4827efe..3dc1f76 100644
--- a/src/neofs_testlib/__init__.py
+++ b/src/neofs_testlib/__init__.py
@@ -1,60 +1 @@
-import json
-import os
-import sys
-from typing import Any, Optional
-
-import yaml
-
-from neofs_testlib.reporter import set_reporter
-
-if sys.version_info < (3, 10):
-    from importlib_metadata import entry_points
-else:
-    from importlib.metadata import entry_points
-
-
 __version__ = "0.1.0"
-
-
-def __read_config() -> dict[str, Any]:
-    """
-    Loads configuration of library from default file .neofs-testlib.yaml or from
-    the file configured via environment variable NEOFS_TESTLIB_CONFIG.
- """ - file_path = os.getenv("NEOFS_TESTLIB_CONFIG", ".neofs-testlib.yaml") - if os.path.exists(file_path): - _, extension = os.path.splitext(file_path) - if extension == ".yaml": - with open(file_path, "r") as file: - return yaml.full_load(file) - if extension == ".json": - with open(file_path, "r") as file: - return json.load(file) - return {} - - -def __load_plugin(group: str, name: Optional[str]) -> Any: - """ - Loads plugin using entry point specification. - """ - if not name: - return None - plugins = entry_points(group=group) - if name not in plugins.names: - return None - plugin = plugins[name] - return plugin.load() - - -def __init_lib(): - """ - Initializes singleton components in the library. - """ - config = __read_config() - - reporter = __load_plugin("neofs.testlib.reporter", config.get("reporter")) - if reporter: - set_reporter(reporter) - - -__init_lib() diff --git a/src/neofs_testlib/plugins/__init__.py b/src/neofs_testlib/plugins/__init__.py new file mode 100644 index 0000000..fcd7acc --- /dev/null +++ b/src/neofs_testlib/plugins/__init__.py @@ -0,0 +1,25 @@ +import sys +from typing import Any + +if sys.version_info < (3, 10): + # On Python prior 3.10 we need to use backport of entry points + from importlib_metadata import entry_points +else: + from importlib.metadata import entry_points + + +def load_plugin(plugin_group: str, name: str) -> Any: + """Loads plugin using entry point specification. + + Args: + plugin_group: Name of plugin group that contains the plugin. + name: Name of the plugin in the group. + + Returns: + Plugin class if the plugin was found; otherwise returns None. + """ + plugins = entry_points(group=plugin_group) + if name not in plugins.names: + return None + plugin = plugins[name] + return plugin.load() diff --git a/src/neofs_testlib/reporter/__init__.py b/src/neofs_testlib/reporter/__init__.py index 4ffbc29..ebfb9fd 100644 --- a/src/neofs_testlib/reporter/__init__.py +++ b/src/neofs_testlib/reporter/__init__.py @@ -1,24 +1,17 @@ -from neofs_testlib.reporter.allure_reporter import AllureReporter -from neofs_testlib.reporter.dummy_reporter import DummyReporter -from neofs_testlib.reporter.interfaces import Reporter +from neofs_testlib.reporter.allure_handler import AllureHandler +from neofs_testlib.reporter.interfaces import ReporterHandler +from neofs_testlib.reporter.reporter import Reporter -__reporter = DummyReporter() +__reporter = Reporter() def get_reporter() -> Reporter: - """ - Returns reporter that library should use for storing artifacts. + """Returns reporter that the library should use for storing artifacts. + + Reporter is a singleton instance that can be configured with multiple handlers that store + artifacts in various systems. Most common use case is to use single handler. + + Returns: + Singleton reporter instance. """ return __reporter - - -def set_reporter(reporter: Reporter) -> None: - """ - Assigns specified reporter for storing test artifacts produced by the library. - - This function must be called before any testlib modules are imported. - Recommended way to assign reporter is via configuration file; please, refer to - testlib documentation for details. 
- """ - global __reporter - __reporter = reporter diff --git a/src/neofs_testlib/reporter/allure_reporter.py b/src/neofs_testlib/reporter/allure_handler.py similarity index 74% rename from src/neofs_testlib/reporter/allure_reporter.py rename to src/neofs_testlib/reporter/allure_handler.py index 2d99527..9c7f978 100644 --- a/src/neofs_testlib/reporter/allure_reporter.py +++ b/src/neofs_testlib/reporter/allure_handler.py @@ -6,13 +6,11 @@ from typing import Any import allure from allure import attachment_type -from neofs_testlib.reporter.interfaces import Reporter +from neofs_testlib.reporter.interfaces import ReporterHandler -class AllureReporter(Reporter): - """ - Implements storing of test artifacts in Allure report. - """ +class AllureHandler(ReporterHandler): + """Handler that stores test artifacts in Allure report.""" def step(self, name: str) -> AbstractContextManager: name = shorten(name, width=70, placeholder="...") @@ -25,9 +23,9 @@ class AllureReporter(Reporter): allure.attach(body, attachment_name, attachment_type) def _resolve_attachment_type(self, extension: str) -> attachment_type: - """ - Try to find matching Allure attachment type by extension. If no match was found, - default to TXT format. + """Try to find matching Allure attachment type by extension. + + If no match was found, default to TXT format. """ extension = extension.lower() return next( diff --git a/src/neofs_testlib/reporter/dummy_reporter.py b/src/neofs_testlib/reporter/dummy_reporter.py deleted file mode 100644 index 9ca206b..0000000 --- a/src/neofs_testlib/reporter/dummy_reporter.py +++ /dev/null @@ -1,19 +0,0 @@ -from contextlib import AbstractContextManager, contextmanager -from typing import Any - -from neofs_testlib.reporter.interfaces import Reporter - - -@contextmanager -def _dummy_step(): - yield - - -class DummyReporter(Reporter): - """Dummy implementation of reporter, does not store artifacts anywhere.""" - - def step(self, name: str) -> AbstractContextManager: - return _dummy_step() - - def attach(self, content: Any, file_name: str) -> None: - pass diff --git a/src/neofs_testlib/reporter/interfaces.py b/src/neofs_testlib/reporter/interfaces.py index 5343678..f2f6ce4 100644 --- a/src/neofs_testlib/reporter/interfaces.py +++ b/src/neofs_testlib/reporter/interfaces.py @@ -3,8 +3,8 @@ from contextlib import AbstractContextManager from typing import Any -class Reporter(ABC): - """Interface that supports storage of test artifacts in some reporting tool.""" +class ReporterHandler(ABC): + """Interface of handler that stores test artifacts in some reporting tool.""" @abstractmethod def step(self, name: str) -> AbstractContextManager: diff --git a/src/neofs_testlib/reporter/reporter.py b/src/neofs_testlib/reporter/reporter.py new file mode 100644 index 0000000..3e9e394 --- /dev/null +++ b/src/neofs_testlib/reporter/reporter.py @@ -0,0 +1,102 @@ +from contextlib import AbstractContextManager, contextmanager +from types import TracebackType +from typing import Any, Optional + +from neofs_testlib.plugins import load_plugin +from neofs_testlib.reporter.interfaces import ReporterHandler + + +@contextmanager +def _empty_step(): + yield + + +class Reporter: + """Root reporter that sends artifacts to handlers.""" + + handlers: list[ReporterHandler] + + def __init__(self) -> None: + super().__init__() + self.handlers = [] + + def register_handler(self, handler: ReporterHandler) -> None: + """Register a new handler for the reporter. + + Args: + handler: Handler instance to add to the reporter. 
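+
+        Example:
+            A minimal sketch using the built-in Allure handler:
+
+            >>> from neofs_testlib.reporter import AllureHandler, get_reporter
+            >>> get_reporter().register_handler(AllureHandler())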
+ """ + self.handlers.append(handler) + + def configure(self, config: dict[str, Any]) -> None: + """Configure handlers in the reporter from specified config. + + All existing handlers will be removed from the reporter. + + Args: + config: dictionary with reporter configuration. + """ + # Reset current configuration + self.handlers = [] + + # Setup handlers from the specified config + handler_configs = config.get("handlers", []) + for handler_config in handler_configs: + handler_class = load_plugin("neofs.testlib.reporter", handler_config["plugin_name"]) + self.register_handler(handler_class()) + + def step(self, name: str) -> AbstractContextManager: + """Register a new step in test execution. + + Args: + name: Name of the step. + + Returns: + Step context. + """ + if not self.handlers: + return _empty_step() + + step_contexts = [handler.step(name) for handler in self.handlers] + return AggregateContextManager(step_contexts) + + def attach(self, content: Any, file_name: str) -> None: + """Attach specified content with given file name to the test report. + + Args: + content: Content to attach. If content value is not a string, it will be + converted to a string. + file_name: File name of attachment. + """ + for handler in self.handlers: + handler.attach(content, file_name) + + +class AggregateContextManager(AbstractContextManager): + """Aggregates multiple context managers in a single context.""" + + contexts: list[AbstractContextManager] + + def __init__(self, contexts: list[AbstractContextManager]) -> None: + super().__init__() + self.contexts = contexts + + def __enter__(self): + for context in self.contexts: + context.__enter__() + return self + + def __exit__( + self, + exc_type: Optional[type[BaseException]], + exc_value: Optional[BaseException], + traceback: Optional[TracebackType], + ) -> Optional[bool]: + suppress_decisions = [] + for context in self.contexts: + suppress_decision = context.__exit__(exc_type, exc_value, traceback) + suppress_decisions.append(suppress_decision) + + # If all context agreed to suppress exception, then suppress it; + # otherwise return None to reraise + return True if all(suppress_decisions) else None diff --git a/tests/test_reporter.py b/tests/test_reporter.py new file mode 100644 index 0000000..2dec8fb --- /dev/null +++ b/tests/test_reporter.py @@ -0,0 +1,73 @@ +from contextlib import AbstractContextManager +from types import TracebackType +from typing import Optional +from unittest import TestCase +from unittest.mock import MagicMock + +from neofs_testlib.reporter import Reporter + + +class TestLocalShellInteractive(TestCase): + def setUp(self): + self.reporter = Reporter() + + def test_handler_step_is_invoked(self): + handler = MagicMock() + self.reporter.register_handler(handler) + + with self.reporter.step("test_step"): + pass + + handler.step.assert_called_once_with("test_step") + + def test_two_handler_steps_are_invoked(self): + handler1 = MagicMock() + handler2 = MagicMock() + + self.reporter.register_handler(handler1) + self.reporter.register_handler(handler2) + + with self.reporter.step("test_step"): + pass + + handler1.step.assert_called_once_with("test_step") + handler2.step.assert_called_once_with("test_step") + + def test_handlers_can_suppress_exception(self): + handler1 = MagicMock() + handler1.step = MagicMock(return_value=StubContext(suppress_exception=True)) + handler2 = MagicMock() + handler2.step = MagicMock(return_value=StubContext(suppress_exception=True)) + + self.reporter.register_handler(handler1) + 
self.reporter.register_handler(handler2) + + with self.reporter.step("test_step"): + raise ValueError("Test exception") + + def test_handler_can_override_exception_suppression(self): + handler1 = MagicMock() + handler1.step = MagicMock(return_value=StubContext(suppress_exception=True)) + handler2 = MagicMock() + handler2.step = MagicMock(return_value=StubContext(suppress_exception=False)) + + self.reporter.register_handler(handler1) + self.reporter.register_handler(handler2) + + with self.assertRaises(ValueError): + with self.reporter.step("test_step"): + raise ValueError("Test exception") + + +class StubContext(AbstractContextManager): + def __init__(self, suppress_exception: bool) -> None: + super().__init__() + self.suppress_exception = suppress_exception + + def __exit__( + self, + exc_type: Optional[type[BaseException]], + exc_value: Optional[BaseException], + traceback: Optional[TracebackType], + ) -> Optional[bool]: + return self.suppress_exception From a750dfd1489763d2966cf0baba6840d84745bcb6 Mon Sep 17 00:00:00 2001 From: Vladimir Domnich Date: Wed, 5 Oct 2022 20:41:47 +0400 Subject: [PATCH 014/363] [#9] Implement hosting package Package defines interface for host management and provides implementation for docker host (local or remote). Other hosts can be added via plugins. Signed-off-by: Vladimir Domnich --- README.md | 11 + pyproject.toml | 7 +- requirements.txt | 2 + src/neofs_testlib/hosting/__init__.py | 3 + src/neofs_testlib/hosting/config.py | 70 +++++++ src/neofs_testlib/hosting/docker_host.py | 193 ++++++++++++++++++ src/neofs_testlib/hosting/hosting.py | 107 ++++++++++ src/neofs_testlib/hosting/interfaces.py | 124 +++++++++++ src/neofs_testlib/reporter/allure_handler.py | 4 +- src/neofs_testlib/reporter/reporter.py | 2 +- src/neofs_testlib/shell/__init__.py | 2 +- src/neofs_testlib/shell/command_inspectors.py | 13 ++ src/neofs_testlib/shell/interfaces.py | 35 +++- src/neofs_testlib/shell/local_shell.py | 9 +- src/neofs_testlib/shell/ssh_shell.py | 19 +- 15 files changed, 582 insertions(+), 19 deletions(-) create mode 100644 src/neofs_testlib/hosting/__init__.py create mode 100644 src/neofs_testlib/hosting/config.py create mode 100644 src/neofs_testlib/hosting/docker_host.py create mode 100644 src/neofs_testlib/hosting/hosting.py create mode 100644 src/neofs_testlib/hosting/interfaces.py create mode 100644 src/neofs_testlib/shell/command_inspectors.py diff --git a/README.md b/README.md index cd4593c..ed28dfc 100644 --- a/README.md +++ b/README.md @@ -29,6 +29,16 @@ Alternative approach for registering handlers is to use method `configure`. It i get_reporter().configure({ "handlers": [{"plugin_name": "allure"}] }) ``` +### Hosting Configuration +Hosting component is a class that represents infrastructure (machines/containers/services) where neoFS is hosted. Interaction with specific infrastructure instance (host) is encapsulated in classes that implement interface `neofs_testlib.hosting.Host`. To pass information about hosts to the `Hosting` class in runtime we use method `configure`: + +```python +from neofs_testlib.hosting import Hosting + +hosting = Hosting() +hosting.configure({ "hosts": [{ "address": "localhost", "plugin_name": "docker" ... }]}) +``` + ## Plugins Testlib uses [entrypoint specification](https://docs.python.org/3/library/importlib.metadata.html) for plugins. Testlib supports the following entrypoint groups for plugins: - `neofs.testlib.reporter` - group for reporter handler plugins. 
Plugin should be a class that implements interface `neofs_testlib.reporter.interfaces.ReporterHandler`. @@ -74,6 +84,7 @@ Detailed information about registering entrypoints can be found at [setuptools d ## Library structure The library provides the following primary components: * `cli` - wrappers on top of neoFS command-line tools. These wrappers execute on a shell and provide type-safe interface for interacting with the tools. + * `hosting` - management of infrastructure (docker, virtual machines, services where neoFS is hosted). The library provides host implementation for docker environment (when neoFS services are running as docker containers). Support for other hosts is provided via plugins. * `reporter` - abstraction on top of test reporting tool like Allure. Components of the library will report their steps and attach artifacts to the configured reporter instance. * `shell` - shells that can be used to execute commands. Currently library provides local shell (on machine that runs the code) or SSH shell that connects to a remote machine via SSH. diff --git a/pyproject.toml b/pyproject.toml index d4b3eec..78c88f5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,5 @@ [build-system] -requires = ["setuptools>=65.0.0", "wheel"] +requires = ["setuptools>=65.0.0", "wheel"] build-backend = "setuptools.build_meta" [project] @@ -17,9 +17,11 @@ classifiers = [ keywords = ["neofs", "test"] dependencies = [ "allure-python-commons>=2.9.45", + "docker>=4.4.0", "importlib_metadata>=5.0; python_version < '3.10'", "paramiko>=2.10.3", "pexpect>=4.8.0", + "requests>=2.28.0", ] requires-python = ">=3.9" @@ -32,6 +34,9 @@ Homepage = "https://github.com/nspcc-dev/neofs-testlib" [project.entry-points."neofs.testlib.reporter"] allure = "neofs_testlib.reporter.allure_handler:AllureHandler" +[project.entry-points."neofs.testlib.hosting"] +docker = "neofs_testlib.hosting.docker_host:DockerHost" + [tool.isort] profile = "black" src_paths = ["src", "tests"] diff --git a/requirements.txt b/requirements.txt index 39b6bd3..294f406 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,9 @@ allure-python-commons==2.9.45 +docker==4.4.0 importlib_metadata==5.0.0 paramiko==2.10.3 pexpect==4.8.0 +requests==2.28.1 # Dev dependencies black==22.8.0 diff --git a/src/neofs_testlib/hosting/__init__.py b/src/neofs_testlib/hosting/__init__.py new file mode 100644 index 0000000..d3f1f8f --- /dev/null +++ b/src/neofs_testlib/hosting/__init__.py @@ -0,0 +1,3 @@ +from neofs_testlib.hosting.config import CLIConfig, HostConfig, ServiceConfig +from neofs_testlib.hosting.hosting import Hosting +from neofs_testlib.hosting.interfaces import Host diff --git a/src/neofs_testlib/hosting/config.py b/src/neofs_testlib/hosting/config.py new file mode 100644 index 0000000..febc848 --- /dev/null +++ b/src/neofs_testlib/hosting/config.py @@ -0,0 +1,70 @@ +from dataclasses import dataclass, field, fields +from typing import Any + + +@dataclass +class ParsedAttributes: + """Base class for data structures representing parsed attributes from configs.""" + + @classmethod + def parse(cls, attributes: dict[str, Any]): + # Pick attributes supported by the class + field_names = set(field.name for field in fields(cls)) + supported_attributes = { + key: value for key, value in attributes.items() if key in field_names + } + return cls(**supported_attributes) + + +@dataclass +class CLIConfig: + """Describes CLI tool on some host. + + Attributes: + name: Name of the tool. + exec_path: Path to executable file of the tool. 
+ attributes: Dict with extra information about the tool. + """ + + name: str + exec_path: str + attributes: dict[str, str] = field(default_factory=dict) + + +@dataclass +class ServiceConfig: + """Describes neoFS service on some host. + + Attributes: + name: Name of the service that uniquely identifies it across all hosts. + attributes: Dict with extra information about the service. For example, we can store + name of docker container (or name of systemd service), endpoints, path to wallet, + path to configuration file, etc. + """ + + name: str + attributes: dict[str, str] = field(default_factory=dict) + + +@dataclass +class HostConfig: + """Describes machine that hosts neoFS services. + + Attributes: + plugin_name: Name of plugin that should be used to manage the host. + address: Address of the machine (IP or DNS name). + services: List of services hosted on the machine. + clis: List of CLI tools available on the machine. + attributes: Dict with extra information about the host. For example, we can store + connection parameters in this dict. + """ + + plugin_name: str + address: str + services: list[ServiceConfig] = field(default_factory=list) + clis: list[CLIConfig] = field(default_factory=list) + attributes: dict[str, str] = field(default_factory=dict) + + def __post_init__(self) -> None: + self.services = [ServiceConfig(**service) for service in self.services or []] + self.clis = [CLIConfig(**cli) for cli in self.clis or []] diff --git a/src/neofs_testlib/hosting/docker_host.py b/src/neofs_testlib/hosting/docker_host.py new file mode 100644 index 0000000..34ebb87 --- /dev/null +++ b/src/neofs_testlib/hosting/docker_host.py @@ -0,0 +1,193 @@ +import json +import logging +import os +import time +from dataclasses import dataclass +from datetime import datetime +from typing import Any, Optional + +import docker +from requests import HTTPError + +from neofs_testlib.hosting.config import ParsedAttributes +from neofs_testlib.hosting.interfaces import Host +from neofs_testlib.shell import LocalShell, Shell, SSHShell +from neofs_testlib.shell.command_inspectors import SudoInspector + +logger = logging.getLogger("neofs.testlib.hosting") + + +@dataclass +class HostAttributes(ParsedAttributes): + """Represents attributes of host where Docker with neoFS runs. + + Attributes: + sudo_shell: Specifies whether shell commands should be auto-prefixed with sudo. + docker_endpoint: Protocol, address and port of docker where neoFS runs. Recommended format + is tcp socket (https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-socket-option), + for example: tcp://{address}:2375 (where 2375 is default docker port). + ssh_login: Login for SSH connection to the machine where docker runs. + ssh_password: Password for SSH connection. + ssh_private_key_path: Path to private key for SSH connection. + ssh_private_key_passphrase: Passphrase for the private key. + """ + + sudo_shell: bool = False + docker_endpoint: Optional[str] = None + ssh_login: Optional[str] = None + ssh_password: Optional[str] = None + ssh_private_key_path: Optional[str] = None + ssh_private_key_passphrase: Optional[str] = None + + +@dataclass +class ServiceAttributes(ParsedAttributes): + """Represents attributes of service running as Docker container. + + Attributes: + container_name: Name of Docker container where the service runs. + volume_name: Name of volume where storage node service stores the data. + start_timeout: Timeout (in seconds) for service to start. + stop_timeout: Timeout (in seconds) for service to stop. 
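+
+        Example:
+            A sketch of parsing a service's config attributes; the names and
+            values below are placeholders:
+
+            >>> ServiceAttributes.parse({"container_name": "storage01", "start_timeout": 90})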
+    """
+
+    container_name: str
+    volume_name: Optional[str] = None
+    start_timeout: int = 60
+    stop_timeout: int = 60
+
+
+class DockerHost(Host):
+    """Manages services hosted in Docker containers running on a local or remote machine."""
+
+    def get_shell(self) -> Shell:
+        host_attributes = HostAttributes.parse(self._config.attributes)
+        command_inspectors = []
+        if host_attributes.sudo_shell:
+            command_inspectors.append(SudoInspector())
+
+        if not host_attributes.ssh_login:
+            # If there is no SSH connection to the host, use local shell
+            return LocalShell(command_inspectors)
+
+        # If there is SSH connection to the host, use SSH shell
+        return SSHShell(
+            host=self._config.address,
+            login=host_attributes.ssh_login,
+            password=host_attributes.ssh_password,
+            private_key_path=host_attributes.ssh_private_key_path,
+            private_key_passphrase=host_attributes.ssh_private_key_passphrase,
+            command_inspectors=command_inspectors,
+        )
+
+    def start_host(self) -> None:
+        # We emulate starting the machine by starting all services
+        # As an alternative we can probably try to start the docker service...
+        for service_config in self._config.services:
+            self.start_service(service_config.name)
+
+    def stop_host(self, mode: str = "") -> None:
+        # The mode argument from the Host interface is currently ignored for docker hosts.
+        # We emulate stopping the machine by stopping all services
+        # As an alternative we can probably try to stop the docker service...
+        for service_config in self._config.services:
+            self.stop_service(service_config.name)
+
+    def start_service(self, service_name: str) -> None:
+        service_attributes = self._get_service_attributes(service_name)
+
+        client = self._get_docker_client()
+        client.start(service_attributes.container_name)
+
+        self._wait_for_container_to_be_in_state(
+            container_name=service_attributes.container_name,
+            expected_state="running",
+            timeout=service_attributes.start_timeout,
+        )
+
+    def stop_service(self, service_name: str) -> None:
+        service_attributes = self._get_service_attributes(service_name)
+
+        client = self._get_docker_client()
+        client.stop(service_attributes.container_name)
+
+        self._wait_for_container_to_be_in_state(
+            container_name=service_attributes.container_name,
+            expected_state="exited",
+            timeout=service_attributes.stop_timeout,
+        )
+
+    def delete_storage_node_data(self, service_name: str) -> None:
+        service_attributes = self._get_service_attributes(service_name)
+
+        client = self._get_docker_client()
+        volume_info = client.inspect_volume(service_attributes.volume_name)
+        volume_path = volume_info["Mountpoint"]
+
+        shell = self.get_shell()
+        shell.exec(f"rm -rf {volume_path}/*")
+
+    def dump_logs(
+        self,
+        directory_path: str,
+        since: Optional[datetime] = None,
+        until: Optional[datetime] = None,
+    ) -> None:
+        client = self._get_docker_client()
+        for service_config in self._config.services:
+            container_name = self._get_service_attributes(service_config.name).container_name
+            try:
+                logs = client.logs(container_name, since=since, until=until)
+            except HTTPError as exc:
+                logger.info(f"Got exception while dumping logs of '{container_name}': {exc}")
+                continue
+
+            # Save logs to the directory
+            file_path = os.path.join(
+                directory_path,
+                f"{self._config.address}-{container_name}-log.txt",
+            )
+            with open(file_path, "wb") as file:
+                file.write(logs)
+
+    def _get_service_attributes(self, service_name: str) -> ServiceAttributes:
+        service_config = self.get_service_config(service_name)
+        return ServiceAttributes.parse(service_config.attributes)
+
+    def _get_docker_client(self) -> docker.APIClient:
+        docker_endpoint = HostAttributes.parse(self._config.attributes).docker_endpoint
+
+        if not docker_endpoint:
+            # Use default docker client that talks to unix socket
+            return docker.APIClient()
+
+        # Otherwise use docker client that talks to the specified endpoint
+        return docker.APIClient(base_url=docker_endpoint)
+
+    def _get_container_by_name(self, container_name: str) -> Optional[dict[str, Any]]:
+        client = self._get_docker_client()
+        containers = client.containers(all=True)
+
+        for container in containers:
+            # Names in local docker environment are prefixed with /
+            clean_names = set(name.strip("/") for name in container["Names"])
+            if container_name in clean_names:
+                return container
+        return None
+
+    def _wait_for_container_to_be_in_state(
+        self, container_name: str, expected_state: str, timeout: int
+    ) -> None:
+        iterations = 10
+        iteration_wait_time = timeout / iterations
+
+        # To speed things up, we break the timeout into smaller iterations and check container
+        # state several times. This way waiting stops as soon as the container reaches the
+        # expected state.
+        for _ in range(iterations):
+            container = self._get_container_by_name(container_name)
+            logger.debug(f"Current container state:\n{json.dumps(container, indent=2)}")
+
+            if container and container["State"] == expected_state:
+                return
+            time.sleep(iteration_wait_time)
+
+        raise RuntimeError(f"Container {container_name} is not in {expected_state} state.")
diff --git a/src/neofs_testlib/hosting/hosting.py b/src/neofs_testlib/hosting/hosting.py
new file mode 100644
index 0000000..d127f25
--- /dev/null
+++ b/src/neofs_testlib/hosting/hosting.py
@@ -0,0 +1,107 @@
+import re
+from typing import Any
+
+from neofs_testlib.hosting.config import HostConfig, ServiceConfig
+from neofs_testlib.hosting.interfaces import Host
+from neofs_testlib.plugins import load_plugin
+
+
+class Hosting:
+    """Hosting manages infrastructure where neoFS runs (machines and neoFS services)."""
+
+    _hosts: list[Host]
+    _host_by_address: dict[str, Host]
+    _host_by_service_name: dict[str, Host]
+
+    @property
+    def hosts(self) -> list[Host]:
+        """Returns all hosts registered in the hosting.
+
+        Returns:
+            List of hosts.
+        """
+        return self._hosts
+
+    def configure(self, config: dict[str, Any]) -> None:
+        """Configures hosts from specified config.
+
+        All existing hosts will be removed from the hosting.
+
+        Args:
+            config: Dictionary with hosting configuration.
+        """
+        hosts = []
+        host_by_address = {}
+        host_by_service_name = {}
+
+        host_configs = [HostConfig(**host_config) for host_config in config["hosts"]]
+        for host_config in host_configs:
+            host_class = load_plugin("neofs.testlib.hosting", host_config.plugin_name)
+            host = host_class(host_config)
+
+            hosts.append(host)
+            host_by_address[host_config.address] = host
+
+            for service_config in host_config.services:
+                host_by_service_name[service_config.name] = host
+
+        self._hosts = hosts
+        self._host_by_address = host_by_address
+        self._host_by_service_name = host_by_service_name
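+
+    # Example of a config dict accepted by `configure` (illustrative values; the plugin
+    # name must match an entrypoint registered in the "neofs.testlib.hosting" group):
+    #   {
+    #       "hosts": [
+    #           {
+    #               "plugin_name": "docker",
+    #               "address": "10.0.0.1",
+    #               "services": [{"name": "storage01", "attributes": {"container_name": "s01"}}],
+    #           }
+    #       ]
+    #   }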
+
+    def get_host_by_address(self, host_address: str) -> Host:
+        """Returns host with specified address.
+
+        Args:
+            host_address: Address of the host.
+
+        Returns:
+            Host that manages machine with specified address.
+        """
+        host = self._host_by_address.get(host_address)
+        if host is None:
+            raise ValueError(f"Unknown host address: '{host_address}'")
+        return host
+
+    def get_host_by_service(self, service_name: str) -> Host:
+        """Returns host where service with specified name is located.
+
+        Args:
+            service_name: Name of the service.
+
+        Returns:
+            Host that manages machine where service is located.
+        """
+        host = self._host_by_service_name.get(service_name)
+        if host is None:
+            raise ValueError(f"Unknown service name: '{service_name}'")
+        return host
+
+    def get_service_config(self, service_name: str) -> ServiceConfig:
+        """Returns config of service with specified name.
+
+        Args:
+            service_name: Name of the service.
+
+        Returns:
+            Config of the service.
+        """
+        host = self.get_host_by_service(service_name)
+        return host.get_service_config(service_name)
+
+    def find_service_configs(self, service_name_pattern: str) -> list[ServiceConfig]:
+        """Finds configs of services whose names match the specified regular expression.
+
+        Args:
+            service_name_pattern: Regular expression for service names.
+
+        Returns:
+            List of service configs that match the regular expression.
+        """
+        service_configs = [
+            service_config
+            for host in self.hosts
+            for service_config in host.config.services
+            if re.match(service_name_pattern, service_config.name)
+        ]
+        return service_configs
diff --git a/src/neofs_testlib/hosting/interfaces.py b/src/neofs_testlib/hosting/interfaces.py
new file mode 100644
index 0000000..b004689
--- /dev/null
+++ b/src/neofs_testlib/hosting/interfaces.py
@@ -0,0 +1,124 @@
+from abc import ABC, abstractmethod
+from datetime import datetime
+from typing import Optional
+
+from neofs_testlib.hosting.config import CLIConfig, HostConfig, ServiceConfig
+from neofs_testlib.shell.interfaces import Shell
+
+
+class Host(ABC):
+    """Interface of a host machine where neoFS services are running.
+
+    Allows to manage the machine and neoFS services that are hosted on it.
+    """
+
+    def __init__(self, config: HostConfig) -> None:
+        self._config = config
+        self._service_config_by_name = {
+            service_config.name: service_config for service_config in config.services
+        }
+        self._cli_config_by_name = {cli_config.name: cli_config for cli_config in config.clis}
+
+    @property
+    def config(self) -> HostConfig:
+        """Returns config of the host.
+
+        Returns:
+            Config of this host.
+        """
+        return self._config
+
+    def get_service_config(self, service_name: str) -> ServiceConfig:
+        """Returns config of service with specified name.
+
+        The service must be hosted on this host.
+
+        Args:
+            service_name: Name of the service.
+
+        Returns:
+            Config of the service.
+        """
+        service_config = self._service_config_by_name.get(service_name)
+        if service_config is None:
+            raise ValueError(f"Unknown service name: '{service_name}'")
+        return service_config
+
+    def get_cli_config(self, cli_name: str) -> CLIConfig:
+        """Returns config of CLI tool with specified name.
+
+        The CLI must be located on this host.
+
+        Args:
+            cli_name: Name of the CLI tool.
+
+        Returns:
+            Config of the CLI tool.
+        """
+        cli_config = self._cli_config_by_name.get(cli_name)
+        if cli_config is None:
+            raise ValueError(f"Unknown CLI name: '{cli_name}'")
+        return cli_config
+
+    @abstractmethod
+    def get_shell(self) -> Shell:
+        """Returns shell to this host.
+
+        Returns:
+            Shell that executes commands on this host.
+        """
+
+    @abstractmethod
+    def start_host(self) -> None:
+        """Starts the host machine."""
+
+    @abstractmethod
+    def stop_host(self, mode: str) -> None:
+        """Stops the host machine.
+
+        Args:
+            mode: Specifies how the host should be stopped. Mode might be host-specific.
+        """
+
+    @abstractmethod
+    def start_service(self, service_name: str) -> None:
+        """Starts the service with specified name and waits until it starts.
+
+        The service must be hosted on this host.
+ + Args: + service_name: Name of the service to start. + """ + + @abstractmethod + def stop_service(self, service_name: str) -> None: + """Stops the service with specified name and waits until it stops. + + The service must be hosted on this host. + + Args: + service_name: Name of the service to stop. + """ + + @abstractmethod + def delete_storage_node_data(self, service_name: str) -> None: + """Erases all data of the storage node with specified name. + + Args: + service_name: Name of storage node service. + """ + + @abstractmethod + def dump_logs( + self, + directory_path: str, + since: Optional[datetime] = None, + until: Optional[datetime] = None, + ) -> None: + """Dumps logs of all services on the host to specified directory. + + Args: + directory_path: Path to the directory where logs should be stored. + since: If set, limits the time from which logs should be collected. Must be in UTC. + until: If set, limits the time until which logs should be collected. Must be in UTC. + """ diff --git a/src/neofs_testlib/reporter/allure_handler.py b/src/neofs_testlib/reporter/allure_handler.py index 9c7f978..0fceffb 100644 --- a/src/neofs_testlib/reporter/allure_handler.py +++ b/src/neofs_testlib/reporter/allure_handler.py @@ -20,7 +20,7 @@ class AllureHandler(ReporterHandler): attachment_name, extension = os.path.splitext(file_name) attachment_type = self._resolve_attachment_type(extension) - allure.attach(body, attachment_name, attachment_type) + allure.attach(body, attachment_name, attachment_type, extension) def _resolve_attachment_type(self, extension: str) -> attachment_type: """Try to find matching Allure attachment type by extension. @@ -30,5 +30,5 @@ class AllureHandler(ReporterHandler): extension = extension.lower() return next( (allure_type for allure_type in attachment_type if allure_type.extension == extension), - attachment_type.TXT, + attachment_type.TEXT, ) diff --git a/src/neofs_testlib/reporter/reporter.py b/src/neofs_testlib/reporter/reporter.py index 3e9e394..d12cb05 100644 --- a/src/neofs_testlib/reporter/reporter.py +++ b/src/neofs_testlib/reporter/reporter.py @@ -34,7 +34,7 @@ class Reporter: All existing handlers will be removed from the reporter. Args: - config: dictionary with reporter configuration. + config: Dictionary with reporter configuration. """ # Reset current configuration self.handlers = [] diff --git a/src/neofs_testlib/shell/__init__.py b/src/neofs_testlib/shell/__init__.py index c51f3b9..3fd63bd 100644 --- a/src/neofs_testlib/shell/__init__.py +++ b/src/neofs_testlib/shell/__init__.py @@ -1,3 +1,3 @@ -from neofs_testlib.shell.interfaces import CommandResult, Shell +from neofs_testlib.shell.interfaces import CommandOptions, CommandResult, Shell from neofs_testlib.shell.local_shell import LocalShell from neofs_testlib.shell.ssh_shell import SSHShell diff --git a/src/neofs_testlib/shell/command_inspectors.py b/src/neofs_testlib/shell/command_inspectors.py new file mode 100644 index 0000000..9537549 --- /dev/null +++ b/src/neofs_testlib/shell/command_inspectors.py @@ -0,0 +1,13 @@ +from neofs_testlib.shell.interfaces import CommandInspector + + +class SudoInspector(CommandInspector): + """Prepends command with sudo. + + If command is already prepended with sudo, then has no effect. 
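+
+    Example (illustrative):
+        >>> SudoInspector().inspect("ls -la")
+        'sudo ls -la'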
+ """ + + def inspect(self, command: str) -> str: + if not command.startswith("sudo"): + return f"sudo {command}" + return command diff --git a/src/neofs_testlib/shell/interfaces.py b/src/neofs_testlib/shell/interfaces.py index 4d6e8ac..52e77a3 100644 --- a/src/neofs_testlib/shell/interfaces.py +++ b/src/neofs_testlib/shell/interfaces.py @@ -8,27 +8,46 @@ class InteractiveInput: """Interactive input for a shell command. Attributes: - prompt_pattern: regular expression that defines expected prompt from the command. - input: user input that should be supplied to the command in response to the prompt. + prompt_pattern: Regular expression that defines expected prompt from the command. + input: User input that should be supplied to the command in response to the prompt. """ prompt_pattern: str input: str +class CommandInspector(ABC): + """Interface of inspector that processes command text before execution.""" + + @abstractmethod + def inspect(self, command: str) -> str: + """Transforms command text and returns modified command. + + Args: + command: Command to transform with this inspector. + + Returns: + Transformed command text. + """ + + @dataclass class CommandOptions: """Options that control command execution. Attributes: - interactive_inputs: user inputs that should be interactively supplied to + interactive_inputs: User inputs that should be interactively supplied to the command during execution. - timeout: timeout for command execution (in seconds). - check: controls whether to check return code of the command. Set to False to + close_stdin: Controls whether stdin stream should be closed after feeding interactive + inputs or after requesting non-interactive command. If shell implementation does not + support this functionality, it should ignore this flag without raising an error. + timeout: Timeout for command execution (in seconds). + check: Controls whether to check return code of the command. Set to False to ignore non-zero return codes. """ interactive_inputs: Optional[list[InteractiveInput]] = None + close_stdin: bool = False timeout: int = 30 check: bool = True @@ -38,9 +57,9 @@ class CommandResult: """Represents a result of a command executed via shell. Attributes: - stdout: complete content of stdout stream. - stderr: complete content of stderr stream. - return_code: return code (or exit code) of the command's process. + stdout: Complete content of stdout stream. + stderr: Complete content of stderr stream. + return_code: Return code (or exit code) of the command's process. 
""" stdout: str diff --git a/src/neofs_testlib/shell/local_shell.py b/src/neofs_testlib/shell/local_shell.py index a329990..b20988c 100644 --- a/src/neofs_testlib/shell/local_shell.py +++ b/src/neofs_testlib/shell/local_shell.py @@ -7,7 +7,7 @@ from typing import IO, Optional import pexpect from neofs_testlib.reporter import get_reporter -from neofs_testlib.shell.interfaces import CommandOptions, CommandResult, Shell +from neofs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell logger = logging.getLogger("neofs.testlib.shell") reporter = get_reporter() @@ -16,10 +16,17 @@ reporter = get_reporter() class LocalShell(Shell): """Implements command shell on a local machine.""" + def __init__(self, command_inspectors: Optional[list[CommandInspector]] = None) -> None: + super().__init__() + self.command_inspectors = command_inspectors or [] + def exec(self, command: str, options: Optional[CommandOptions] = None) -> CommandResult: # If no options were provided, use default options options = options or CommandOptions() + for inspector in self.command_inspectors: + command = inspector.inspect(command) + logger.info(f"Executing command: {command}") if options.interactive_inputs: return self._exec_interactive(command, options) diff --git a/src/neofs_testlib/shell/ssh_shell.py b/src/neofs_testlib/shell/ssh_shell.py index 967fbbf..d56e2c4 100644 --- a/src/neofs_testlib/shell/ssh_shell.py +++ b/src/neofs_testlib/shell/ssh_shell.py @@ -19,7 +19,7 @@ from paramiko import ( from paramiko.ssh_exception import AuthenticationException from neofs_testlib.reporter import get_reporter -from neofs_testlib.shell.interfaces import CommandOptions, CommandResult, Shell +from neofs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell logger = logging.getLogger("neofs.testlib.shell") reporter = get_reporter() @@ -97,13 +97,16 @@ class SSHShell(Shell): private_key_path: Optional[str] = None, private_key_passphrase: Optional[str] = None, port: str = "22", + command_inspectors: Optional[list[CommandInspector]] = None, ) -> None: + super().__init__() self.host = host self.port = port self.login = login self.password = password self.private_key_path = private_key_path self.private_key_passphrase = private_key_passphrase + self.command_inspectors = command_inspectors or [] self.__connection: Optional[SSHClient] = None @property @@ -118,6 +121,9 @@ class SSHShell(Shell): def exec(self, command: str, options: Optional[CommandOptions] = None) -> CommandResult: options = options or CommandOptions() + for inspector in self.command_inspectors: + command = inspector.inspect(command) + if options.interactive_inputs: result = self._exec_interactive(command, options) else: @@ -125,8 +131,7 @@ class SSHShell(Shell): if options.check and result.return_code != 0: raise RuntimeError( - f"Command: {command}\nreturn code: {result.return_code}" - f"\nOutput: {result.stdout}" + f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}" ) return result @@ -141,7 +146,8 @@ class SSHShell(Shell): stdin.write(input) except OSError: logger.exception(f"Error while feeding {input} into command {command}") - # stdin.close() + if options.close_stdin: + stdin.close() # Wait for command to complete and flush its buffer before we attempt to read output sleep(self.DELAY_AFTER_EXIT) @@ -158,7 +164,10 @@ class SSHShell(Shell): @log_command def _exec_non_interactive(self, command: str, options: CommandOptions) -> CommandResult: try: - _, stdout, stderr = 
self._connection.exec_command(command, timeout=options.timeout)
+            stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout)
+
+            if options.close_stdin:
+                stdin.close()
 
             # Wait for command to complete and flush its buffer before we attempt to read output
             return_code = stdout.channel.recv_exit_status()

From 13301fe52e78ebfb1db4bdfbeb81f8bd42e42797 Mon Sep 17 00:00:00 2001
From: Vladimir Domnich
Date: Thu, 13 Oct 2022 12:26:46 +0000
Subject: [PATCH 015/363] Bump version 0.1.0 -> 0.2.0

Signed-off-by: Vladimir Domnich
---
 pyproject.toml                | 4 ++--
 src/neofs_testlib/__init__.py | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 78c88f5..b5d3407 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "neofs-testlib"
-version = "0.1.0"
+version = "0.2.0"
 description = "Building blocks and utilities to facilitate development of automated tests for NeoFS system"
 readme = "README.md"
 authors = [{ name = "NSPCC", email = "info@nspcc.ru" }]
@@ -47,7 +47,7 @@ line-length = 100
 target-version = ["py39"]
 
 [tool.bumpver]
-current_version = "0.1.0"
+current_version = "0.2.0"
 version_pattern = "MAJOR.MINOR.PATCH"
 commit_message = "Bump version {old_version} -> {new_version}"
 commit = false
diff --git a/src/neofs_testlib/__init__.py b/src/neofs_testlib/__init__.py
index 3dc1f76..d3ec452 100644
--- a/src/neofs_testlib/__init__.py
+++ b/src/neofs_testlib/__init__.py
@@ -1 +1 @@
-__version__ = "0.1.0"
+__version__ = "0.2.0"

From 64430486f1180dba3237f0dc5f3c00a92d91afaa Mon Sep 17 00:00:00 2001
From: anastasia prasolova
Date: Sun, 16 Oct 2022 20:13:10 +0300
Subject: [PATCH 016/363] Add CODEOWNERS file

Signed-off-by: anastasia prasolova
---
 .github/CODEOWNERS | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 .github/CODEOWNERS

diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 0000000..c2a7a3b
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1 @@
+* @aprasolova @vdomnich-yadro @dansingjulia @yadro-vavdeev

From a79b608b4b78283c284d580198cf5585f1a68514 Mon Sep 17 00:00:00 2001
From: Vladimir Domnich
Date: Mon, 17 Oct 2022 14:15:25 +0400
Subject: [PATCH 017/363] Fix hanging of ssh shell

SSH shell was hanging while waiting for the remote process exit code.
The hang occurred when stdout/stderr contained a large amount of data.
The fix changes how we read the data and how we wait for the remote
process's exit code.
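
The gist of the fix, as a minimal illustrative sketch (assuming `stdout` and
`stderr` are the paramiko `Channel` objects of the remote process; the real
implementation lives in the new `_read_channels` helper below):

    chunks = []
    while not stdout.exit_status_ready():
        # Drain the channel while the command runs so its buffer never fills up
        if stdout.recv_ready():
            chunks.append(stdout.recv(4096))
    # Only after the buffers are drained is it safe to block on the exit status
    return_code = stdout.recv_exit_status()

stderr is drained the same way via recv_stderr_ready()/recv_stderr().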
Signed-off-by: Vladimir Domnich --- src/neofs_testlib/shell/ssh_shell.py | 79 +++++++++++++++++++++++----- 1 file changed, 66 insertions(+), 13 deletions(-) diff --git a/src/neofs_testlib/shell/ssh_shell.py b/src/neofs_testlib/shell/ssh_shell.py index d56e2c4..4e918c1 100644 --- a/src/neofs_testlib/shell/ssh_shell.py +++ b/src/neofs_testlib/shell/ssh_shell.py @@ -4,10 +4,11 @@ import textwrap from datetime import datetime from functools import lru_cache, wraps from time import sleep -from typing import ClassVar, Optional +from typing import ClassVar, Optional, Tuple from paramiko import ( AutoAddPolicy, + Channel, ECDSAKey, Ed25519Key, PKey, @@ -38,7 +39,7 @@ def log_command(func): def wrapper(shell: "SSHShell", command: str, *args, **kwargs) -> CommandResult: command_info = command.removeprefix("$ProgressPreference='SilentlyContinue'\n") with reporter.step(command_info): - logging.info(f'Execute command "{command}" on "{shell.host}"') + logger.info(f'Execute command "{command}" on "{shell.host}"') start_time = datetime.utcnow() result = func(shell, command, *args, **kwargs) @@ -146,17 +147,17 @@ class SSHShell(Shell): stdin.write(input) except OSError: logger.exception(f"Error while feeding {input} into command {command}") + if options.close_stdin: stdin.close() + sleep(self.DELAY_AFTER_EXIT) - # Wait for command to complete and flush its buffer before we attempt to read output - sleep(self.DELAY_AFTER_EXIT) + decoded_stdout, decoded_stderr = self._read_channels(stdout.channel, stderr.channel) return_code = stdout.channel.recv_exit_status() - sleep(self.DELAY_AFTER_EXIT) result = CommandResult( - stdout=stdout.read().decode(errors="ignore"), - stderr=stderr.read().decode(errors="ignore"), + stdout=decoded_stdout, + stderr=decoded_stderr, return_code=return_code, ) return result @@ -169,13 +170,12 @@ class SSHShell(Shell): if options.close_stdin: stdin.close() - # Wait for command to complete and flush its buffer before we attempt to read output + decoded_stdout, decoded_stderr = self._read_channels(stdout.channel, stderr.channel) return_code = stdout.channel.recv_exit_status() - sleep(self.DELAY_AFTER_EXIT) return CommandResult( - stdout=stdout.read().decode(errors="ignore"), - stderr=stderr.read().decode(errors="ignore"), + stdout=decoded_stdout, + stderr=decoded_stderr, return_code=return_code, ) except ( @@ -190,13 +190,66 @@ class SSHShell(Shell): self._reset_connection() raise HostIsNotAvailable(self.host) from exc + def _read_channels( + self, + stdout: Channel, + stderr: Channel, + chunk_size: int = 4096, + ) -> Tuple[str, str]: + """Reads data from stdout/stderr channels. + + Reading channels is required before we wait for exit status of the remote process. + Otherwise waiting step will hang indefinitely, see the warning from paramiko docs: + # https://docs.paramiko.org/en/stable/api/channel.html#paramiko.channel.Channel.recv_exit_status + + Args: + stdout: Channel of stdout stream of the remote process. + stderr: Channel of stderr stream of the remote process. + chunk_size: Max size of data chunk that we read from channel at a time. + + Returns: + Tuple with stdout and stderr channels decoded into strings. 
+ """ + # We read data in chunks + stdout_chunks = [] + stderr_chunks = [] + + # Read from channels (if data is ready) until process exits + while not stdout.exit_status_ready(): + if stdout.recv_ready(): + stdout_chunks.append(stdout.recv(chunk_size)) + if stderr.recv_stderr_ready(): + stderr_chunks.append(stderr.recv_stderr(chunk_size)) + + # Wait for command to complete and flush its buffer before we read final output + sleep(self.DELAY_AFTER_EXIT) + + # Read the remaining data from the channels: + # If channel returns empty data chunk, it means that all data has been read + while True: + data_chunk = stdout.recv(chunk_size) + if not data_chunk: + break + stdout_chunks.append(data_chunk) + while True: + data_chunk = stderr.recv_stderr(chunk_size) + if not data_chunk: + break + stderr_chunks.append(data_chunk) + + # Combine chunks and decode results into regular strings + full_stdout = b"".join(stdout_chunks) + full_stderr = b"".join(stderr_chunks) + + return (full_stdout.decode(errors="ignore"), full_stderr.decode(errors="ignore")) + def _create_connection(self, attempts: int = SSH_CONNECTION_ATTEMPTS) -> SSHClient: for attempt in range(attempts): connection = SSHClient() connection.set_missing_host_key_policy(AutoAddPolicy()) try: if self.private_key_path: - logging.info( + logger.info( f"Trying to connect to host {self.host} as {self.login} using SSH key " f"{self.private_key_path} (attempt {attempt})" ) @@ -208,7 +261,7 @@ class SSHShell(Shell): timeout=self.CONNECTION_TIMEOUT, ) else: - logging.info( + logger.info( f"Trying to connect to host {self.host} as {self.login} using password " f"(attempt {attempt})" ) From c05ed650c5585e620ff5ec0d8f9799c8b4a5a956 Mon Sep 17 00:00:00 2001 From: Vladimir Domnich Date: Mon, 17 Oct 2022 10:22:20 +0000 Subject: [PATCH 018/363] Bump version 0.2.0 -> 0.2.1 Signed-off-by: Vladimir Domnich --- pyproject.toml | 4 ++-- src/neofs_testlib/__init__.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index b5d3407..1581df3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "neofs-testlib" -version = "0.2.0" +version = "0.2.1" description = "Building blocks and utilities to facilitate development of automated tests for NeoFS system" readme = "README.md" authors = [{ name = "NSPCC", email = "info@nspcc.ru" }] @@ -47,7 +47,7 @@ line-length = 100 target-version = ["py39"] [tool.bumpver] -current_version = "0.2.0" +current_version = "0.2.1" version_pattern = "MAJOR.MINOR.PATCH" commit_message = "Bump version {old_version} -> {new_version}" commit = false diff --git a/src/neofs_testlib/__init__.py b/src/neofs_testlib/__init__.py index d3ec452..3ced358 100644 --- a/src/neofs_testlib/__init__.py +++ b/src/neofs_testlib/__init__.py @@ -1 +1 @@ -__version__ = "0.2.0" +__version__ = "0.2.1" From f907de52cffd795c335326da54151c7a6160cf8d Mon Sep 17 00:00:00 2001 From: Vladimir Domnich Date: Wed, 26 Oct 2022 09:43:12 +0300 Subject: [PATCH 019/363] Add versioning guide Signed-off-by: Vladimir Domnich --- CONTRIBUTING.md | 24 ++++++++++++++++++++++++ pyproject.toml | 2 +- 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index da8ea41..53d9d6b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -188,6 +188,30 @@ If you would like to modify code of the library in the integration with your tes $ pip install -e ../neofs-testlib ``` +# Maintaining guide + +## Versioning +We follow [Semantic Versioning 
Specification](https://semver.org/) to version this library. To manage the version number in the source code, we use the [bumpver](https://pypi.org/project/bumpver/) package.
+
+To update the version of the library, please take the following steps:
+1. Make sure that you have no pending changes in git.
+2. Run the following command to update the version and commit it to git:
+   ```shell
+   $ bumpver update --major  # to update major version
+   $ bumpver update --minor  # to update minor version
+   $ bumpver update --patch  # to update the patch component of the version
+   ```
+3. Sign off the created commit:
+   ```shell
+   $ git commit --amend --signoff
+   ```
+4. Push the changes to remote.
+5. After this commit is merged to upstream, create a tag on the master branch of upstream:
+   ```shell
+   $ git tag <version>
+   $ git push upstream <version>
+   ```
+
 ## Building and publishing package
 To build Python package of the library, please run the following command in the library root directory:
 ```shell
diff --git a/pyproject.toml b/pyproject.toml
index 1581df3..858f235 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -50,7 +50,7 @@ target-version = ["py39"]
 current_version = "0.2.1"
 version_pattern = "MAJOR.MINOR.PATCH"
 commit_message = "Bump version {old_version} -> {new_version}"
-commit = false
+commit = true
 tag = false
 push = false

From aebec54495d1371bb10678fdc6a870d8092eb49b Mon Sep 17 00:00:00 2001
From: Vladimir Avdeev
Date: Wed, 26 Oct 2022 12:40:57 +0300
Subject: [PATCH 020/363] Add keywords helpers

Signed-off-by: Vladimir Avdeev
---
 README.md                                  |   3 +
 pyproject.toml                             |   1 +
 requirements.txt                           |   1 +
 src/neofs_testlib/blockchain/__init__.py   |   2 +
 src/neofs_testlib/blockchain/multisig.py   |  53 ++++++
 .../blockchain/role_designation.py          | 156 ++++++++++++++++++
 src/neofs_testlib/blockchain/rpc_client.py  |  80 +++++++++
 src/neofs_testlib/cli/cli_command.py        |  16 +-
 src/neofs_testlib/cli/neogo/candidate.py    |  71 +++++---
 src/neofs_testlib/cli/neogo/contract.py     | 149 ++++++++++++-----
 src/neofs_testlib/cli/neogo/go.py           |   2 +-
 src/neofs_testlib/cli/neogo/nep17.py        |  64 ++++---
 src/neofs_testlib/cli/neogo/query.py        |  10 +-
 src/neofs_testlib/cli/neogo/wallet.py       |  46 +++---
 src/neofs_testlib/shell/__init__.py         |   2 +-
 src/neofs_testlib/utils/__init__.py         |   0
 src/neofs_testlib/utils/converters.py       |  69 ++++++++
 src/neofs_testlib/utils/wallet.py           |  37 +++++
 18 files changed, 637 insertions(+), 125 deletions(-)
 create mode 100644 src/neofs_testlib/blockchain/__init__.py
 create mode 100644 src/neofs_testlib/blockchain/multisig.py
 create mode 100644 src/neofs_testlib/blockchain/role_designation.py
 create mode 100644 src/neofs_testlib/blockchain/rpc_client.py
 create mode 100644 src/neofs_testlib/utils/__init__.py
 create mode 100644 src/neofs_testlib/utils/converters.py
 create mode 100644 src/neofs_testlib/utils/wallet.py

diff --git a/README.md b/README.md
index ed28dfc..3cb43fb 100644
--- a/README.md
+++ b/README.md
@@ -83,10 +83,13 @@ Detailed information about registering entrypoints can be found at [setuptools d
 ## Library structure
 The library provides the following primary components:
+ * `blockchain` - helpers to interact with the neo blockchain, smart contracts, gas transfers, etc.
 * `cli` - wrappers on top of neoFS command-line tools. These wrappers execute on a shell and provide type-safe interface for interacting with the tools.
 * `hosting` - management of infrastructure (docker, virtual machines, services where neoFS is hosted).
The library provides host implementation for docker environment (when neoFS services are running as docker containers). Support for other hosts is provided via plugins.
 * `reporter` - abstraction on top of test reporting tool like Allure. Components of the library will report their steps and attach artifacts to the configured reporter instance.
 * `shell` - shells that can be used to execute commands. Currently library provides local shell (on machine that runs the code) or SSH shell that connects to a remote machine via SSH.
+ * `utils` - support functions.
+
 
 ## Contributing
 
 Any contributions to the library should conform to the [contribution guideline](https://github.com/nspcc-dev/neofs-testlib/blob/master/CONTRIBUTING.md).
diff --git a/pyproject.toml b/pyproject.toml
index 858f235..cb9ddb7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -19,6 +19,7 @@ dependencies = [
     "allure-python-commons>=2.9.45",
     "docker>=4.4.0",
     "importlib_metadata>=5.0; python_version < '3.10'",
+    "neo-mamba==0.10.0",
     "paramiko>=2.10.3",
    "pexpect>=4.8.0",
     "requests>=2.28.0",
diff --git a/requirements.txt b/requirements.txt
index 294f406..adca8f9 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,7 @@
 allure-python-commons==2.9.45
 docker==4.4.0
 importlib_metadata==5.0.0
+neo-mamba==0.10.0
 paramiko==2.10.3
 pexpect==4.8.0
 requests==2.28.1
diff --git a/src/neofs_testlib/blockchain/__init__.py b/src/neofs_testlib/blockchain/__init__.py
new file mode 100644
index 0000000..006e8f1
--- /dev/null
+++ b/src/neofs_testlib/blockchain/__init__.py
@@ -0,0 +1,2 @@
+from neofs_testlib.blockchain.multisig import Multisig
+from neofs_testlib.blockchain.rpc_client import RPCClient
diff --git a/src/neofs_testlib/blockchain/multisig.py b/src/neofs_testlib/blockchain/multisig.py
new file mode 100644
index 0000000..9dafd72
--- /dev/null
+++ b/src/neofs_testlib/blockchain/multisig.py
@@ -0,0 +1,53 @@
+from typing import List
+
+from neofs_testlib.cli import NeoGo
+
+
+class Multisig:
+    def __init__(self, neogo: NeoGo, invoke_tx_file: str, block_period: int):
+        self.neogo = neogo
+        self.invoke_tx_file = invoke_tx_file
+        self.block_period = block_period
+
+    def create_and_send(
+        self,
+        contract_hash: str,
+        contract_args: str,
+        multisig_hash: str,
+        wallets: List[str],
+        passwords: List[str],
+        address: str,
+        endpoint: str,
+    ) -> None:
+        if not len(wallets):
+            raise ValueError("Got empty wallets list")
+
+        self.neogo.contract.invokefunction(
+            address=address,
+            rpc_endpoint=endpoint,
+            wallet=wallets[0],
+            wallet_password=passwords[0],
+            out=None if len(wallets) == 1 else self.invoke_tx_file,
+            scripthash=contract_hash,
+            arguments=contract_args,
+            multisig_hash=multisig_hash,
+        )
+
+        if len(wallets) > 1:
+            # sign with the rest of the wallets except the last one
+            for wallet, password in zip(wallets[1:-1], passwords[1:-1]):
+                self.neogo.wallet.sign(
+                    wallet=wallet,
+                    wallet_password=password,
+                    input_file=self.invoke_tx_file,
+                    out=self.invoke_tx_file,
+                    address=address,
+                )
+
+            # sign tx with the last wallet and push it to the blockchain
+            self.neogo.wallet.sign(
+                wallet=wallets[-1],
+                wallet_password=passwords[-1],
+                input_file=self.invoke_tx_file,
+                out=self.invoke_tx_file,
+                address=address,
+                rpc_endpoint=endpoint,
+            )
diff --git a/src/neofs_testlib/blockchain/role_designation.py b/src/neofs_testlib/blockchain/role_designation.py
new file mode 100644
index 0000000..a97438e
--- /dev/null
+++ b/src/neofs_testlib/blockchain/role_designation.py
@@ -0,0 +1,156 @@
+import json
+from time import sleep
+from typing import List, Optional
+
+from neofs_testlib.blockchain import Multisig
+from neofs_testlib.cli import NeoGo
+from neofs_testlib.shell import Shell
+from neofs_testlib.utils.converters import process_b64_bytearray
+
+
+class RoleDesignation:
+    def __init__(
+        self,
+        shell: Shell,
+        neo_go_exec_path: str,
+        block_period: int,
+        designate_contract: str,
+    ):
+        self.neogo = NeoGo(shell, neo_go_exec_path)
+        self.block_period = block_period
+        self.designate_contract = designate_contract
+
+    def set_notary_nodes(
+        self,
+        addr: str,
+        pubkeys: List[str],
+        script_hash: str,
+        wallet: str,
+        passwd: str,
+        endpoint: str,
+    ) -> str:
+        keys = [f"bytes:{k}" for k in pubkeys]
+        keys_str = " ".join(keys)
+        out = self.neogo.contract.invokefunction(
+            address=addr,
+            scripthash=self.designate_contract,
+            wallet=wallet,
+            wallet_password=passwd,
+            rpc_endpoint=endpoint,
+            arguments=f"designateAsRole int:32 [ {keys_str} ] -- {script_hash}",
+            force=True,
+        )
+        sleep(self.block_period)
+        return out.stdout.split(" ")[-1]
+
+    def set_inner_ring(
+        self,
+        addr: str,
+        pubkeys: List[str],
+        script_hash: str,
+        wallet: str,
+        passwd: str,
+        endpoint: str,
+    ) -> str:
+        keys = [f"bytes:{k}" for k in pubkeys]
+        keys_str = " ".join(keys)
+        out = self.neogo.contract.invokefunction(
+            address=addr,
+            scripthash=self.designate_contract,
+            wallet=wallet,
+            wallet_password=passwd,
+            rpc_endpoint=endpoint,
+            arguments=f"designateAsRole int:16 [ {keys_str} ] -- {script_hash}",
+            force=True,
+        )
+        sleep(self.block_period)
+        return out.stdout.split(" ")[-1]
+
+    def set_oracles(
+        self,
+        addr: str,
+        pubkeys: List[str],
+        script_hash: str,
+        wallet: str,
+        passwd: str,
+        endpoint: str,
+    ) -> str:
+        keys = [f"bytes:{k}" for k in pubkeys]
+        keys_str = " ".join(keys)
+        out = self.neogo.contract.invokefunction(
+            address=addr,
+            scripthash=self.designate_contract,
+            wallet=wallet,
+            wallet_password=passwd,
+            rpc_endpoint=endpoint,
+            arguments=f"designateAsRole int:8 [ {keys_str} ] -- {script_hash}",
+            force=True,
+        )
+        sleep(self.block_period)
+        return out.stdout.split(" ")[-1]
+
+    def set_notary_nodes_multisig_tx(
+        self,
+        pubkeys: List[str],
+        script_hash: str,
+        wallets: List[str],
+        passwords: List[str],
+        address: str,
+        endpoint: str,
+        invoke_tx_file: str,
+    ) -> None:
+        keys = [f"bytes:{k}" for k in pubkeys]
+        keys_str = " ".join(keys)
+        multisig = Multisig(
+            self.neogo, invoke_tx_file=invoke_tx_file, block_period=self.block_period
+        )
+        multisig.create_and_send(
+            self.designate_contract,
+            f"designateAsRole int:32 [ {keys_str} ]",
+            script_hash,
+            wallets,
+            passwords,
+            address,
+            endpoint,
+        )
+        sleep(self.block_period)
+
+    def set_inner_ring_multisig_tx(
+        self,
+        pubkeys: List[str],
+        script_hash: str,
+        wallets: List[str],
+        passwords: List[str],
+        address: str,
+        endpoint: str,
+        invoke_tx_file: str,
+    ) -> None:
+        keys = [f"bytes:{k}" for k in pubkeys]
+        keys_str = " ".join(keys)
+        multisig = Multisig(
+            self.neogo, invoke_tx_file=invoke_tx_file, block_period=self.block_period
+        )
+        multisig.create_and_send(
+            self.designate_contract,
+            f"designateAsRole int:16 [ {keys_str} ]",
+            script_hash,
+            wallets,
+            passwords,
+            address,
+            endpoint,
+        )
+        sleep(self.block_period)
+
+    def check_candidates(self, contract_hash: str, endpoint: str) -> Optional[List[str]]:
+        out = self.neogo.contract.testinvokefunction(
+            scripthash=contract_hash,
+            method="innerRingCandidates",
+            rpc_endpoint=endpoint,
+        )
+        output_dict = json.loads(out.stdout.replace("\n", ""))
+        candidates = output_dict["stack"][0]["value"]
+        if len(candidates) == 0:
+            return None
+        # TODO: return a list of keys
+        return [process_b64_bytearray(candidate["value"][0]["value"]) for candidate
in candidates] diff --git a/src/neofs_testlib/blockchain/rpc_client.py b/src/neofs_testlib/blockchain/rpc_client.py new file mode 100644 index 0000000..b4e85c1 --- /dev/null +++ b/src/neofs_testlib/blockchain/rpc_client.py @@ -0,0 +1,80 @@ +import json +import logging +from typing import Any, Dict, List, Optional + +import requests + +logger = logging.getLogger("neofs.testlib.blockchain") + + +class NeoRPCException(Exception): + pass + + +class RPCClient: + def __init__(self, endpoint, timeout: int = 10): + self.endpoint = endpoint + self.timeout = timeout + + def get_raw_transaction(self, tx_hash): + return self._call_endpoint("getrawtransaction", params=[tx_hash]) + + def send_raw_transaction(self, raw_tx: str): + return self._call_endpoint("sendrawtransaction", params=[raw_tx]) + + def get_storage(self, sc_hash: str, storage_key: str): + return self._call_endpoint("getstorage", params=[sc_hash, storage_key]) + + def invoke_function( + self, + sc_hash: str, + function: str, + params: Optional[List] = None, + signers: Optional[List] = None, + ) -> Dict[str, Any]: + return self._call_endpoint( + "invokefunction", params=[sc_hash, function, params or [], signers or []] + ) + + def get_transaction_height(self, txid: str): + return self._call_endpoint("gettransactionheight", params=[txid]) + + def get_nep17_transfers(self, address, timestamps=None): + params = [address] + if timestamps: + params.append(timestamps) + return self._call_endpoint("getnep17transfers", params) + + def get_nep17_balances(self, address): + return self._call_endpoint("getnep17balances", [address, 0]) + + def get_application_log(self, tx_hash): + return self._call_endpoint("getapplicationlog", params=[tx_hash]) + + def get_contract_state(self, contract_id): + """ + `contract_id` might be contract name, script hash or number + """ + return self._call_endpoint("getcontractstate", params=[contract_id]) + + def _call_endpoint(self, method, params=None) -> Dict[str, Any]: + payload = _build_payload(method, params) + logger.info(payload) + try: + response = requests.post(self.endpoint, data=payload, timeout=self.timeout) + response.raise_for_status() + if response.status_code == 200: + if "result" in response.json(): + return response.json()["result"] + return response.json() + except Exception as exc: + raise NeoRPCException( + f"Could not call method {method} " + f"with endpoint: {self.endpoint}: {exc}" + f"\nRequest sent: {payload}" + ) from exc + + +def _build_payload(method, params: Optional[List] = None): + payload = json.dumps({"jsonrpc": "2.0", "method": method, "params": params or [], "id": 1}) + return payload.replace("'", '"') diff --git a/src/neofs_testlib/cli/cli_command.py b/src/neofs_testlib/cli/cli_command.py index 13268f2..772b9da 100644 --- a/src/neofs_testlib/cli/cli_command.py +++ b/src/neofs_testlib/cli/cli_command.py @@ -1,11 +1,12 @@ from typing import Optional -from neofs_testlib.shell import CommandResult, Shell +from neofs_testlib.shell import CommandOptions, CommandResult, InteractiveInput, Shell class CliCommand: WALLET_SOURCE_ERROR_MSG = "Provide either wallet or wallet_config to specify wallet location" + WALLET_PASSWD_ERROR_MSG = "Provide either wallet_password or wallet_config to specify password" cli_exec_path: Optional[str] = None __base_params: Optional[str] = None @@ -14,6 +15,8 @@ class CliCommand: "await_mode": "await", "hash_type": "hash", "doc_type": "type", + "to_address": "to", + "from_address": "from", } def __init__(self, shell: Shell, cli_exec_path: str, **base_params): @@ -26,6 
+29,9 @@ class CliCommand: def _format_command(self, command: str, **params) -> str: param_str = [] for param, value in params.items(): + if param == "post_data": + param_str.append(value) + continue if param in self.map_params.keys(): param = self.map_params[param] param = param.replace("_", "-") @@ -56,3 +62,11 @@ class CliCommand: def _execute(self, command: Optional[str], **params) -> CommandResult: return self.shell.exec(self._format_command(command, **params)) + + def _execute_with_password(self, command: Optional[str], password, **params) -> CommandResult: + return self.shell.exec( + self._format_command(command, **params), + options=CommandOptions( + interactive_inputs=[InteractiveInput(prompt_pattern="assword", input=password)] + ), + ) diff --git a/src/neofs_testlib/cli/neogo/candidate.py b/src/neofs_testlib/cli/neogo/candidate.py index 50200bb..f5e4f33 100644 --- a/src/neofs_testlib/cli/neogo/candidate.py +++ b/src/neofs_testlib/cli/neogo/candidate.py @@ -11,6 +11,7 @@ class NeoGoCandidate(CliCommand): rpc_endpoint: str, wallet: Optional[str] = None, wallet_config: Optional[str] = None, + wallet_password: Optional[str] = None, gas: Optional[float] = None, timeout: int = 10, ) -> CommandResult: @@ -21,6 +22,7 @@ class NeoGoCandidate(CliCommand): wallet: Target location of the wallet file ('-' to read from stdin); conflicts with --wallet-config flag. wallet_config: Target location of the wallet config file; conflicts with --wallet flag. + wallet_password: Wallet password. gas: Network fee to add to the transaction (prioritizing it). rpc_endpoint: RPC node address. timeout: Timeout for the operation (default: 10s). @@ -29,15 +31,20 @@ class NeoGoCandidate(CliCommand): Command's result. """ assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG + exec_param = { + param: param_value + for param, param_value in locals().items() + if param not in ["self", "wallet_password"] + } + exec_param["timeout"] = f"{timeout}s" + if wallet_password is not None: + return self._execute_with_password( + "wallet candidate register", wallet_password, **exec_param + ) + if wallet_config: + return self._execute("wallet candidate register", **exec_param) - return self._execute( - "wallet candidate register", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) + raise Exception(self.WALLET_PASSWD_ERROR_MSG) def unregister( self, @@ -45,6 +52,7 @@ class NeoGoCandidate(CliCommand): rpc_endpoint: str, wallet: Optional[str] = None, wallet_config: Optional[str] = None, + wallet_password: Optional[str] = None, gas: Optional[float] = None, timeout: int = 10, ) -> CommandResult: @@ -55,6 +63,7 @@ class NeoGoCandidate(CliCommand): wallet: Target location of the wallet file ('-' to read from stdin); conflicts with --wallet-config flag. wallet_config: Target location of the wallet config file; conflicts with --wallet flag. + wallet_password: Wallet password. gas: Network fee to add to the transaction (prioritizing it). rpc_endpoint: RPC node address. timeout: Timeout for the operation (default: 10s). @@ -63,22 +72,29 @@ class NeoGoCandidate(CliCommand): Command's result. 
""" assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG + exec_param = { + param: param_value + for param, param_value in locals().items() + if param not in ["self", "wallet_password"] + } + exec_param["timeout"] = f"{timeout}s" + if wallet_password is not None: + return self._execute_with_password( + "wallet candidate unregister", wallet_password, **exec_param + ) + if wallet_config: + return self._execute("wallet candidate unregister", **exec_param) - return self._execute( - "wallet candidate unregister", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) + raise Exception(self.WALLET_PASSWD_ERROR_MSG) def vote( self, + address: str, candidate: str, rpc_endpoint: str, wallet: Optional[str] = None, wallet_config: Optional[str] = None, + wallet_password: Optional[str] = None, gas: Optional[float] = None, timeout: int = 10, ) -> CommandResult: @@ -88,10 +104,12 @@ class NeoGoCandidate(CliCommand): candidate argument to perform unvoting. Args: + address: Address to vote from candidate: Public key of candidate to vote for. wallet: Target location of the wallet file ('-' to read from stdin); conflicts with --wallet-config flag. wallet_config: Target location of the wallet config file; conflicts with --wallet flag. + wallet_password: Wallet password. gas: Network fee to add to the transaction (prioritizing it). rpc_endpoint: RPC node address. timeout: Timeout for the operation (default: 10s). @@ -100,12 +118,17 @@ class NeoGoCandidate(CliCommand): Command's result. """ assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG + exec_param = { + param: param_value + for param, param_value in locals().items() + if param not in ["self", "wallet_password"] + } + exec_param["timeout"] = f"{timeout}s" + if wallet_password is not None: + return self._execute_with_password( + "wallet candidate vote", wallet_password, **exec_param + ) + if wallet_config: + return self._execute("wallet candidate vote", **exec_param) - return self._execute( - "wallet candidate vote", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) + raise Exception(self.WALLET_PASSWD_ERROR_MSG) diff --git a/src/neofs_testlib/cli/neogo/contract.py b/src/neofs_testlib/cli/neogo/contract.py index 5329978..61f4edb 100644 --- a/src/neofs_testlib/cli/neogo/contract.py +++ b/src/neofs_testlib/cli/neogo/contract.py @@ -45,11 +45,12 @@ class NeoGoContract(CliCommand): self, address: str, input_file: str, - sysgas: float, manifest: str, rpc_endpoint: str, + sysgas: Optional[float] = None, wallet: Optional[str] = None, wallet_config: Optional[str] = None, + wallet_password: Optional[str] = None, gas: Optional[float] = None, out: Optional[str] = None, force: bool = False, @@ -62,6 +63,7 @@ class NeoGoContract(CliCommand): conflicts with wallet_config. wallet_config: Path to wallet config to use to get the key for transaction signing; conflicts with wallet. + wallet_password: Wallet password. address: Address to use as transaction signee (and gas source). gas: Network fee to add to the transaction (prioritizing it). sysgas: System fee to add to transaction (compensating for execution). @@ -77,15 +79,26 @@ class NeoGoContract(CliCommand): Command's result. 
""" assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG + exec_param = { + param: param_value + for param, param_value in locals().items() + if param not in ["self", "wallet_password"] + } + exec_param["timeout"] = f"{timeout}s" - return self._execute( - "contract deploy", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) + if wallet_password is not None: + return self._execute_with_password( + "contract deploy", + wallet_password, + **exec_param, + ) + if wallet_config: + return self._execute( + "contract deploy", + **exec_param, + ) + + raise Exception(self.WALLET_PASSWD_ERROR_MSG) def generate_wrapper( self, @@ -116,13 +129,14 @@ class NeoGoContract(CliCommand): def invokefunction( self, - address: str, scripthash: str, + address: Optional[str] = None, wallet: Optional[str] = None, method: Optional[str] = None, arguments: Optional[str] = None, multisig_hash: Optional[str] = None, wallet_config: Optional[str] = None, + wallet_password: Optional[str] = None, gas: Optional[float] = None, sysgas: Optional[float] = None, out: Optional[str] = None, @@ -147,6 +161,7 @@ class NeoGoContract(CliCommand): conflicts with wallet_config. wallet_config: Path to wallet config to use to get the key for transaction signing; conflicts with wallet. + wallet_password: Wallet password. address: Address to use as transaction signee (and gas source). gas: Network fee to add to the transaction (prioritizing it). sysgas: System fee to add to transaction (compensating for execution). @@ -158,21 +173,40 @@ class NeoGoContract(CliCommand): Returns: Command's result. """ + + assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG + multisig_hash = f"-- {multisig_hash}" or "" - return self._execute( - "contract invokefunction " - f"{scripthash} {method or ''} {arguments or ''} {multisig_hash}", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self", "scripthash", "method", "arguments", "multisig_hash"] - }, - ) + post_data = f"{scripthash} {method or ''} {arguments or ''} {multisig_hash}" + exec_param = { + param: param_value + for param, param_value in locals().items() + if param + not in [ + "self", + "scripthash", + "method", + "arguments", + "multisig_hash", + "wallet_password", + ] + } + exec_param["timeout"] = f"{timeout}s" + exec_param["post_data"] = post_data + if wallet_password is not None: + return self._execute_with_password( + "contract invokefunction", wallet_password, **exec_param + ) + if wallet_config: + return self._execute("contract invokefunction", **exec_param) + + raise Exception(self.WALLET_PASSWD_ERROR_MSG) def testinvokefunction( self, scripthash: str, wallet: Optional[str] = None, + wallet_password: Optional[str] = None, method: Optional[str] = None, arguments: Optional[str] = None, multisig_hash: Optional[str] = None, @@ -192,6 +226,8 @@ class NeoGoContract(CliCommand): Args: scripthash: Function hash. + wallet: Wallet to use for testinvoke. + wallet_password: Wallet password. method: Call method. arguments: Method arguments. multisig_hash: Multisig hash. @@ -201,16 +237,29 @@ class NeoGoContract(CliCommand): Returns: Command's result. 
""" - multisig_hash = f"-- {multisig_hash}" or "" - return self._execute( - "contract testinvokefunction " - f"{scripthash} {method or ''} {arguments or ''} {multisig_hash}", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self", "scripthash", "method", "arguments", "multisig_hash"] - }, - ) + multisig_hash = f"-- {multisig_hash}" if multisig_hash else "" + post_data = f"{scripthash} {method or ''} {arguments or ''} {multisig_hash}" + exec_param = { + param: param_value + for param, param_value in locals().items() + if param + not in [ + "self", + "scripthash", + "method", + "arguments", + "multisig_hash", + "wallet_password", + ] + } + exec_param["timeout"] = f"{timeout}s" + exec_param["post_data"] = post_data + if wallet_password is not None: + return self._execute_with_password( + "contract testinvokefunction", wallet_password, **exec_param + ) + + return self._execute("contract testinvokefunction", **exec_param) def testinvokescript( self, @@ -231,13 +280,13 @@ class NeoGoContract(CliCommand): Returns: Command's result. """ + exec_param = { + param: param_value for param, param_value in locals().items() if param not in ["self"] + } + exec_param["timeout"] = f"{timeout}s" return self._execute( - f"contract testinvokescript", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + "contract testinvokescript", + **exec_param, ) def init(self, name: str, skip_details: bool = False) -> CommandResult: @@ -313,14 +362,18 @@ class NeoGoContract(CliCommand): address: str, wallet: Optional[str] = None, wallet_config: Optional[str] = None, + wallet_password: Optional[str] = None, sender: Optional[str] = None, nef: Optional[str] = None, ) -> CommandResult: """Adds group to the manifest. Args: - wallet: Wallet to use to get the key for transaction signing; conflicts with wallet_config. - wallet_config: Path to wallet config to use to get the key for transaction signing; conflicts with wallet. + wallet: Wallet to use to get the key for transaction signing; + conflicts with wallet_config. + wallet_config: Path to wallet config to use to get the key for transaction signing; + conflicts with wallet. + wallet_password: Wallet password. sender: Deploy transaction sender. address: Account to sign group with. nef: Path to the NEF file. @@ -329,11 +382,17 @@ class NeoGoContract(CliCommand): Returns: Command's result. 
""" - return self._execute( - "contract manifest add-group", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) + assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG + exec_param = { + param: param_value + for param, param_value in locals().items() + if param not in ["self", "wallet_password"] + } + if wallet_password is not None: + return self._execute_with_password( + "contract manifest add-group", wallet_password, **exec_param + ) + if wallet_config: + return self._execute("contract manifest add-group", **exec_param) + + raise Exception(self.WALLET_PASSWD_ERROR_MSG) diff --git a/src/neofs_testlib/cli/neogo/go.py b/src/neofs_testlib/cli/neogo/go.py index 02aac73..5f216ce 100644 --- a/src/neofs_testlib/cli/neogo/go.py +++ b/src/neofs_testlib/cli/neogo/go.py @@ -24,7 +24,7 @@ class NeoGo: def __init__( self, shell: Shell, - neo_go_exec_path: Optional[str] = None, + neo_go_exec_path: str, config_path: Optional[str] = None, ): self.candidate = NeoGoCandidate(shell, neo_go_exec_path, config_path=config_path) diff --git a/src/neofs_testlib/cli/neogo/nep17.py b/src/neofs_testlib/cli/neogo/nep17.py index edd65eb..7cc00b6 100644 --- a/src/neofs_testlib/cli/neogo/nep17.py +++ b/src/neofs_testlib/cli/neogo/nep17.py @@ -29,14 +29,13 @@ class NeoGoNep17(CliCommand): Command's result. """ assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - + exec_param = { + param: param_value for param, param_value in locals().items() if param not in ["self"] + } + exec_param["timeout"] = f"{timeout}s" return self._execute( "wallet nep17 balance", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **exec_param, ) def import_token( @@ -63,14 +62,13 @@ class NeoGoNep17(CliCommand): Command's result. """ assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - + exec_param = { + param: param_value for param, param_value in locals().items() if param not in ["self"] + } + exec_param["timeout"] = f"{timeout}s" return self._execute( "wallet nep17 import", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **exec_param, ) def info( @@ -133,10 +131,11 @@ class NeoGoNep17(CliCommand): self, token: str, to_address: str, - sysgas: float, rpc_endpoint: str, + sysgas: Optional[float] = None, wallet: Optional[str] = None, wallet_config: Optional[str] = None, + wallet_password: Optional[str] = None, out: Optional[str] = None, from_address: Optional[str] = None, force: bool = False, @@ -156,6 +155,7 @@ class NeoGoNep17(CliCommand): wallet: Target location of the wallet file ('-' to read from stdin); conflicts with --wallet-config flag. wallet_config: Target location of the wallet config file; conflicts with --wallet flag. + wallet_password: Wallet password. out: File to put JSON transaction to. from_address: Address to send an asset from. to_address: Address to send an asset to. @@ -172,15 +172,26 @@ class NeoGoNep17(CliCommand): Command's result. 
""" assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG + exec_param = { + param: param_value + for param, param_value in locals().items() + if param not in ["self", "wallet_password"] + } + exec_param["timeout"] = f"{timeout}s" - return self._execute( - "wallet nep17 transfer", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) + if wallet_password is not None: + return self._execute_with_password( + "wallet nep17 transfer", + wallet_password, + **exec_param, + ) + if wallet_config: + return self._execute( + "wallet nep17 transfer", + **exec_param, + ) + + raise Exception(self.WALLET_PASSWD_ERROR_MSG) def multitransfer( self, @@ -219,12 +230,11 @@ class NeoGoNep17(CliCommand): Command's result. """ assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - + exec_param = { + param: param_value for param, param_value in locals().items() if param not in ["self"] + } + exec_param["timeout"] = f"{timeout}s" return self._execute( "wallet nep17 multitransfer", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **exec_param, ) diff --git a/src/neofs_testlib/cli/neogo/query.py b/src/neofs_testlib/cli/neogo/query.py index 6d93799..945cd6c 100644 --- a/src/neofs_testlib/cli/neogo/query.py +++ b/src/neofs_testlib/cli/neogo/query.py @@ -3,7 +3,7 @@ from neofs_testlib.shell import CommandResult class NeoGoQuery(CliCommand): - def candidates(self, rpc_endpoint: str, timeout: int = 10) -> CommandResult: + def candidates(self, rpc_endpoint: str, timeout: str = "10s") -> CommandResult: """Get candidates and votes. Args: @@ -22,7 +22,7 @@ class NeoGoQuery(CliCommand): }, ) - def committee(self, rpc_endpoint: str, timeout: int = 10) -> CommandResult: + def committee(self, rpc_endpoint: str, timeout: str = "10s") -> CommandResult: """Get committee list. Args: @@ -41,7 +41,7 @@ class NeoGoQuery(CliCommand): }, ) - def height(self, rpc_endpoint: str, timeout: int = 10) -> CommandResult: + def height(self, rpc_endpoint: str, timeout: str = "10s") -> CommandResult: """Get node height. Args: @@ -60,7 +60,7 @@ class NeoGoQuery(CliCommand): }, ) - def tx(self, tx_hash: str, rpc_endpoint: str, timeout: int = 10) -> CommandResult: + def tx(self, tx_hash: str, rpc_endpoint: str, timeout: str = "10s") -> CommandResult: """Query transaction status. Args: @@ -80,7 +80,7 @@ class NeoGoQuery(CliCommand): }, ) - def voter(self, rpc_endpoint: str, timeout: int = 10) -> CommandResult: + def voter(self, rpc_endpoint: str, timeout: str = "10s") -> CommandResult: """Print NEO holder account state. Args: diff --git a/src/neofs_testlib/cli/neogo/wallet.py b/src/neofs_testlib/cli/neogo/wallet.py index e327fb5..9e95a51 100644 --- a/src/neofs_testlib/cli/neogo/wallet.py +++ b/src/neofs_testlib/cli/neogo/wallet.py @@ -27,14 +27,13 @@ class NeoGoWallet(CliCommand): Command's result. """ assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - + exec_param = { + param: param_value for param, param_value in locals().items() if param not in ["self"] + } + exec_param["timeout"] = f"{timeout}s" return self._execute( "wallet claim", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **exec_param, ) def init( @@ -293,14 +292,13 @@ class NeoGoWallet(CliCommand): Command's result. 
""" assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG - + exec_param = { + param: param_value for param, param_value in locals().items() if param not in ["self"] + } + exec_param["timeout"] = f"{timeout}s" return self._execute( "wallet import-deployed", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **exec_param, ) def remove( @@ -337,9 +335,10 @@ class NeoGoWallet(CliCommand): self, input_file: str, address: str, - rpc_endpoint: str, + rpc_endpoint: Optional[str] = None, wallet: Optional[str] = None, wallet_config: Optional[str] = None, + wallet_password: Optional[str] = None, out: Optional[str] = None, timeout: int = 10, ) -> CommandResult: @@ -356,6 +355,7 @@ class NeoGoWallet(CliCommand): wallet: Target location of the wallet file ('-' to read from stdin); conflicts with --wallet-config flag. wallet_config: Target location of the wallet config file; conflicts with --wallet flag. + wallet_password: Wallet password. out: File to put JSON transaction to. input_file: File with JSON transaction. address: Address to use. @@ -366,12 +366,16 @@ class NeoGoWallet(CliCommand): Command's result. """ assert bool(wallet) ^ bool(wallet_config), self.WALLET_SOURCE_ERROR_MSG + exec_param = { + param: param_value + for param, param_value in locals().items() + if param not in ["self", "wallet_password"] + } + exec_param["timeout"] = f"{timeout}s" + if wallet_password is not None: + return self._execute_with_password("wallet sign", wallet_password, **exec_param) - return self._execute( - "wallet sign", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) + if wallet_config: + return self._execute("wallet sign", **exec_param) + + raise Exception(self.WALLET_PASSWD_ERROR_MSG) diff --git a/src/neofs_testlib/shell/__init__.py b/src/neofs_testlib/shell/__init__.py index 3fd63bd..d0f22d6 100644 --- a/src/neofs_testlib/shell/__init__.py +++ b/src/neofs_testlib/shell/__init__.py @@ -1,3 +1,3 @@ -from neofs_testlib.shell.interfaces import CommandOptions, CommandResult, Shell +from neofs_testlib.shell.interfaces import CommandOptions, CommandResult, InteractiveInput, Shell from neofs_testlib.shell.local_shell import LocalShell from neofs_testlib.shell.ssh_shell import SSHShell diff --git a/src/neofs_testlib/utils/__init__.py b/src/neofs_testlib/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/neofs_testlib/utils/converters.py b/src/neofs_testlib/utils/converters.py new file mode 100644 index 0000000..64cef1a --- /dev/null +++ b/src/neofs_testlib/utils/converters.py @@ -0,0 +1,69 @@ +import base64 +import binascii +import json + +import base58 +from neo3 import wallet as neo3_wallet + + +def str_to_ascii_hex(input: str) -> str: + b = binascii.hexlify(input.encode()) + return str(b)[2:-1] + + +def ascii_hex_to_str(input: str) -> bytes: + return bytes.fromhex(input) + + +# Two functions below do parsing of Base64-encoded byte arrays which +# tests receive from Neo node RPC calls. + + +def process_b64_bytearray_reverse(data: str) -> bytes: + """ + This function decodes input data from base64, reverses the byte + array and returns its string representation. + """ + arr = bytearray(base64.standard_b64decode(data)) + arr.reverse() + return binascii.b2a_hex(arr) + + +def process_b64_bytearray(data: str) -> bytes: + """ + This function decodes input data from base64 and returns the + bytearray string representation. 
+ """ + arr = bytearray(base64.standard_b64decode(data)) + return binascii.b2a_hex(arr) + + +def contract_hash_to_address(chash: str) -> str: + """ + This function accepts contract hash in BE, then translates in to LE, + prepends NEO wallet prefix and encodes to base58. It is equal to + `UInt160ToString` method in NEO implementations. + """ + be = bytearray(bytes.fromhex(chash)) + be.reverse() + return base58.b58encode_check(b"\x35" + bytes(be)).decode() + + +def get_contract_hash_from_manifest(manifest_path: str) -> str: + with open(manifest_path) as m: + data = json.load(m) + # cut off '0x' and return the hash + return data["abi"]["hash"][2:] + + +def get_wif_from_private_key(priv_key: bytes) -> str: + wif_version = b"\x80" + compressed_flag = b"\x01" + wif = base58.b58encode_check(wif_version + priv_key + compressed_flag) + return wif.decode("utf-8") + + +def load_wallet(path: str, passwd: str = "") -> neo3_wallet.Wallet: + with open(path, "r") as wallet_file: + wlt_data = wallet_file.read() + return neo3_wallet.Wallet.from_json(json.loads(wlt_data), password=passwd) diff --git a/src/neofs_testlib/utils/wallet.py b/src/neofs_testlib/utils/wallet.py new file mode 100644 index 0000000..656c41f --- /dev/null +++ b/src/neofs_testlib/utils/wallet.py @@ -0,0 +1,37 @@ +import json +import logging + +from neo3 import wallet as neo3_wallet + +logger = logging.getLogger("neofs.testlib.utils") + + +def init_wallet(wallet_path: str, wallet_password: str): + """ + Create new wallet and new account. + Args: + wallet_path: The path to the wallet to save wallet. + wallet_password: The password for new wallet. + """ + wallet = neo3_wallet.Wallet() + account = neo3_wallet.Account.create_new(wallet_password) + wallet.account_add(account) + with open(wallet_path, "w") as out: + json.dump(wallet.to_json(), out) + logger.info(f"Init new wallet: {wallet_path}, address: {account.address}") + + +def get_last_address_from_wallet(wallet_path: str, wallet_password: str): + """ + Extracting the last address from the given wallet. + Args: + wallet_path: The path to the wallet to extract address from. + wallet_password: The password for the given wallet. + Returns: + The address for the wallet. 
+ """ + with open(wallet_path) as wallet_file: + wallet = neo3_wallet.Wallet.from_json(json.load(wallet_file), password=wallet_password) + address = wallet.accounts[-1].address + logger.info(f"got address: {address}") + return address \ No newline at end of file From b50c4cba7bf6e9542a3a55c7ae76ed5c49c94d64 Mon Sep 17 00:00:00 2001 From: Vladimir Avdeev Date: Wed, 26 Oct 2022 12:57:40 +0300 Subject: [PATCH 021/363] Bump version 0.2.1 -> 0.2.2 Signed-off-by: Vladimir Avdeev --- pyproject.toml | 4 ++-- src/neofs_testlib/__init__.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index cb9ddb7..064cd59 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "neofs-testlib" -version = "0.2.1" +version = "0.2.2" description = "Building blocks and utilities to facilitate development of automated tests for NeoFS system" readme = "README.md" authors = [{ name = "NSPCC", email = "info@nspcc.ru" }] @@ -48,7 +48,7 @@ line-length = 100 target-version = ["py39"] [tool.bumpver] -current_version = "0.2.1" +current_version = "0.2.2" version_pattern = "MAJOR.MINOR.PATCH" commit_message = "Bump version {old_version} -> {new_version}" commit = true diff --git a/src/neofs_testlib/__init__.py b/src/neofs_testlib/__init__.py index 3ced358..b5fdc75 100644 --- a/src/neofs_testlib/__init__.py +++ b/src/neofs_testlib/__init__.py @@ -1 +1 @@ -__version__ = "0.2.1" +__version__ = "0.2.2" From 9af8f89305fb014fe7bf40c7d65d9d49f5aa26c8 Mon Sep 17 00:00:00 2001 From: Vladimir Avdeev Date: Mon, 31 Oct 2022 12:31:28 +0300 Subject: [PATCH 022/363] Add storagegroup, session and sign in neofs_cli lib Signed-off-by: Vladimir Avdeev --- src/neofs_testlib/blockchain/multisig.py | 6 +- .../blockchain/role_designation.py | 22 +-- src/neofs_testlib/blockchain/rpc_client.py | 8 +- src/neofs_testlib/cli/cli_command.py | 2 + src/neofs_testlib/cli/neofs_cli/cli.py | 9 ++ src/neofs_testlib/cli/neofs_cli/container.py | 14 +- src/neofs_testlib/cli/neofs_cli/netmap.py | 6 +- src/neofs_testlib/cli/neofs_cli/object.py | 16 +- src/neofs_testlib/cli/neofs_cli/session.py | 41 +++++ .../cli/neofs_cli/storagegroup.py | 147 ++++++++++++++++++ src/neofs_testlib/cli/neofs_cli/util.py | 56 +++++++ 11 files changed, 290 insertions(+), 37 deletions(-) create mode 100644 src/neofs_testlib/cli/neofs_cli/session.py create mode 100644 src/neofs_testlib/cli/neofs_cli/storagegroup.py create mode 100644 src/neofs_testlib/cli/neofs_cli/util.py diff --git a/src/neofs_testlib/blockchain/multisig.py b/src/neofs_testlib/blockchain/multisig.py index 9dafd72..229f2a0 100644 --- a/src/neofs_testlib/blockchain/multisig.py +++ b/src/neofs_testlib/blockchain/multisig.py @@ -1,5 +1,3 @@ -from typing import List - from neofs_testlib.cli import NeoGo @@ -14,8 +12,8 @@ class Multisig: contract_hash: str, contract_args: str, multisig_hash: str, - wallets: List[str], - passwords: List[str], + wallets: list[str], + passwords: list[str], address: str, endpoint: str, ) -> None: diff --git a/src/neofs_testlib/blockchain/role_designation.py b/src/neofs_testlib/blockchain/role_designation.py index a97438e..cfbce29 100644 --- a/src/neofs_testlib/blockchain/role_designation.py +++ b/src/neofs_testlib/blockchain/role_designation.py @@ -1,6 +1,6 @@ import json from time import sleep -from typing import List, Optional +from typing import Optional from cli import NeoGo from shell import Shell @@ -24,7 +24,7 @@ class RoleDesignation: def set_notary_nodes( self, addr: str, 
- pubkeys: List[str], + pubkeys: list[str], script_hash: str, wallet: str, passwd: str, @@ -47,7 +47,7 @@ class RoleDesignation: def set_inner_ring( self, addr: str, - pubkeys: List[str], + pubkeys: list[str], script_hash: str, wallet: str, passwd: str, @@ -70,7 +70,7 @@ class RoleDesignation: def set_oracles( self, addr: str, - pubkeys: List[str], + pubkeys: list[str], script_hash: str, wallet: str, passwd: str, @@ -92,10 +92,10 @@ class RoleDesignation: def set_notary_nodes_multisig_tx( self, - pubkeys: List[str], + pubkeys: list[str], script_hash: str, - wallets: List[str], - passwords: List[str], + wallets: list[str], + passwords: list[str], address: str, endpoint: str, invoke_tx_file: str, @@ -118,10 +118,10 @@ class RoleDesignation: def set_inner_ring_multisig_tx( self, - pubkeys: List[str], + pubkeys: list[str], script_hash: str, - wallets: List[str], - passwords: List[str], + wallets: list[str], + passwords: list[str], address: str, endpoint: str, invoke_tx_file: str, @@ -142,7 +142,7 @@ class RoleDesignation: ) sleep(self.block_period) - def check_candidates(self, contract_hash: str, endpoint: str) -> Optional[List[str]]: + def check_candidates(self, contract_hash: str, endpoint: str) -> Optional[list[str]]: out = self.neogo.contract.testinvokefunction( scripthash=contract_hash, method="innerRingCandidates", diff --git a/src/neofs_testlib/blockchain/rpc_client.py b/src/neofs_testlib/blockchain/rpc_client.py index b4e85c1..0ca0212 100644 --- a/src/neofs_testlib/blockchain/rpc_client.py +++ b/src/neofs_testlib/blockchain/rpc_client.py @@ -1,6 +1,6 @@ import json import logging -from typing import Any, Dict, List, Optional +from typing import Any, Dict, Optional import requests @@ -29,8 +29,8 @@ class RPCClient: self, sc_hash: str, function: str, - params: Optional[List] = None, - signers: Optional[List] = None, + params: Optional[list] = None, + signers: Optional[list] = None, ) -> Dict[str, Any]: return self._call_endpoint( "invokefunction", params=[sc_hash, function, params or [], signers or []] @@ -75,6 +75,6 @@ class RPCClient: ) from exc -def _build_payload(method, params: Optional[List] = None): +def _build_payload(method, params: Optional[list] = None): payload = json.dumps({"jsonrpc": "2.0", "method": method, "params": params or [], "id": 1}) return payload.replace("'", '"') diff --git a/src/neofs_testlib/cli/cli_command.py b/src/neofs_testlib/cli/cli_command.py index 772b9da..d3ce86d 100644 --- a/src/neofs_testlib/cli/cli_command.py +++ b/src/neofs_testlib/cli/cli_command.py @@ -17,6 +17,8 @@ class CliCommand: "doc_type": "type", "to_address": "to", "from_address": "from", + "to_file": "to", + "from_file": "from", } def __init__(self, shell: Shell, cli_exec_path: str, **base_params): diff --git a/src/neofs_testlib/cli/neofs_cli/cli.py b/src/neofs_testlib/cli/neofs_cli/cli.py index c65d86f..d80c3e8 100644 --- a/src/neofs_testlib/cli/neofs_cli/cli.py +++ b/src/neofs_testlib/cli/neofs_cli/cli.py @@ -5,6 +5,9 @@ from neofs_testlib.cli.neofs_cli.acl import NeofsCliACL from neofs_testlib.cli.neofs_cli.container import NeofsCliContainer from neofs_testlib.cli.neofs_cli.netmap import NeofsCliNetmap from neofs_testlib.cli.neofs_cli.object import NeofsCliObject +from neofs_testlib.cli.neofs_cli.session import NeofsCliSession +from neofs_testlib.cli.neofs_cli.storagegroup import NeofsCliStorageGroup +from neofs_testlib.cli.neofs_cli.util import NeofsCliUtil from neofs_testlib.cli.neofs_cli.version import NeofsCliVersion from neofs_testlib.shell import Shell @@ -15,6 +18,9 @@ class 
NeofsCli: container: Optional[NeofsCliContainer] = None netmap: Optional[NeofsCliNetmap] = None object: Optional[NeofsCliObject] = None + session: Optional[NeofsCliSession] = None + storagegroup: Optional[NeofsCliStorageGroup] = None + util: Optional[NeofsCliUtil] = None version: Optional[NeofsCliVersion] = None def __init__(self, shell: Shell, neofs_cli_exec_path: str, config_file: Optional[str] = None): @@ -23,4 +29,7 @@ class NeofsCli: self.container = NeofsCliContainer(shell, neofs_cli_exec_path, config=config_file) self.netmap = NeofsCliNetmap(shell, neofs_cli_exec_path, config=config_file) self.object = NeofsCliObject(shell, neofs_cli_exec_path, config=config_file) + self.session = NeofsCliSession(shell, neofs_cli_exec_path, config=config_file) + self.storagegroup = NeofsCliStorageGroup(shell, neofs_cli_exec_path, config=config_file) + self.util = NeofsCliUtil(shell, neofs_cli_exec_path, config=config_file) self.version = NeofsCliVersion(shell, neofs_cli_exec_path, config=config_file) diff --git a/src/neofs_testlib/cli/neofs_cli/container.py b/src/neofs_testlib/cli/neofs_cli/container.py index 270d820..ca0b081 100644 --- a/src/neofs_testlib/cli/neofs_cli/container.py +++ b/src/neofs_testlib/cli/neofs_cli/container.py @@ -42,7 +42,7 @@ class NeofsCliContainer(CliCommand): subnet: String representation of container subnetwork. ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Request X-Headers in form of Key=Value. + xhdr: Dict with request X-Headers. Returns: Command's result. @@ -77,7 +77,7 @@ class NeofsCliContainer(CliCommand): session: Path to a JSON-encoded container session token. ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Request X-Headers in form of Key=Value. + xhdr: Dict with request X-Headers. Returns: Command's result. @@ -112,7 +112,7 @@ class NeofsCliContainer(CliCommand): to: Path to dump encoded container. ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Request X-Headers in form of Key=Value. + xhdr: Dict with request X-Headers. Returns: Command's result. @@ -146,7 +146,7 @@ class NeofsCliContainer(CliCommand): session: Path to a JSON-encoded container session token. ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Request X-Headers in form of Key=Value. + xhdr: Dict with request X-Headers. Returns: Command's result. @@ -176,7 +176,7 @@ class NeofsCliContainer(CliCommand): rpc_endpoint: Remote node address (as 'multiaddr' or ':'). ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Request X-Headers in form of Key=Value. + xhdr: Dict with request X-Headers. Returns: Command's result. @@ -204,7 +204,7 @@ class NeofsCliContainer(CliCommand): rpc_endpoint: Remote node address (as 'multiaddr' or ':'). ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Request X-Headers in form of Key=Value. + xhdr: Dict with request X-Headers. Returns: Command's result. @@ -239,7 +239,7 @@ class NeofsCliContainer(CliCommand): table: Path to file with JSON or binary encoded EACL table. ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Request X-Headers in form of Key=Value. 
+ xhdr: Dict with request X-Headers. Returns: Command's result. diff --git a/src/neofs_testlib/cli/neofs_cli/netmap.py b/src/neofs_testlib/cli/neofs_cli/netmap.py index 7144b8f..0100fc4 100644 --- a/src/neofs_testlib/cli/neofs_cli/netmap.py +++ b/src/neofs_testlib/cli/neofs_cli/netmap.py @@ -23,7 +23,7 @@ class NeofsCliNetmap(CliCommand): rpc_endpoint: Remote node address (as 'multiaddr' or ':'). ttl: TTL value in request meta header (default 2). wallet: Path to the wallet or binary key. - xhdr: Request X-Headers in form of Key=Value. + xhdr: Dict with request X-Headers. Returns: Command's result. @@ -81,7 +81,7 @@ class NeofsCliNetmap(CliCommand): rpc_endpoint: Remote node address (as 'multiaddr' or ':'). ttl: TTL value in request meta header (default 2). wallet: Path to the wallet or binary key. - xhdr: Request X-Headers in form of Key=Value. + xhdr: Dict with request X-Headers. Returns: Command's result. @@ -109,7 +109,7 @@ class NeofsCliNetmap(CliCommand): rpc_endpoint: Remote node address (as 'multiaddr' or ':'). ttl: TTL value in request meta header (default 2). wallet: Path to the wallet or binary key. - xhdr: Request X-Headers in form of Key=Value. + xhdr: Dict with request X-Headers. Returns: Command's result. diff --git a/src/neofs_testlib/cli/neofs_cli/object.py b/src/neofs_testlib/cli/neofs_cli/object.py index ba2cbce..0e54595 100644 --- a/src/neofs_testlib/cli/neofs_cli/object.py +++ b/src/neofs_testlib/cli/neofs_cli/object.py @@ -29,7 +29,7 @@ class NeofsCliObject(CliCommand): session: Path to a JSON-encoded container session token. ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Request X-Headers in form of Key=Value. + xhdr: Dict with request X-Headers. Returns: Command's result. @@ -71,7 +71,7 @@ class NeofsCliObject(CliCommand): session: Path to a JSON-encoded container session token. ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Request X-Headers in form of Key=Value. + xhdr: Dict with request X-Headers. Returns: Command's result. @@ -109,7 +109,7 @@ class NeofsCliObject(CliCommand): ttl: TTL value in request meta header (default 2). hash_type: Hash type. Either 'sha256' or 'tz' (default "sha256"). wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Request X-Headers in form of Key=Value. + xhdr: Dict with request X-Headers. Returns: Command's result. @@ -155,7 +155,7 @@ class NeofsCliObject(CliCommand): session: Path to a JSON-encoded container session token. ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Request X-Headers in form of Key=Value. + xhdr: Dict with request X-Headers. Returns: Command's result. @@ -191,7 +191,7 @@ class NeofsCliObject(CliCommand): session: Path to a JSON-encoded container session token. ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Request X-Headers in form of Key=Value. + xhdr: Dict with request X-Headers. Returns: Command's result. @@ -238,7 +238,7 @@ class NeofsCliObject(CliCommand): session: Path to a JSON-encoded container session token. ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Request X-Headers in form of Key=Value. + xhdr: Dict with request X-Headers. Returns: Command's result. 
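For reference, a usage sketch of the dict-based X-Headers documented above; the
endpoint and identifiers are placeholders, and the wrapper serializes the dict
to the CLI's Key=Value form (see the expected --xhdr 'param1=value1,param2=value2'
command line in tests/test_cli.py later in this series):

    neofs_cli.object.get(
        rpc_endpoint="<rpc-endpoint>",
        wallet="wallet.json",
        cid="<container-id>",
        oid="<object-id>",
        xhdr={"param1": "value1", "param2": "value2"},
    )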
@@ -280,7 +280,7 @@ class NeofsCliObject(CliCommand): session: Path to a JSON-encoded container session token. ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Request X-Headers in form of Key=Value. + xhdr: Dict with request X-Headers. Returns: Command's result. @@ -320,7 +320,7 @@ class NeofsCliObject(CliCommand): session: Path to a JSON-encoded container session token. ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. - xhdr: Request X-Headers in form of Key=Value. + xhdr: Dict with request X-Headers. Returns: Command's result. diff --git a/src/neofs_testlib/cli/neofs_cli/session.py b/src/neofs_testlib/cli/neofs_cli/session.py new file mode 100644 index 0000000..4e33a7a --- /dev/null +++ b/src/neofs_testlib/cli/neofs_cli/session.py @@ -0,0 +1,41 @@ +from typing import Optional + +from neofs_testlib.cli.cli_command import CliCommand +from neofs_testlib.shell import CommandResult + + +class NeofsCliSession(CliCommand): + def create( + self, + rpc_endpoint: str, + wallet: str, + wallet_password: str, + out: str, + lifetime: Optional[int] = None, + address: Optional[str] = None, + json: Optional[bool] = False, + ) -> CommandResult: + """ + Create session token. + + Args: + address: Address of wallet account. + out: File to write session token to. + lifetime: Number of epochs for token to stay valid. + json: Output token in JSON. + wallet: WIF (NEP-2) string or path to the wallet or binary key. + wallet_password: Wallet password. + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + + Returns: + Command's result. + """ + return self._execute_with_password( + "session create", + wallet_password, + **{ + param: value + for param, value in locals().items() + if param not in ["self", "wallet_password"] + }, + ) diff --git a/src/neofs_testlib/cli/neofs_cli/storagegroup.py b/src/neofs_testlib/cli/neofs_cli/storagegroup.py new file mode 100644 index 0000000..514abf5 --- /dev/null +++ b/src/neofs_testlib/cli/neofs_cli/storagegroup.py @@ -0,0 +1,147 @@ +from typing import Optional + +from neofs_testlib.cli.cli_command import CliCommand +from neofs_testlib.shell import CommandResult + + +class NeofsCliStorageGroup(CliCommand): + def put( + self, + rpc_endpoint: str, + wallet: str, + cid: str, + members: list[str], + ttl: Optional[int] = None, + bearer: Optional[str] = None, + lifetime: Optional[int] = None, + address: Optional[str] = None, + xhdr: Optional[dict] = None, + ) -> CommandResult: + """ + Put storage group to NeoFS. + + Args: + address: Address of wallet account. + bearer: File with signed JSON or binary encoded bearer token. + cid: Container ID. + members: ID list of storage group members. + lifetime: Storage group lifetime in epochs. + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + ttl: TTL value in request meta header. + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Dict with request X-Headers. + + Returns: + Command's result. 
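+        Example:
+            A sketch; the endpoint and identifiers are placeholders:
+
+                cli.storagegroup.put(
+                    rpc_endpoint="<rpc-endpoint>",
+                    wallet="wallet.json",
+                    cid="<container-id>",
+                    members=["<object-id-1>", "<object-id-2>"],
+                    lifetime=10,
+                )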
+ """ + members = ",".join(members) + return self._execute( + "storagegroup put", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def get( + self, + rpc_endpoint: str, + wallet: str, + cid: str, + id: str, + raw: Optional[bool] = False, + ttl: Optional[int] = None, + bearer: Optional[str] = None, + lifetime: Optional[int] = None, + address: Optional[str] = None, + xhdr: Optional[dict] = None, + ) -> CommandResult: + """ + Get storage group from NeoFS. + + Args: + address: Address of wallet account. + bearer: File with signed JSON or binary encoded bearer token. + cid: Container ID. + id: Storage group identifier. + raw: Set raw request option. + lifetime: Storage group lifetime in epochs. + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + ttl: TTL value in request meta header. + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Dict with request X-Headers. + + Returns: + Command's result. + """ + return self._execute( + "storagegroup get", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def list( + self, + rpc_endpoint: str, + wallet: str, + cid: str, + raw: Optional[bool] = False, + ttl: Optional[int] = None, + bearer: Optional[str] = None, + lifetime: Optional[int] = None, + address: Optional[str] = None, + xhdr: Optional[dict] = None, + ) -> CommandResult: + """ + List storage groups in NeoFS container. + + Args: + address: Address of wallet account. + bearer: File with signed JSON or binary encoded bearer token. + cid: Container ID. + raw: Set raw request option. + lifetime: Storage group lifetime in epochs. + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + ttl: TTL value in request meta header. + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Dict with request X-Headers. + + Returns: + Command's result. + """ + return self._execute( + "storagegroup list", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def delete( + self, + rpc_endpoint: str, + wallet: str, + cid: str, + id: str, + raw: Optional[bool] = False, + ttl: Optional[int] = None, + bearer: Optional[str] = None, + lifetime: Optional[int] = None, + address: Optional[str] = None, + xhdr: Optional[dict] = None, + ) -> CommandResult: + """ + Delete storage group from NeoFS. + + Args: + address: Address of wallet account. + bearer: File with signed JSON or binary encoded bearer token. + cid: Container ID. + id: Storage group identifier. + raw: Set raw request option. + lifetime: Storage group lifetime in epochs. + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + ttl: TTL value in request meta header. + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Dict with request X-Headers. + + Returns: + Command's result. 
+ """ + return self._execute( + "storagegroup delete", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/neofs_testlib/cli/neofs_cli/util.py b/src/neofs_testlib/cli/neofs_cli/util.py new file mode 100644 index 0000000..786c156 --- /dev/null +++ b/src/neofs_testlib/cli/neofs_cli/util.py @@ -0,0 +1,56 @@ +from typing import Optional + +from neofs_testlib.cli.cli_command import CliCommand +from neofs_testlib.shell import CommandResult + + +class NeofsCliUtil(CliCommand): + def sign_bearer_token( + self, + wallet: str, + from_file: str, + to_file: str, + address: Optional[str] = None, + json: Optional[bool] = False, + ) -> CommandResult: + """ + Sign bearer token to use it in requests. + + Args: + address: Address of wallet account. + from_file: File with JSON or binary encoded bearer token to sign. + to_file: File to dump signed bearer token (default: binary encoded). + json: Dump bearer token in JSON encoding. + wallet: WIF (NEP-2) string or path to the wallet or binary key. + + Returns: + Command's result. + """ + return self._execute( + "util sign bearer-token", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def sign_session_token( + self, + wallet: str, + from_file: str, + to_file: str, + address: Optional[str] = None, + ) -> CommandResult: + """ + Sign session token to use it in requests. + + Args: + address: Address of wallet account. + from_file: File with JSON encoded session token to sign. + to_file: File to dump signed bearer token (default: binary encoded). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + + Returns: + Command's result. + """ + return self._execute( + "util sign session-token", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) From af10bbfea670d89fa0f9f5849288801e161b02fe Mon Sep 17 00:00:00 2001 From: Vladimir Avdeev Date: Mon, 31 Oct 2022 12:35:45 +0300 Subject: [PATCH 023/363] Bump version 0.2.2 -> 0.3.0 Signed-off-by: Vladimir Avdeev --- pyproject.toml | 4 ++-- src/neofs_testlib/__init__.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 064cd59..823fe0a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "neofs-testlib" -version = "0.2.2" +version = "0.3.0" description = "Building blocks and utilities to facilitate development of automated tests for NeoFS system" readme = "README.md" authors = [{ name = "NSPCC", email = "info@nspcc.ru" }] @@ -48,7 +48,7 @@ line-length = 100 target-version = ["py39"] [tool.bumpver] -current_version = "0.2.2" +current_version = "0.3.0" version_pattern = "MAJOR.MINOR.PATCH" commit_message = "Bump version {old_version} -> {new_version}" commit = true diff --git a/src/neofs_testlib/__init__.py b/src/neofs_testlib/__init__.py index b5fdc75..493f741 100644 --- a/src/neofs_testlib/__init__.py +++ b/src/neofs_testlib/__init__.py @@ -1 +1 @@ -__version__ = "0.2.2" +__version__ = "0.3.0" From dc0f115a73c318cac21bbc84348c4381e4dbf746 Mon Sep 17 00:00:00 2001 From: Vladimir Domnich Date: Wed, 2 Nov 2022 12:43:39 +0300 Subject: [PATCH 024/363] Update instructions for version tags Signed-off-by: Vladimir Domnich --- CONTRIBUTING.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 53d9d6b..c034916 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -206,10 +206,10 @@ To update a version of the library, 
please, take the following steps: $ git commit --amend --signoff ``` 4. Push the changes to remote. -5. After this commit is merged to upstream, create a tag on the master branch of upstream: +5. After this commit is merged to upstream, create a tag on the master branch of upstream. Tag name should be formatted as "v{new_version}": ```shell - $ git tag - $ git push upstream + $ git tag v + $ git push upstream v ``` ## Building and publishing package From c6603dbf84c32945dabbc8c097ff661b7631047f Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Thu, 3 Nov 2022 12:18:57 +0300 Subject: [PATCH 025/363] Add option to skip log output to logger Signed-off-by: Andrey Berezin --- src/neofs_testlib/shell/interfaces.py | 2 ++ src/neofs_testlib/shell/ssh_shell.py | 10 +++++++--- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/src/neofs_testlib/shell/interfaces.py b/src/neofs_testlib/shell/interfaces.py index 52e77a3..ca02582 100644 --- a/src/neofs_testlib/shell/interfaces.py +++ b/src/neofs_testlib/shell/interfaces.py @@ -44,12 +44,14 @@ class CommandOptions: timeout: Timeout for command execution (in seconds). check: Controls whether to check return code of the command. Set to False to ignore non-zero return codes. + no_log: Do not print output to logger if True. """ interactive_inputs: Optional[list[InteractiveInput]] = None close_stdin: bool = False timeout: int = 30 check: bool = True + no_log: bool = False @dataclass diff --git a/src/neofs_testlib/shell/ssh_shell.py b/src/neofs_testlib/shell/ssh_shell.py index 4e918c1..2b0bae3 100644 --- a/src/neofs_testlib/shell/ssh_shell.py +++ b/src/neofs_testlib/shell/ssh_shell.py @@ -36,13 +36,15 @@ class HostIsNotAvailable(Exception): def log_command(func): @wraps(func) - def wrapper(shell: "SSHShell", command: str, *args, **kwargs) -> CommandResult: + def wrapper( + shell: "SSHShell", command: str, options: CommandOptions, *args, **kwargs + ) -> CommandResult: command_info = command.removeprefix("$ProgressPreference='SilentlyContinue'\n") with reporter.step(command_info): logger.info(f'Execute command "{command}" on "{shell.host}"') start_time = datetime.utcnow() - result = func(shell, command, *args, **kwargs) + result = func(shell, command, options, *args, **kwargs) end_time = datetime.utcnow() elapsed_time = end_time - start_time @@ -55,7 +57,9 @@ def log_command(func): f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}" ) - logger.info(log_message) + if not options.no_log: + logger.info(log_message) + reporter.attach(log_message, "SSH command.txt") return result From 8016ad4b862c57e53ce958d56078482d1502cf87 Mon Sep 17 00:00:00 2001 From: Vladimir Avdeev Date: Thu, 3 Nov 2022 17:03:06 +0300 Subject: [PATCH 026/363] Bump version 0.3.0 -> 0.4.0 Signed-off-by: Vladimir Avdeev --- pyproject.toml | 4 ++-- src/neofs_testlib/__init__.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 823fe0a..6e7e962 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "neofs-testlib" -version = "0.3.0" +version = "0.4.0" description = "Building blocks and utilities to facilitate development of automated tests for NeoFS system" readme = "README.md" authors = [{ name = "NSPCC", email = "info@nspcc.ru" }] @@ -48,7 +48,7 @@ line-length = 100 target-version = ["py39"] [tool.bumpver] -current_version = "0.3.0" +current_version = "0.4.0" version_pattern = "MAJOR.MINOR.PATCH" commit_message = "Bump version 
{old_version} -> {new_version}" commit = true diff --git a/src/neofs_testlib/__init__.py b/src/neofs_testlib/__init__.py index 493f741..6a9beea 100644 --- a/src/neofs_testlib/__init__.py +++ b/src/neofs_testlib/__init__.py @@ -1 +1 @@ -__version__ = "0.3.0" +__version__ = "0.4.0" From cbfcbb559cb2ff717a2afa863fbbbb6215fc1068 Mon Sep 17 00:00:00 2001 From: "a.lipay" Date: Wed, 9 Nov 2022 18:24:32 +0300 Subject: [PATCH 027/363] fix interactive ssh and tests Signed-off-by: a.lipay --- src/neofs_testlib/shell/ssh_shell.py | 4 +++- tests/test_ssh_shell.py | 11 ++++++----- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/src/neofs_testlib/shell/ssh_shell.py b/src/neofs_testlib/shell/ssh_shell.py index 2b0bae3..f4870b4 100644 --- a/src/neofs_testlib/shell/ssh_shell.py +++ b/src/neofs_testlib/shell/ssh_shell.py @@ -142,7 +142,9 @@ class SSHShell(Shell): @log_command def _exec_interactive(self, command: str, options: CommandOptions) -> CommandResult: - stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout) + stdin, stdout, stderr = self._connection.exec_command( + command, timeout=options.timeout, get_pty=True + ) for interactive_input in options.interactive_inputs: input = interactive_input.input if not input.endswith("\n"): diff --git a/tests/test_ssh_shell.py b/tests/test_ssh_shell.py index 849a2fd..a57b479 100644 --- a/tests/test_ssh_shell.py +++ b/tests/test_ssh_shell.py @@ -41,9 +41,8 @@ class TestSSHShellInteractive(TestCase): f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs) ) - # TODO: we have inconsistency with local shell here, ssh does not echo input into stdout self.assertEqual(0, result.return_code) - self.assertEqual(["Password:", "test"], get_output_lines(result)) + self.assertEqual(["Password: test", "test"], get_output_lines(result)) self.assertEqual("", result.stderr) def test_command_with_several_prompts(self): @@ -60,9 +59,10 @@ class TestSSHShellInteractive(TestCase): f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs) ) - # TODO: we have inconsistency with local shell here, ssh does not echo input into stdout self.assertEqual(0, result.return_code) - self.assertEqual(["Input1:", "test1", "Input2:", "test2"], get_output_lines(result)) + self.assertEqual( + ["Input1: test1", "test1", "Input2: test2", "test2"], get_output_lines(result) + ) self.assertEqual("", result.stderr) def test_invalid_command_with_check(self): @@ -73,7 +73,7 @@ class TestSSHShellInteractive(TestCase): self.shell.exec(f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs)) error = format_error_details(raised.exception) - self.assertIn("Error", error) + self.assertIn("SyntaxError", error) self.assertIn("return code: 1", error) def test_invalid_command_without_check(self): @@ -84,6 +84,7 @@ class TestSSHShellInteractive(TestCase): f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs, check=False), ) + self.assertIn("SyntaxError", result.stdout) self.assertEqual(1, result.return_code) def test_non_existing_binary(self): From 841a61fc305ed12852f4193de8dc1a4c944291fe Mon Sep 17 00:00:00 2001 From: Vladimir Avdeev Date: Thu, 10 Nov 2022 16:29:52 +0300 Subject: [PATCH 028/363] Add unit tests for utils Signed-off-by: Vladimir Avdeev --- src/neofs_testlib/utils/wallet.py | 2 +- tests/test_ulils.py | 74 +++++++++++++++++++++++++++++++ 2 files changed, 75 insertions(+), 1 deletion(-) create mode 100644 tests/test_ulils.py diff --git a/src/neofs_testlib/utils/wallet.py b/src/neofs_testlib/utils/wallet.py index 
656c41f..9cd248b 100644 --- a/src/neofs_testlib/utils/wallet.py +++ b/src/neofs_testlib/utils/wallet.py @@ -34,4 +34,4 @@ def get_last_address_from_wallet(wallet_path: str, wallet_password: str): wallet = neo3_wallet.Wallet.from_json(json.load(wallet_file), password=wallet_password) address = wallet.accounts[-1].address logger.info(f"got address: {address}") - return address \ No newline at end of file + return address diff --git a/tests/test_ulils.py b/tests/test_ulils.py new file mode 100644 index 0000000..50cf5fc --- /dev/null +++ b/tests/test_ulils.py @@ -0,0 +1,74 @@ +import json +import os +from unittest import TestCase +from uuid import uuid4 + +from neo3 import wallet as neo3_wallet + +from neofs_testlib.utils import converters, wallet + + +class TestUtils(TestCase): + def test_converters_str_to_ascii_hex(self): + source_str = "" + result_str = "" + self.assertEqual(converters.str_to_ascii_hex(source_str), result_str) + + source_str = '"test_data" f0r ^convert*' + result_str = "22746573745f646174612220663072205e636f6e766572742a" + self.assertEqual(converters.str_to_ascii_hex(source_str), result_str) + + source_str = "" + result_bytes = b"" + self.assertEqual(converters.ascii_hex_to_str(source_str), result_bytes) + + source_str = "22746573745f646174612220663072205e636f6e766572742a" + result_bytes = b'"test_data" f0r ^convert*' + self.assertEqual(converters.ascii_hex_to_str(source_str), result_bytes) + + def test_process_b64_bytearray_reverse(self): + source_str = "" + result_bytes = b"" + self.assertEqual(converters.process_b64_bytearray_reverse(source_str), result_bytes) + + source_str = "InRlc3RfZGF0YSIgZjByIF5jb252ZXJ0Kg==" + result_bytes = b"2a747265766e6f635e207230662022617461645f7473657422" + self.assertEqual(converters.process_b64_bytearray_reverse(source_str), result_bytes) + + def test_process_b64_bytearray(self): + source_str = "" + result_bytes = b"" + self.assertEqual(converters.process_b64_bytearray(source_str), result_bytes) + + source_str = "InRlc3RfZGF0YSIgZjByIF5jb252ZXJ0Kg==" + result_bytes = b"22746573745f646174612220663072205e636f6e766572742a" + self.assertEqual(converters.process_b64_bytearray(source_str), result_bytes) + + def test_contract_hash_to_address(self): + source_str = "d01a381aae45f1ed181db9d554cc5ccc69c69f4e" + result_str = "NT5hJ5peVmvYdZCsFKUM5MTcEGw5TB4k89" + self.assertEqual(converters.contract_hash_to_address(source_str), result_str) + + def test_init_wallet(self): + wallet_file_path = f"{str(uuid4())}.json" + for password in ("", "password"): + wrong_password = "wrong_password" + wallet.init_wallet(wallet_file_path, password) + self.assertTrue(os.path.exists(wallet_file_path)) + with open(wallet_file_path, "r") as wallet_file: + neo3_wallet.Wallet.from_json(json.load(wallet_file), password=password) + with self.assertRaises(Exception): + neo3_wallet.Wallet.from_json(json.load(wallet_file), password=wrong_password) + os.unlink(wallet_file_path) + + def test_get_last_address_from_wallet(self): + wallet_file_path = f"{str(uuid4())}.json" + for password in ("", "password"): + wallet.init_wallet(wallet_file_path, password) + with open(wallet_file_path, "r") as wallet_file: + wlt = neo3_wallet.Wallet.from_json(json.load(wallet_file), password=password) + last_address = wlt.accounts[-1].address + self.assertEqual( + wallet.get_last_address_from_wallet(wallet_file_path, password), last_address + ) + os.unlink(wallet_file_path) From 3816a3c7f1086486c2d61ed55d68985fdb8946d1 Mon Sep 17 00:00:00 2001 From: Vladimir Avdeev Date: Tue, 8 Nov 2022 15:58:15 
+0300 Subject: [PATCH 029/363] Add unit tests for cli helpers Signed-off-by: Vladimir Avdeev --- tests/test_cli.py | 174 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 174 insertions(+) create mode 100644 tests/test_cli.py diff --git a/tests/test_cli.py b/tests/test_cli.py new file mode 100644 index 0000000..f607121 --- /dev/null +++ b/tests/test_cli.py @@ -0,0 +1,174 @@ +from unittest import TestCase +from unittest.mock import Mock + +from neofs_testlib.cli import NeofsAdm, NeofsCli, NeoGo +from neofs_testlib.cli.cli_command import CliCommand +from neofs_testlib.shell.interfaces import CommandOptions, InteractiveInput + + +class TestCli(TestCase): + neofs_adm_exec_path = "neo-adm-exec" + neofs_go_exec_path = "neo-go-exec" + neofs_cli_exec_path = "neo-cli-exec" + + address = "0x0000000000000000000" + addresses = ["0x000000", "0xDEADBEEF", "0xBABECAFE"] + amount = 100 + file1 = "file_1" + file2 = "directory/file_2" + manifest = "manifest1" + token = "GAS" + rpc_endpoint = "endpoint-1" + sysgas: float = 0.001 + wallet = "wallet1" + wallet_password = "P@$$w0rd" + config_file = "config.yml" + basic_acl = "1FBFBFFF" + policy = "policy1" + timeout = 20 + xhdr = {"param1": "value1", "param2": "value2"} + + def test_container_create(self): + shell = Mock() + neofs_cli = NeofsCli( + config_file=self.config_file, + neofs_cli_exec_path=self.neofs_cli_exec_path, + shell=shell, + ) + neofs_cli.container.create( + rpc_endpoint=self.rpc_endpoint, + wallet=self.wallet, + basic_acl=self.basic_acl, + policy=self.policy, + await_mode=True, + xhdr=self.xhdr, + ) + + xhdr = ",".join(f"{param}={value}" for param, value in self.xhdr.items()) + expected_command = ( + f"{self.neofs_cli_exec_path} --config {self.config_file} container create " + f"--rpc-endpoint '{self.rpc_endpoint}' --wallet '{self.wallet}' " + f"--basic-acl '{self.basic_acl}' --await --policy '{self.policy}' " + f"--xhdr '{xhdr}'" + ) + + shell.exec.assert_called_once_with(expected_command) + + def test_bad_wallet_argument(self): + shell = Mock() + neo_go = NeoGo( + shell=shell, config_path=self.config_file, neo_go_exec_path=self.neofs_go_exec_path + ) + with self.assertRaises(Exception) as exc_msg: + neo_go.contract.add_group( + address=self.address, + manifest=self.manifest, + wallet_password=self.wallet_password, + ) + self.assertEqual(CliCommand.WALLET_SOURCE_ERROR_MSG, str(exc_msg.exception)) + + with self.assertRaises(Exception) as exc_msg: + neo_go.contract.add_group( + wallet=self.wallet, + wallet_password=self.wallet_password, + wallet_config=self.config_file, + address=self.address, + manifest=self.manifest, + ) + self.assertEqual(CliCommand.WALLET_SOURCE_ERROR_MSG, str(exc_msg.exception)) + + with self.assertRaises(Exception) as exc_msg: + neo_go.contract.add_group( + wallet=self.wallet, + address=self.address, + manifest=self.manifest, + ) + self.assertEqual(CliCommand.WALLET_PASSWD_ERROR_MSG, str(exc_msg.exception)) + + def test_wallet_sign(self): + shell = Mock() + neo_go = NeoGo( + shell=shell, config_path=self.config_file, neo_go_exec_path=self.neofs_go_exec_path + ) + neo_go.wallet.sign( + input_file=self.file1, + out=self.file2, + rpc_endpoint=self.rpc_endpoint, + address=self.address, + wallet=self.wallet, + wallet_password=self.wallet_password, + timeout=self.timeout, + ) + + expected_command = ( + f"{self.neofs_go_exec_path} --config_path {self.config_file} wallet sign " + f"--input-file '{self.file1}' --address '{self.address}' " + f"--rpc-endpoint '{self.rpc_endpoint}' --wallet '{self.wallet}' " + f"--out 
'{self.file2}' --timeout '{self.timeout}s'" + ) + + shell.exec.assert_called_once_with( + expected_command, + options=CommandOptions( + interactive_inputs=[ + InteractiveInput(prompt_pattern="assword", input=self.wallet_password) + ] + ), + ) + + def test_subnet_create(self): + shell = Mock() + neofs_adm = NeofsAdm( + config_file=self.config_file, + neofs_adm_exec_path=self.neofs_adm_exec_path, + shell=shell, + ) + neofs_adm.subnet.create( + address=self.address, + rpc_endpoint=self.rpc_endpoint, + wallet=self.wallet, + notary=True, + ) + + expected_command = ( + f"{self.neofs_adm_exec_path} --config {self.config_file} morph subnet create " + f"--rpc-endpoint '{self.rpc_endpoint}' --address '{self.address}' " + f"--wallet '{self.wallet}' --notary" + ) + + shell.exec.assert_called_once_with(expected_command) + + def test_wallet_nep17_multitransfer(self): + shell = Mock() + neo_go = NeoGo( + shell=shell, config_path=self.config_file, neo_go_exec_path=self.neofs_go_exec_path + ) + neo_go.nep17.multitransfer( + wallet=self.wallet, + token=self.token, + to_address=self.addresses, + sysgas=self.sysgas, + rpc_endpoint=self.rpc_endpoint, + amount=self.amount, + force=True, + from_address=self.address, + timeout=self.timeout, + ) + + to_address = "".join(f" --to '{address}'" for address in self.addresses) + expected_command = ( + f"{self.neofs_go_exec_path} --config_path {self.config_file} " + f"wallet nep17 multitransfer --token '{self.token}'" + f"{to_address} --sysgas '{self.sysgas}' --rpc-endpoint '{self.rpc_endpoint}' " + f"--wallet '{self.wallet}' --from '{self.address}' --force --amount {self.amount} " + f"--timeout '{self.timeout}s'" + ) + + shell.exec.assert_called_once_with(expected_command) + + def test_version(self): + shell = Mock() + neofs_adm = NeofsAdm(shell=shell, neofs_adm_exec_path=self.neofs_adm_exec_path) + neofs_adm.version.get() + + shell.exec.assert_called_once_with(f"{self.neofs_adm_exec_path} --version") From 7b8b286a14a500bc6bb34398342a71a7467e7f6f Mon Sep 17 00:00:00 2001 From: Vladimir Avdeev Date: Thu, 17 Nov 2022 10:14:29 +0300 Subject: [PATCH 030/363] Add unit tests for hosting Signed-off-by: Vladimir Avdeev --- tests/{test_ulils.py => test_converters.py} | 36 +------ tests/test_hosting.py | 113 ++++++++++++++++++++ tests/test_wallet.py | 38 +++++++ 3 files changed, 155 insertions(+), 32 deletions(-) rename tests/{test_ulils.py => test_converters.py} (55%) create mode 100644 tests/test_hosting.py create mode 100644 tests/test_wallet.py diff --git a/tests/test_ulils.py b/tests/test_converters.py similarity index 55% rename from tests/test_ulils.py rename to tests/test_converters.py index 50cf5fc..f453c42 100644 --- a/tests/test_ulils.py +++ b/tests/test_converters.py @@ -1,15 +1,10 @@ -import json -import os from unittest import TestCase -from uuid import uuid4 -from neo3 import wallet as neo3_wallet - -from neofs_testlib.utils import converters, wallet +from neofs_testlib.utils import converters -class TestUtils(TestCase): - def test_converters_str_to_ascii_hex(self): +class TestConverters(TestCase): + def test_str_to_ascii_hex(self): source_str = "" result_str = "" self.assertEqual(converters.str_to_ascii_hex(source_str), result_str) @@ -18,6 +13,7 @@ class TestUtils(TestCase): result_str = "22746573745f646174612220663072205e636f6e766572742a" self.assertEqual(converters.str_to_ascii_hex(source_str), result_str) + def test_ascii_hex_to_str(self): source_str = "" result_bytes = b"" self.assertEqual(converters.ascii_hex_to_str(source_str), result_bytes) @@ -48,27 +44,3 
@@ class TestUtils(TestCase): source_str = "d01a381aae45f1ed181db9d554cc5ccc69c69f4e" result_str = "NT5hJ5peVmvYdZCsFKUM5MTcEGw5TB4k89" self.assertEqual(converters.contract_hash_to_address(source_str), result_str) - - def test_init_wallet(self): - wallet_file_path = f"{str(uuid4())}.json" - for password in ("", "password"): - wrong_password = "wrong_password" - wallet.init_wallet(wallet_file_path, password) - self.assertTrue(os.path.exists(wallet_file_path)) - with open(wallet_file_path, "r") as wallet_file: - neo3_wallet.Wallet.from_json(json.load(wallet_file), password=password) - with self.assertRaises(Exception): - neo3_wallet.Wallet.from_json(json.load(wallet_file), password=wrong_password) - os.unlink(wallet_file_path) - - def test_get_last_address_from_wallet(self): - wallet_file_path = f"{str(uuid4())}.json" - for password in ("", "password"): - wallet.init_wallet(wallet_file_path, password) - with open(wallet_file_path, "r") as wallet_file: - wlt = neo3_wallet.Wallet.from_json(json.load(wallet_file), password=password) - last_address = wlt.accounts[-1].address - self.assertEqual( - wallet.get_last_address_from_wallet(wallet_file_path, password), last_address - ) - os.unlink(wallet_file_path) diff --git a/tests/test_hosting.py b/tests/test_hosting.py new file mode 100644 index 0000000..17cacb4 --- /dev/null +++ b/tests/test_hosting.py @@ -0,0 +1,113 @@ +from unittest import TestCase + +from neofs_testlib.hosting import CLIConfig, Hosting, ServiceConfig + + +class TestHosting(TestCase): + SERVICE_NAME_PREFIX = "service" + HOST1_ADDRESS = "10.10.10.10" + HOST1_PLUGIN = "docker" + HOST1_ATTRIBUTES = {"param1": "value1"} + SERVICE1_ATTRIBUTES = {"rpc_endpoint": "service1_endpoint"} + HOST1_CLIS = [{"name": "cli1", "exec_path": "cli1.exe", "attributes": {"param1": "value1"}}] + SERVICE1 = {"name": f"{SERVICE_NAME_PREFIX}1", "attributes": SERVICE1_ATTRIBUTES} + HOST1_SERVICES = [SERVICE1] + HOST1 = { + "address": HOST1_ADDRESS, + "plugin_name": HOST1_PLUGIN, + "attributes": HOST1_ATTRIBUTES, + "clis": HOST1_CLIS, + "services": HOST1_SERVICES, + } + + HOST2_ADDRESS = "localhost" + HOST2_PLUGIN = "docker" + HOST2_ATTRIBUTES = {"param2": "value2"} + SERVICE2_ATTRIBUTES = {"rpc_endpoint": "service2_endpoint"} + SERVICE3_ATTRIBUTES = {"rpc_endpoint": "service3_endpoint"} + HOST2_CLIS = [{"name": "cli2", "exec_path": "/bin/cli", "attributes": {}}] + SERVICE2 = {"name": f"{SERVICE_NAME_PREFIX}", "attributes": SERVICE2_ATTRIBUTES} + SERVICE3 = {"name": f"text_before_{SERVICE_NAME_PREFIX}3", "attributes": SERVICE3_ATTRIBUTES} + HOST2_SERVICES = [SERVICE2, SERVICE3] + HOST2 = { + "address": HOST2_ADDRESS, + "plugin_name": HOST2_PLUGIN, + "attributes": HOST2_ATTRIBUTES, + "clis": HOST2_CLIS, + "services": HOST2_SERVICES, + } + HOSTING_CONFIG = {"hosts": [HOST1, HOST2]} + + def test_hosting_configure(self): + hosting = Hosting() + hosting.configure(self.HOSTING_CONFIG) + self.assertEqual(len(hosting.hosts), 2) + + def test_get_host_by_address(self): + hosting = Hosting() + hosting.configure(self.HOSTING_CONFIG) + + host1 = hosting.get_host_by_address(self.HOST1_ADDRESS) + self.assertEqual(host1.config.address, self.HOST1_ADDRESS) + self.assertEqual(host1.config.plugin_name, self.HOST1_PLUGIN) + self.assertDictEqual(host1.config.attributes, self.HOST1_ATTRIBUTES) + self.assertListEqual(host1.config.clis, [CLIConfig(**cli) for cli in self.HOST1_CLIS]) + self.assertListEqual( + host1.config.services, [ServiceConfig(**service) for service in self.HOST1_SERVICES] + ) + + host2 = 
hosting.get_host_by_address(self.HOST2_ADDRESS) + self.assertEqual(host2.config.address, self.HOST2_ADDRESS) + self.assertEqual(host2.config.plugin_name, self.HOST2_PLUGIN) + self.assertDictEqual(host2.config.attributes, self.HOST2_ATTRIBUTES) + self.assertListEqual(host2.config.clis, [CLIConfig(**cli) for cli in self.HOST2_CLIS]) + self.assertListEqual( + host2.config.services, [ServiceConfig(**service) for service in self.HOST2_SERVICES] + ) + + def test_get_host_by_service(self): + hosting = Hosting() + hosting.configure(self.HOSTING_CONFIG) + + host_with_service1 = hosting.get_host_by_service(self.SERVICE1["name"]) + host_with_service2 = hosting.get_host_by_service(self.SERVICE2["name"]) + host_with_service3 = hosting.get_host_by_service(self.SERVICE3["name"]) + + self.assertEqual(host_with_service1.config.address, self.HOST1_ADDRESS) + self.assertEqual(host_with_service2.config.address, self.HOST2_ADDRESS) + self.assertEqual(host_with_service3.config.address, self.HOST2_ADDRESS) + + def test_get_service_config(self): + hosting = Hosting() + hosting.configure(self.HOSTING_CONFIG) + + service1_config = hosting.get_service_config(self.SERVICE1["name"]) + service2_config = hosting.get_service_config(self.SERVICE2["name"]) + service3_config = hosting.get_service_config(self.SERVICE3["name"]) + + self.assertEqual(service1_config.name, self.SERVICE1["name"]) + self.assertDictEqual(service1_config.attributes, self.SERVICE1_ATTRIBUTES) + + self.assertEqual(service2_config.name, self.SERVICE2["name"]) + self.assertDictEqual(service2_config.attributes, self.SERVICE2_ATTRIBUTES) + + self.assertEqual(service3_config.name, self.SERVICE3["name"]) + self.assertDictEqual(service3_config.attributes, self.SERVICE3_ATTRIBUTES) + + def test_find_service_configs(self): + hosting = Hosting() + hosting.configure(self.HOSTING_CONFIG) + + all_services = hosting.find_service_configs(r".+") + self.assertEqual(len(all_services), 3) + + services = hosting.find_service_configs(rf"^{self.SERVICE_NAME_PREFIX}") + self.assertEqual(len(services), 2) + for service in services: + self.assertEqual( + service.name[: len(self.SERVICE_NAME_PREFIX)], self.SERVICE_NAME_PREFIX + ) + + service1 = hosting.find_service_configs(self.SERVICE1["name"]) + self.assertEqual(len(service1), 1) + self.assertDictEqual(service1[0].attributes, self.SERVICE1_ATTRIBUTES) diff --git a/tests/test_wallet.py b/tests/test_wallet.py new file mode 100644 index 0000000..b9352e9 --- /dev/null +++ b/tests/test_wallet.py @@ -0,0 +1,38 @@ +import json +import os +from unittest import TestCase +from uuid import uuid4 + +from neo3.wallet import Wallet + +from neofs_testlib.utils.wallet import init_wallet, get_last_address_from_wallet + + +class TestWallet(TestCase): + DEFAULT_PASSWORD = "password" + EMPTY_PASSWORD = "" + + def test_init_wallet(self): + wallet_file_path = f"{str(uuid4())}.json" + for password in (self.EMPTY_PASSWORD, self.DEFAULT_PASSWORD): + wrong_password = "wrong_password" + init_wallet(wallet_file_path, password) + self.assertTrue(os.path.exists(wallet_file_path)) + with open(wallet_file_path, "r") as wallet_file: + Wallet.from_json(json.load(wallet_file), password=password) + with self.assertRaises(ValueError): + with open(wallet_file_path, "r") as wallet_file: + Wallet.from_json(json.load(wallet_file), password=wrong_password) + os.unlink(wallet_file_path) + + def test_get_last_address_from_wallet(self): + wallet_file_path = f"{str(uuid4())}.json" + init_wallet(wallet_file_path, self.DEFAULT_PASSWORD) + with open(wallet_file_path, "r") 
as wallet_file: + wallet = Wallet.from_json(json.load(wallet_file), password=self.DEFAULT_PASSWORD) + last_address = wallet.accounts[-1].address + self.assertEqual( + get_last_address_from_wallet(wallet_file_path, self.DEFAULT_PASSWORD), + last_address, + ) + os.unlink(wallet_file_path) From 520cfe3bbaceb62c7d53cb2bab56e78fff4e5b6e Mon Sep 17 00:00:00 2001 From: "a.lipay" Date: Thu, 17 Nov 2022 06:53:28 +0300 Subject: [PATCH 031/363] Fix dump_keys function. Address argument is optional Signed-off-by: a.lipay --- src/neofs_testlib/cli/neogo/wallet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/neofs_testlib/cli/neogo/wallet.py b/src/neofs_testlib/cli/neogo/wallet.py index 9e95a51..c3a44b6 100644 --- a/src/neofs_testlib/cli/neogo/wallet.py +++ b/src/neofs_testlib/cli/neogo/wallet.py @@ -148,7 +148,7 @@ class NeoGoWallet(CliCommand): def dump_keys( self, - address: str, + address: Optional[str] = None, wallet: Optional[str] = None, wallet_config: Optional[str] = None, ) -> CommandResult: From 8b9706e777639682e27f475932d871c33fcc23d4 Mon Sep 17 00:00:00 2001 From: Vladimir Avdeev Date: Fri, 18 Nov 2022 09:49:17 +0300 Subject: [PATCH 032/363] Add wallet_password param for NeofsAuthmateSecret Signed-off-by: Vladimir Avdeev --- src/neofs_testlib/cli/neofs_authmate/secret.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/neofs_testlib/cli/neofs_authmate/secret.py b/src/neofs_testlib/cli/neofs_authmate/secret.py index 8826249..8c19eba 100644 --- a/src/neofs_testlib/cli/neofs_authmate/secret.py +++ b/src/neofs_testlib/cli/neofs_authmate/secret.py @@ -8,6 +8,7 @@ class NeofsAuthmateSecret(CliCommand): def obtain( self, wallet: str, + wallet_password: str, peer: str, gate_wallet: str, access_key_id: str, @@ -18,6 +19,7 @@ class NeofsAuthmateSecret(CliCommand): Args: wallet: Path to the wallet. + wallet_password: Wallet password. address: Address of wallet account. peer: Address of neofs peer to connect to. gate_wallet: Path to the wallet. @@ -27,8 +29,9 @@ class NeofsAuthmateSecret(CliCommand): Returns: Command's result. """ - return self._execute( + return self._execute_with_password( "obtain-secret", + wallet_password, **{ param: param_value for param, param_value in locals().items() @@ -39,6 +42,7 @@ class NeofsAuthmateSecret(CliCommand): def issue( self, wallet: str, + wallet_password: str, peer: str, bearer_rules: str, gate_public_key: str, @@ -55,6 +59,7 @@ class NeofsAuthmateSecret(CliCommand): Args: wallet: Path to the wallet. + wallet_password: Wallet password. address: Address of wallet account. peer: Address of a neofs peer to connect to. bearer_rules: Rules for bearer token as plain json string. @@ -75,8 +80,9 @@ class NeofsAuthmateSecret(CliCommand): Returns: Command's result. 
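+        Example:
+            A sketch; the endpoint, paths and key id are placeholders:
+
+                authmate.secret.obtain(
+                    wallet="wallet.json",
+                    wallet_password="password",
+                    peer="<neofs-peer-address>",
+                    gate_wallet="gate-wallet.json",
+                    access_key_id="<access-key-id>",
+                )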
""" - return self._execute( + return self._execute_with_password( "issue-secret", + wallet_password, **{ param: param_value for param, param_value in locals().items() From 90c5d9a28ae2447d00fea7f9d41e544be1ad4a34 Mon Sep 17 00:00:00 2001 From: Vladimir Avdeev Date: Fri, 18 Nov 2022 10:17:34 +0300 Subject: [PATCH 033/363] Bump version 0.4.0 -> 0.5.0 Signed-off-by: Vladimir Avdeev --- pyproject.toml | 4 ++-- src/neofs_testlib/__init__.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 6e7e962..6b8d6e3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "neofs-testlib" -version = "0.4.0" +version = "0.5.0" description = "Building blocks and utilities to facilitate development of automated tests for NeoFS system" readme = "README.md" authors = [{ name = "NSPCC", email = "info@nspcc.ru" }] @@ -48,7 +48,7 @@ line-length = 100 target-version = ["py39"] [tool.bumpver] -current_version = "0.4.0" +current_version = "0.5.0" version_pattern = "MAJOR.MINOR.PATCH" commit_message = "Bump version {old_version} -> {new_version}" commit = true diff --git a/src/neofs_testlib/__init__.py b/src/neofs_testlib/__init__.py index 6a9beea..3d18726 100644 --- a/src/neofs_testlib/__init__.py +++ b/src/neofs_testlib/__init__.py @@ -1 +1 @@ -__version__ = "0.4.0" +__version__ = "0.5.0" From 9b00f8979882bc278c6c8d264cd76b7a333726f6 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 22 Nov 2022 13:42:13 +0300 Subject: [PATCH 034/363] Add expire-at flag to object lock command Signed-off-by: Andrey Berezin --- src/neofs_testlib/cli/neofs_cli/object.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/neofs_testlib/cli/neofs_cli/object.py b/src/neofs_testlib/cli/neofs_cli/object.py index 0e54595..5b25a79 100644 --- a/src/neofs_testlib/cli/neofs_cli/object.py +++ b/src/neofs_testlib/cli/neofs_cli/object.py @@ -171,7 +171,8 @@ class NeofsCliObject(CliCommand): wallet: str, cid: str, oid: str, - lifetime: int, + lifetime: Optional[int] = None, + expire_at: Optional[int] = None, address: Optional[str] = None, bearer: Optional[str] = None, session: Optional[str] = None, @@ -186,7 +187,8 @@ class NeofsCliObject(CliCommand): bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. oid: Object ID. - lifetime: Object lifetime. + lifetime: Lock lifetime. + expire_at: Lock expiration epoch. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). session: Path to a JSON-encoded container session token. ttl: TTL value in request meta header (default 2). From 04bc5a76fe701256541f6d0f3430293619eac01d Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 22 Nov 2022 16:21:50 +0300 Subject: [PATCH 035/363] Update session usages for object cli api Signed-off-by: Andrey Berezin --- src/neofs_testlib/cli/neofs_cli/object.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/src/neofs_testlib/cli/neofs_cli/object.py b/src/neofs_testlib/cli/neofs_cli/object.py index 5b25a79..ae847c9 100644 --- a/src/neofs_testlib/cli/neofs_cli/object.py +++ b/src/neofs_testlib/cli/neofs_cli/object.py @@ -26,7 +26,7 @@ class NeofsCliObject(CliCommand): cid: Container ID. oid: Object ID. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - session: Path to a JSON-encoded container session token. + session: Filepath to a JSON- or binary-encoded token of the object DELETE session. ttl: TTL value in request meta header (default 2). 
wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. @@ -68,7 +68,7 @@ class NeofsCliObject(CliCommand): oid: Object ID. raw: Set raw request option. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - session: Path to a JSON-encoded container session token. + session: Filepath to a JSON- or binary-encoded token of the object GET session. ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. @@ -92,6 +92,7 @@ class NeofsCliObject(CliCommand): range: Optional[str] = None, salt: Optional[str] = None, ttl: Optional[int] = None, + session: Optional[str] = None, hash_type: Optional[str] = None, xhdr: Optional[dict] = None, ) -> CommandResult: @@ -107,6 +108,7 @@ class NeofsCliObject(CliCommand): rpc_endpoint: Remote node address (as 'multiaddr' or ':'). salt: Salt in hex format. ttl: TTL value in request meta header (default 2). + session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session. hash_type: Hash type. Either 'sha256' or 'tz' (default "sha256"). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. @@ -152,7 +154,7 @@ class NeofsCliObject(CliCommand): proto: Marshal output in Protobuf. raw: Set raw request option. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - session: Path to a JSON-encoded container session token. + session: Filepath to a JSON- or binary-encoded token of the object HEAD session. ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. @@ -190,7 +192,7 @@ class NeofsCliObject(CliCommand): lifetime: Lock lifetime. expire_at: Lock expiration epoch. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - session: Path to a JSON-encoded container session token. + session: Filepath to a JSON- or binary-encoded token of the object PUT session. ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. @@ -237,7 +239,7 @@ class NeofsCliObject(CliCommand): notify: Object notification in the form of *epoch*:*topic*; '-' topic means using default. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - session: Path to a JSON-encoded container session token. + session: Filepath to a JSON- or binary-encoded token of the object PUT session. ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. @@ -279,7 +281,7 @@ class NeofsCliObject(CliCommand): range: Range to take data from in the form offset:length. raw: Set raw request option. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - session: Path to a JSON-encoded container session token. + session: Filepath to a JSON- or binary-encoded token of the object RANGE session. ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. @@ -319,7 +321,7 @@ class NeofsCliObject(CliCommand): phy: Search physically stored objects. root: Search for user objects. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). - session: Path to a JSON-encoded container session token. + session: Filepath to a JSON- or binary-encoded token of the object SEARCH session. ttl: TTL value in request meta header (default 2). 
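
# A sketch of passing one of the per-operation session tokens documented
# above; the token file would be created beforehand and its path, along
# with the endpoint and IDs, is a placeholder.
from neofs_testlib.cli.neofs_cli.object import NeofsCliObject
from neofs_testlib.shell import LocalShell

neofs_object = NeofsCliObject(LocalShell(), "neofs-cli")
neofs_object.delete(
    rpc_endpoint="s01.neofs.devenv:8080",
    wallet="wallet.json",
    cid="<container-id>",
    oid="<object-id>",
    session="delete_session.json",  # JSON- or binary-encoded DELETE session token
)
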
wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. From f3bd1e21628b578c989d9e32e357b51b12c1d3af Mon Sep 17 00:00:00 2001 From: Vladimir Avdeev Date: Tue, 22 Nov 2022 16:57:32 +0300 Subject: [PATCH 036/363] Bump version 0.5.0 -> 0.6.0 Signed-off-by: Vladimir Avdeev --- pyproject.toml | 4 ++-- src/neofs_testlib/__init__.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 6b8d6e3..f9b4caa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "neofs-testlib" -version = "0.5.0" +version = "0.6.0" description = "Building blocks and utilities to facilitate development of automated tests for NeoFS system" readme = "README.md" authors = [{ name = "NSPCC", email = "info@nspcc.ru" }] @@ -48,7 +48,7 @@ line-length = 100 target-version = ["py39"] [tool.bumpver] -current_version = "0.5.0" +current_version = "0.6.0" version_pattern = "MAJOR.MINOR.PATCH" commit_message = "Bump version {old_version} -> {new_version}" commit = true diff --git a/src/neofs_testlib/__init__.py b/src/neofs_testlib/__init__.py index 3d18726..906d362 100644 --- a/src/neofs_testlib/__init__.py +++ b/src/neofs_testlib/__init__.py @@ -1 +1 @@ -__version__ = "0.5.0" +__version__ = "0.6.0" From 977921cf499fa35f4c7ae49518ee3184e7ed58f7 Mon Sep 17 00:00:00 2001 From: "a.lipay" Date: Wed, 23 Nov 2022 19:21:38 +0300 Subject: [PATCH 037/363] Add cache_only param to delete_storage_node_data method Signed-off-by: a.lipay --- src/neofs_testlib/hosting/docker_host.py | 5 +++-- src/neofs_testlib/hosting/interfaces.py | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/neofs_testlib/hosting/docker_host.py b/src/neofs_testlib/hosting/docker_host.py index 34ebb87..ae97a32 100644 --- a/src/neofs_testlib/hosting/docker_host.py +++ b/src/neofs_testlib/hosting/docker_host.py @@ -116,7 +116,7 @@ class DockerHost(Host): timeout=service_attributes.stop_timeout, ) - def delete_storage_node_data(self, service_name: str) -> None: + def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None: service_attributes = self._get_service_attributes(service_name) client = self._get_docker_client() @@ -124,7 +124,8 @@ class DockerHost(Host): volume_path = volume_info["Mountpoint"] shell = self.get_shell() - shell.exec(f"rm -rf {volume_path}/*") + cmd = f"rm -rf {volume_path}/meta*" if cache_only else f"rm -rf {volume_path}/*" + shell.exec(cmd) def dump_logs( self, diff --git a/src/neofs_testlib/hosting/interfaces.py b/src/neofs_testlib/hosting/interfaces.py index b004689..50eda0d 100644 --- a/src/neofs_testlib/hosting/interfaces.py +++ b/src/neofs_testlib/hosting/interfaces.py @@ -101,11 +101,12 @@ class Host(ABC): """ @abstractmethod - def delete_storage_node_data(self, service_name: str) -> None: + def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None: """Erases all data of the storage node with specified name. Args: service_name: Name of storage node service. + cache_only: To delete cache only. 
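
# A minimal sketch of the new flag, assuming `Hosting` is exported from the
# hosting package and configured with a dict like the one in the tests
# earlier in this patch set; the service name is a placeholder. With
# cache_only=True a DockerHost now removes only the metabase entries
# (meta*) from the node's volume instead of wiping it entirely.
from neofs_testlib.hosting import Hosting

hosting = Hosting()
hosting.configure(HOSTING_CONFIG)  # HOSTING_CONFIG: a config dict as in the tests
host = hosting.get_host_by_service("storage-node-1")
host.stop_service("storage-node-1")
host.delete_storage_node_data("storage-node-1", cache_only=True)
host.start_service("storage-node-1")
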
""" @abstractmethod From b01a56f897a348a38ecda4879ad05a1d98c3477c Mon Sep 17 00:00:00 2001 From: "a.lipay" Date: Thu, 24 Nov 2022 14:18:41 +0300 Subject: [PATCH 038/363] Bump version 0.6.0 -> 0.7.0 Signed-off-by: a.lipay --- pyproject.toml | 4 ++-- src/neofs_testlib/__init__.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index f9b4caa..9e2b443 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "neofs-testlib" -version = "0.6.0" +version = "0.7.0" description = "Building blocks and utilities to facilitate development of automated tests for NeoFS system" readme = "README.md" authors = [{ name = "NSPCC", email = "info@nspcc.ru" }] @@ -48,7 +48,7 @@ line-length = 100 target-version = ["py39"] [tool.bumpver] -current_version = "0.6.0" +current_version = "0.7.0" version_pattern = "MAJOR.MINOR.PATCH" commit_message = "Bump version {old_version} -> {new_version}" commit = true diff --git a/src/neofs_testlib/__init__.py b/src/neofs_testlib/__init__.py index 906d362..49e0fc1 100644 --- a/src/neofs_testlib/__init__.py +++ b/src/neofs_testlib/__init__.py @@ -1 +1 @@ -__version__ = "0.6.0" +__version__ = "0.7.0" From 7c127d9d74da9e6a3156bf0a611b364f1ec6fedd Mon Sep 17 00:00:00 2001 From: Vladimir Avdeev Date: Mon, 5 Dec 2022 12:58:13 +0300 Subject: [PATCH 039/363] Bump version 0.7.0 -> 0.7.1 Signed-off-by: Vladimir Avdeev --- pyproject.toml | 4 ++-- src/neofs_testlib/__init__.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 9e2b443..2123390 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "neofs-testlib" -version = "0.7.0" +version = "0.7.1" description = "Building blocks and utilities to facilitate development of automated tests for NeoFS system" readme = "README.md" authors = [{ name = "NSPCC", email = "info@nspcc.ru" }] @@ -48,7 +48,7 @@ line-length = 100 target-version = ["py39"] [tool.bumpver] -current_version = "0.7.0" +current_version = "0.7.1" version_pattern = "MAJOR.MINOR.PATCH" commit_message = "Bump version {old_version} -> {new_version}" commit = true diff --git a/src/neofs_testlib/__init__.py b/src/neofs_testlib/__init__.py index 49e0fc1..a5f830a 100644 --- a/src/neofs_testlib/__init__.py +++ b/src/neofs_testlib/__init__.py @@ -1 +1 @@ -__version__ = "0.7.0" +__version__ = "0.7.1" From 647f89459d3f3996161bcf7a784e82bbfc551263 Mon Sep 17 00:00:00 2001 From: Vladimir Avdeev Date: Mon, 5 Dec 2022 23:14:17 +0300 Subject: [PATCH 040/363] Add neofs-cli control shards command Signed-off-by: Vladimir Avdeev --- src/neofs_testlib/cli/neofs_cli/cli.py | 3 + src/neofs_testlib/cli/neofs_cli/shards.py | 138 ++++++++++++++++++++++ 2 files changed, 141 insertions(+) create mode 100644 src/neofs_testlib/cli/neofs_cli/shards.py diff --git a/src/neofs_testlib/cli/neofs_cli/cli.py b/src/neofs_testlib/cli/neofs_cli/cli.py index d80c3e8..cfe8e5d 100644 --- a/src/neofs_testlib/cli/neofs_cli/cli.py +++ b/src/neofs_testlib/cli/neofs_cli/cli.py @@ -6,6 +6,7 @@ from neofs_testlib.cli.neofs_cli.container import NeofsCliContainer from neofs_testlib.cli.neofs_cli.netmap import NeofsCliNetmap from neofs_testlib.cli.neofs_cli.object import NeofsCliObject from neofs_testlib.cli.neofs_cli.session import NeofsCliSession +from neofs_testlib.cli.neofs_cli.shards import NeofsCliShards from neofs_testlib.cli.neofs_cli.storagegroup import NeofsCliStorageGroup from 
neofs_testlib.cli.neofs_cli.util import NeofsCliUtil from neofs_testlib.cli.neofs_cli.version import NeofsCliVersion @@ -19,6 +20,7 @@ class NeofsCli: netmap: Optional[NeofsCliNetmap] = None object: Optional[NeofsCliObject] = None session: Optional[NeofsCliSession] = None + shards: Optional[NeofsCliShards] = None storagegroup: Optional[NeofsCliStorageGroup] = None util: Optional[NeofsCliUtil] = None version: Optional[NeofsCliVersion] = None @@ -30,6 +32,7 @@ class NeofsCli: self.netmap = NeofsCliNetmap(shell, neofs_cli_exec_path, config=config_file) self.object = NeofsCliObject(shell, neofs_cli_exec_path, config=config_file) self.session = NeofsCliSession(shell, neofs_cli_exec_path, config=config_file) + self.shards = NeofsCliShards(shell, neofs_cli_exec_path, config=config_file) self.storagegroup = NeofsCliStorageGroup(shell, neofs_cli_exec_path, config=config_file) self.util = NeofsCliUtil(shell, neofs_cli_exec_path, config=config_file) self.version = NeofsCliVersion(shell, neofs_cli_exec_path, config=config_file) diff --git a/src/neofs_testlib/cli/neofs_cli/shards.py b/src/neofs_testlib/cli/neofs_cli/shards.py new file mode 100644 index 0000000..dd57827 --- /dev/null +++ b/src/neofs_testlib/cli/neofs_cli/shards.py @@ -0,0 +1,138 @@ +from typing import Optional + +from neofs_testlib.cli.cli_command import CliCommand +from neofs_testlib.shell import CommandResult + + +class NeofsCliShards(CliCommand): + def flush_cache( + self, + endpoint: str, + wallet: str, + wallet_password: str, + id: Optional[list[str]], + address: Optional[str] = None, + all: bool = False, + ) -> CommandResult: + """ + Flush objects from the write-cache to the main storage. + + Args: + address: Address of wallet account. + id: List of shard IDs in base58 encoding. + all: Process all shards. + endpoint: Remote node address (as 'multiaddr' or ':'). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + wallet_password: Wallet password. + + Returns: + Command's result. + """ + return self._execute_with_password( + f"control shards flush-cache", + wallet_password, + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def set_mode( + self, + endpoint: str, + wallet: str, + wallet_password: str, + mode: str, + id: Optional[list[str]], + address: Optional[str] = None, + all: bool = False, + clear_errors: bool = False, + ) -> CommandResult: + """ + Set work mode of the shard. + + Args: + address: Address of wallet account. + id: List of shard IDs in base58 encoding. + mode: New shard mode ('degraded-read-only', 'read-only', 'read-write'). + all: Process all shards. + clear_errors: Set shard error count to 0. + endpoint: Remote node address (as 'multiaddr' or ':'). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + wallet_password: Wallet password. + + Returns: + Command's result. + """ + return self._execute_with_password( + f"control shards set-mode", + wallet_password, + **{ + param: value + for param, value in locals().items() + if param not in ["self", "wallet_password"] + }, + ) + + def dump( + self, + endpoint: str, + wallet: str, + wallet_password: str, + id: str, + path: str, + address: Optional[str] = None, + no_errors: bool = False, + ) -> CommandResult: + """ + Dump objects from shard to a file. + + Args: + address: Address of wallet account. + no_errors: Skip invalid/unreadable objects. + id: Shard ID in base58 encoding. + path: File to write objects to. + endpoint: Remote node address (as 'multiaddr' or ':'). 
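
# A quick sketch of the new shards group wired into NeofsCli above. The
# import path and constructor arguments are assumed from the package
# layout; endpoint, wallet and password are placeholders. The password is
# passed separately so the wrapper can answer the interactive prompt.
from neofs_testlib.cli import NeofsCli
from neofs_testlib.shell import LocalShell

cli = NeofsCli(LocalShell(), "neofs-cli")  # (shell, exec path); config file omitted
shards = cli.shards.list(
    endpoint="s01.neofs.devenv:8080",
    wallet="wallet.json",
    wallet_password="password",
    json_mode=True,
)
print(shards.stdout)
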
+ wallet: WIF (NEP-2) string or path to the wallet or binary key. + wallet_password: Wallet password. + + Returns: + Command's result. + """ + return self._execute_with_password( + f"control shards dump", + wallet_password, + **{ + param: value + for param, value in locals().items() + if param not in ["self", "wallet_password"] + }, + ) + + def list( + self, + endpoint: str, + wallet: str, + wallet_password: str, + address: Optional[str] = None, + json_mode: bool = False, + ) -> CommandResult: + """ + List shards of the storage node. + + Args: + address: Address of wallet account. + json_mode: Print shard info as a JSON array. + endpoint: Remote node address (as 'multiaddr' or ':'). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + wallet_password: Wallet password. + + Returns: + Command's result. + """ + return self._execute_with_password( + f"control shards list", + wallet_password, + **{ + param: value + for param, value in locals().items() + if param not in ["self", "wallet_password"] + }, + ) From 631a62eecfac7b632e219aa8c1231d131aa6f60a Mon Sep 17 00:00:00 2001 From: Vladimir Avdeev Date: Tue, 6 Dec 2022 09:28:50 +0300 Subject: [PATCH 041/363] Bump version 0.7.1 -> 0.8.0 Signed-off-by: Vladimir Avdeev --- pyproject.toml | 4 ++-- src/neofs_testlib/__init__.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 2123390..e8591e5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "neofs-testlib" -version = "0.7.1" +version = "0.8.0" description = "Building blocks and utilities to facilitate development of automated tests for NeoFS system" readme = "README.md" authors = [{ name = "NSPCC", email = "info@nspcc.ru" }] @@ -48,7 +48,7 @@ line-length = 100 target-version = ["py39"] [tool.bumpver] -current_version = "0.7.1" +current_version = "0.8.0" version_pattern = "MAJOR.MINOR.PATCH" commit_message = "Bump version {old_version} -> {new_version}" commit = true diff --git a/src/neofs_testlib/__init__.py b/src/neofs_testlib/__init__.py index a5f830a..777f190 100644 --- a/src/neofs_testlib/__init__.py +++ b/src/neofs_testlib/__init__.py @@ -1 +1 @@ -__version__ = "0.7.1" +__version__ = "0.8.0" From efdc7cf90a81a8f74bf6c4df9cdc2bfa9d232fef Mon Sep 17 00:00:00 2001 From: Vladimir Avdeev Date: Thu, 8 Dec 2022 10:42:45 +0300 Subject: [PATCH 042/363] Remove retry command execute for noninteractive Shell.exec() Signed-off-by: Vladimir Avdeev --- src/neofs_testlib/shell/local_shell.py | 81 +++++++++----------------- tests/test_local_shell.py | 2 +- 2 files changed, 27 insertions(+), 56 deletions(-) diff --git a/src/neofs_testlib/shell/local_shell.py b/src/neofs_testlib/shell/local_shell.py index b20988c..f16339f 100644 --- a/src/neofs_testlib/shell/local_shell.py +++ b/src/neofs_testlib/shell/local_shell.py @@ -35,53 +35,35 @@ class LocalShell(Shell): def _exec_interactive(self, command: str, options: CommandOptions) -> CommandResult: start_time = datetime.utcnow() log_file = tempfile.TemporaryFile() # File is reliable cross-platform way to capture output - result = None - command_process = None try: command_process = pexpect.spawn(command, timeout=options.timeout) - command_process.delaybeforesend = 1 - command_process.logfile_read = log_file + except (pexpect.ExceptionPexpect, OSError) as exc: + raise RuntimeError(f"Command: {command}") from exc + command_process.delaybeforesend = 1 + command_process.logfile_read = log_file + + try: for interactive_input in 
options.interactive_inputs: command_process.expect(interactive_input.prompt_pattern) command_process.sendline(interactive_input.input) - - result = self._get_pexpect_process_result(command_process, command) - if options.check and result.return_code != 0: - raise RuntimeError( - f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}" - ) - - return result - except pexpect.ExceptionPexpect as exc: - result = self._get_pexpect_process_result(command_process, command) - message = ( - f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}" - ) + except (pexpect.ExceptionPexpect, OSError) as exc: if options.check: - raise RuntimeError(message) from exc - else: - logger.exception(message) - return result - except OSError as exc: - result = self._get_pexpect_process_result(command_process, command) - message = ( - f"Command: {command}\nreturn code: {result.return_code}\nOutput: {exc.strerror}" - ) - if options.check: - raise RuntimeError(message) from exc - else: - logger.exception(message) - return result - except Exception: - result = self._get_pexpect_process_result(command_process, command) - raise + raise RuntimeError(f"Command: {command}") from exc finally: + result = self._get_pexpect_process_result(command_process) log_file.close() end_time = datetime.utcnow() self._report_command_result(command, start_time, end_time, result) + if options.check and result.return_code != 0: + raise RuntimeError( + f"Command: {command}\nreturn code: {result.return_code}\n" + f"Output: {result.stdout}" + ) + return result + def _exec_non_interactive(self, command: str, options: CommandOptions) -> CommandResult: start_time = datetime.utcnow() result = None @@ -99,13 +81,16 @@ class LocalShell(Shell): result = CommandResult( stdout=command_process.stdout or "", - stderr=command_process.stderr or "", + stderr="", return_code=command_process.returncode, ) - return result except subprocess.CalledProcessError as exc: # TODO: always set check flag to false and capture command result normally - result = self._get_failing_command_result(command) + result = CommandResult( + stdout=exc.stdout or "", + stderr="", + return_code=exc.returncode, + ) raise RuntimeError( f"Command: {command}\nError:\n" f"return code: {exc.returncode}\n" @@ -113,29 +98,15 @@ class LocalShell(Shell): ) from exc except OSError as exc: raise RuntimeError(f"Command: {command}\nOutput: {exc.strerror}") from exc - except Exception as exc: - result = self._get_failing_command_result(command) - raise finally: end_time = datetime.utcnow() self._report_command_result(command, start_time, end_time, result) + return result - def _get_failing_command_result(self, command: str) -> CommandResult: - return_code, cmd_output = subprocess.getstatusoutput(command) - return CommandResult(stdout=cmd_output, stderr="", return_code=return_code) - - def _get_pexpect_process_result( - self, command_process: Optional[pexpect.spawn], command: str - ) -> CommandResult: - """Captures output of the process. - - If command process is not None, captures output of this process. - If command process is None, then command fails when we attempt to start it, in this case - we use regular non-interactive process to get it's output. + def _get_pexpect_process_result(self, command_process: pexpect.spawn) -> CommandResult: + """ + Captures output of the process. 
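
# A sketch of the interactive path this refactoring simplifies: a spawn
# failure now raises RuntimeError immediately instead of being retried
# through a non-interactive fallback run. The command and prompt pattern
# below are illustrative.
from neofs_testlib.shell import LocalShell
from neofs_testlib.shell.interfaces import CommandOptions, InteractiveInput

shell = LocalShell()
options = CommandOptions(
    interactive_inputs=[InteractiveInput(prompt_pattern="Enter password", input="secret")]
)
result = shell.exec("neo-go wallet dump-keys -w wallet.json", options)
print(result.return_code, result.stdout)
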
""" - if command_process is None: - return self._get_failing_command_result(command) - # Wait for child process to end it's work if command_process.isalive(): command_process.expect(pexpect.EOF) diff --git a/tests/test_local_shell.py b/tests/test_local_shell.py index f9c2d89..de1e22f 100644 --- a/tests/test_local_shell.py +++ b/tests/test_local_shell.py @@ -72,7 +72,7 @@ class TestLocalShellInteractive(TestCase): self.shell.exec("not-a-command", CommandOptions(interactive_inputs=inputs)) error = format_error_details(exc.exception) - self.assertIn("return code: 127", error) + self.assertIn("The command was not found", error) class TestLocalShellNonInteractive(TestCase): From 5aeafecf39285939359fe3a2a9850771d25f63eb Mon Sep 17 00:00:00 2001 From: Vladimir Avdeev Date: Thu, 8 Dec 2022 12:12:46 +0300 Subject: [PATCH 043/363] Bump version 0.8.0 -> 0.8.1 Signed-off-by: Vladimir Avdeev --- pyproject.toml | 4 ++-- src/neofs_testlib/__init__.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index e8591e5..7919e5a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "neofs-testlib" -version = "0.8.0" +version = "0.8.1" description = "Building blocks and utilities to facilitate development of automated tests for NeoFS system" readme = "README.md" authors = [{ name = "NSPCC", email = "info@nspcc.ru" }] @@ -48,7 +48,7 @@ line-length = 100 target-version = ["py39"] [tool.bumpver] -current_version = "0.8.0" +current_version = "0.8.1" version_pattern = "MAJOR.MINOR.PATCH" commit_message = "Bump version {old_version} -> {new_version}" commit = true diff --git a/src/neofs_testlib/__init__.py b/src/neofs_testlib/__init__.py index 777f190..8088f75 100644 --- a/src/neofs_testlib/__init__.py +++ b/src/neofs_testlib/__init__.py @@ -1 +1 @@ -__version__ = "0.8.0" +__version__ = "0.8.1" From 3034b7004861f631fdafe45e1b20ab46bb850346 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Thu, 12 Jan 2023 19:48:03 +0300 Subject: [PATCH 044/363] Add disks management commands Signed-off-by: Andrey Berezin --- .gitignore | 1 + src/neofs_testlib/hosting/docker_host.py | 11 ++++++- src/neofs_testlib/hosting/interfaces.py | 40 +++++++++++++++++++++++- 3 files changed, 50 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index 8a7034d..c3d58f7 100644 --- a/.gitignore +++ b/.gitignore @@ -7,4 +7,5 @@ # ignore build artifacts /dist +/build *.egg-info diff --git a/src/neofs_testlib/hosting/docker_host.py b/src/neofs_testlib/hosting/docker_host.py index ae97a32..ed3604c 100644 --- a/src/neofs_testlib/hosting/docker_host.py +++ b/src/neofs_testlib/hosting/docker_host.py @@ -10,7 +10,7 @@ import docker from requests import HTTPError from neofs_testlib.hosting.config import ParsedAttributes -from neofs_testlib.hosting.interfaces import Host +from neofs_testlib.hosting.interfaces import DiskInfo, Host from neofs_testlib.shell import LocalShell, Shell, SSHShell from neofs_testlib.shell.command_inspectors import SudoInspector @@ -127,6 +127,15 @@ class DockerHost(Host): cmd = f"rm -rf {volume_path}/meta*" if cache_only else f"rm -rf {volume_path}/*" shell.exec(cmd) + def attach_disk(self, device: str, disk_info: DiskInfo) -> None: + raise NotImplementedError("Not supported for docker") + + def detach_disk(self, device: str) -> DiskInfo: + raise NotImplementedError("Not supported for docker") + + def is_disk_attached(self, device: str, disk_info: DiskInfo) -> bool: + raise NotImplementedError("Not 
supported for docker") + def dump_logs( self, directory_path: str, diff --git a/src/neofs_testlib/hosting/interfaces.py b/src/neofs_testlib/hosting/interfaces.py index 50eda0d..b90eb3d 100644 --- a/src/neofs_testlib/hosting/interfaces.py +++ b/src/neofs_testlib/hosting/interfaces.py @@ -1,11 +1,15 @@ from abc import ABC, abstractmethod from datetime import datetime -from typing import Optional +from typing import Any, Optional from neofs_testlib.hosting.config import CLIConfig, HostConfig, ServiceConfig from neofs_testlib.shell.interfaces import Shell +class DiskInfo(dict): + """Dict wrapper for disk_info for disk management commands.""" + + class Host(ABC): """Interface of a host machine where neoFS services are running. @@ -109,6 +113,40 @@ class Host(ABC): cache_only: To delete cache only. """ + @abstractmethod + def detach_disk(self, device: str) -> DiskInfo: + """Detaches disk device to simulate disk offline/failover scenario. + + Args: + device: Device name to detach + + Returns: + internal service disk info related to host plugin (i.e. volume id for cloud devices), + which may be used to identify or re-attach existing volume back + """ + + @abstractmethod + def attach_disk(self, device: str, disk_info: DiskInfo) -> None: + """Attaches disk device back. + + Args: + device: Device name to attach + service_info: any info required for host plugin to identify/attach disk + """ + + @abstractmethod + def is_disk_attached(self, device: str, disk_info: DiskInfo) -> bool: + """Checks if disk device is attached. + + Args: + device: Device name to check + service_info: any info required for host plugin to identify disk + + Returns: + True if attached + False if detached + """ + @abstractmethod def dump_logs( self, From f6b128e113edbffd078c195b27dc041289114335 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Fri, 13 Jan 2023 15:50:32 +0300 Subject: [PATCH 045/363] Bump version 0.8.1 -> 0.9.0 Signed-off-by: Andrey Berezin --- pyproject.toml | 4 ++-- src/neofs_testlib/__init__.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 7919e5a..bd14edf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "neofs-testlib" -version = "0.8.1" +version = "0.9.0" description = "Building blocks and utilities to facilitate development of automated tests for NeoFS system" readme = "README.md" authors = [{ name = "NSPCC", email = "info@nspcc.ru" }] @@ -48,7 +48,7 @@ line-length = 100 target-version = ["py39"] [tool.bumpver] -current_version = "0.8.1" +current_version = "0.9.0" version_pattern = "MAJOR.MINOR.PATCH" commit_message = "Bump version {old_version} -> {new_version}" commit = true diff --git a/src/neofs_testlib/__init__.py b/src/neofs_testlib/__init__.py index 8088f75..3e2f46a 100644 --- a/src/neofs_testlib/__init__.py +++ b/src/neofs_testlib/__init__.py @@ -1 +1 @@ -__version__ = "0.8.1" +__version__ = "0.9.0" From c55c0fccb04ee6c97a9a8446c5b53a0e9a207be0 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 18 Jan 2023 15:56:01 +0300 Subject: [PATCH 046/363] Update rm command Signed-off-by: Andrey Berezin --- src/neofs_testlib/hosting/docker_host.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/neofs_testlib/hosting/docker_host.py b/src/neofs_testlib/hosting/docker_host.py index ed3604c..cba70ef 100644 --- a/src/neofs_testlib/hosting/docker_host.py +++ b/src/neofs_testlib/hosting/docker_host.py @@ -124,7 +124,9 @@ class DockerHost(Host): 
volume_path = volume_info["Mountpoint"] shell = self.get_shell() - cmd = f"rm -rf {volume_path}/meta*" if cache_only else f"rm -rf {volume_path}/*" + meta_clean_cmd = f"rm -rf {volume_path}/meta*/*" + data_clean_cmd = f"; rm -rf {volume_path}/data*/*" if not cache_only else "" + cmd = f"{meta_clean_cmd}{data_clean_cmd}" shell.exec(cmd) def attach_disk(self, device: str, disk_info: DiskInfo) -> None: From 22f73e6cde71d332c7437b3ecdd5a85b6b61ef40 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Thu, 19 Jan 2023 20:49:08 +0300 Subject: [PATCH 047/363] Update hint for issue secret Signed-off-by: Andrey Berezin --- src/neofs_testlib/cli/neofs_authmate/secret.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/neofs_testlib/cli/neofs_authmate/secret.py b/src/neofs_testlib/cli/neofs_authmate/secret.py index 8c19eba..40cfc25 100644 --- a/src/neofs_testlib/cli/neofs_authmate/secret.py +++ b/src/neofs_testlib/cli/neofs_authmate/secret.py @@ -1,4 +1,4 @@ -from typing import Optional +from typing import Optional, Union from neofs_testlib.cli.cli_command import CliCommand from neofs_testlib.shell import CommandResult @@ -45,7 +45,7 @@ class NeofsAuthmateSecret(CliCommand): wallet_password: str, peer: str, bearer_rules: str, - gate_public_key: str, + gate_public_key: Union[str, list[str]], address: Optional[str] = None, container_id: Optional[str] = None, container_friendly_name: Optional[str] = None, @@ -63,7 +63,7 @@ class NeofsAuthmateSecret(CliCommand): address: Address of wallet account. peer: Address of a neofs peer to connect to. bearer_rules: Rules for bearer token as plain json string. - gate_public_key: Public 256r1 key of a gate (use flags repeatedly for multiple gates). + gate_public_key: Public 256r1 key of a gate (send list[str] of keys to use multiple gates). container_id: Auth container id to put the secret into. container_friendly_name: Friendly name of auth container to put the secret into. container_placement_policy: Placement policy of auth container to put the secret into From 469ab4db43303d2bbc8b552be8efc6c21dffbe37 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 7 Feb 2023 11:40:31 +0300 Subject: [PATCH 048/363] 1. Adding timeout control things 2. Add logs filtering Signed-off-by: Andrey Berezin --- src/neofs_testlib/cli/neofs_cli/container.py | 14 ++++++++ src/neofs_testlib/cli/neofs_cli/object.py | 16 +++++++++ src/neofs_testlib/defaults.py | 10 ++++++ src/neofs_testlib/hosting/docker_host.py | 30 ++++++++++++++++ src/neofs_testlib/hosting/interfaces.py | 37 +++++++++++++++----- src/neofs_testlib/shell/interfaces.py | 8 ++++- 6 files changed, 106 insertions(+), 9 deletions(-) create mode 100644 src/neofs_testlib/defaults.py diff --git a/src/neofs_testlib/cli/neofs_cli/container.py b/src/neofs_testlib/cli/neofs_cli/container.py index ca0b081..1952448 100644 --- a/src/neofs_testlib/cli/neofs_cli/container.py +++ b/src/neofs_testlib/cli/neofs_cli/container.py @@ -21,6 +21,7 @@ class NeofsCliContainer(CliCommand): subnet: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, + timeout: Optional[str] = None, ) -> CommandResult: """ Create a new container and register it in the NeoFS. @@ -43,6 +44,7 @@ class NeofsCliContainer(CliCommand): ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + timeout: Timeout for the operation (default 15s). Returns: Command's result. 
@@ -63,6 +65,7 @@ class NeofsCliContainer(CliCommand): ttl: Optional[int] = None, xhdr: Optional[dict] = None, force: bool = False, + timeout: Optional[str] = None, ) -> CommandResult: """ Delete an existing container. @@ -78,6 +81,7 @@ class NeofsCliContainer(CliCommand): ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + timeout: Timeout for the operation (default 15s). Returns: Command's result. @@ -99,6 +103,7 @@ class NeofsCliContainer(CliCommand): json_mode: bool = False, ttl: Optional[int] = None, xhdr: Optional[dict] = None, + timeout: Optional[str] = None, ) -> CommandResult: """ Get container field info. @@ -113,6 +118,7 @@ class NeofsCliContainer(CliCommand): ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + timeout: Timeout for the operation (default 15s). Returns: Command's result. @@ -133,6 +139,7 @@ class NeofsCliContainer(CliCommand): session: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, + timeout: Optional[str] = None, ) -> CommandResult: """ Get extended ACL table of container. @@ -147,6 +154,7 @@ class NeofsCliContainer(CliCommand): ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + timeout: Timeout for the operation (default 15s). Returns: Command's result. @@ -165,6 +173,7 @@ class NeofsCliContainer(CliCommand): owner: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, + timeout: Optional[str] = None, **params, ) -> CommandResult: """ @@ -177,6 +186,7 @@ class NeofsCliContainer(CliCommand): ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + timeout: Timeout for the operation (default 15s). Returns: Command's result. @@ -194,6 +204,7 @@ class NeofsCliContainer(CliCommand): address: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, + timeout: Optional[str] = None, ) -> CommandResult: """ List existing objects in container. @@ -205,6 +216,7 @@ class NeofsCliContainer(CliCommand): ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + timeout: Timeout for the operation (default 15s). Returns: Command's result. @@ -225,6 +237,7 @@ class NeofsCliContainer(CliCommand): session: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, + timeout: Optional[str] = None, ) -> CommandResult: """ Set a new extended ACL table for the container. @@ -240,6 +253,7 @@ class NeofsCliContainer(CliCommand): ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + timeout: Timeout for the operation (default 15s). Returns: Command's result. diff --git a/src/neofs_testlib/cli/neofs_cli/object.py b/src/neofs_testlib/cli/neofs_cli/object.py index ae847c9..164076c 100644 --- a/src/neofs_testlib/cli/neofs_cli/object.py +++ b/src/neofs_testlib/cli/neofs_cli/object.py @@ -16,6 +16,7 @@ class NeofsCliObject(CliCommand): session: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, + timeout: Optional[str] = None, ) -> CommandResult: """ Delete object from NeoFS. 
@@ -30,6 +31,7 @@ class NeofsCliObject(CliCommand): ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + timeout: Timeout for the operation (default 15s). Returns: Command's result. @@ -54,6 +56,7 @@ class NeofsCliObject(CliCommand): session: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, + timeout: Optional[str] = None, ) -> CommandResult: """ Get object from NeoFS. @@ -72,6 +75,7 @@ class NeofsCliObject(CliCommand): ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + timeout: Timeout for the operation (default 15s). Returns: Command's result. @@ -95,6 +99,7 @@ class NeofsCliObject(CliCommand): session: Optional[str] = None, hash_type: Optional[str] = None, xhdr: Optional[dict] = None, + timeout: Optional[str] = None, ) -> CommandResult: """ Get object hash. @@ -112,6 +117,7 @@ class NeofsCliObject(CliCommand): hash_type: Hash type. Either 'sha256' or 'tz' (default "sha256"). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + timeout: Timeout for the operation (default 15s). Returns: Command's result. @@ -139,6 +145,7 @@ class NeofsCliObject(CliCommand): session: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, + timeout: Optional[str] = None, ) -> CommandResult: """ Get object header. @@ -158,6 +165,7 @@ class NeofsCliObject(CliCommand): ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + timeout: Timeout for the operation (default 15s). Returns: Command's result. @@ -180,6 +188,7 @@ class NeofsCliObject(CliCommand): session: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, + timeout: Optional[str] = None, ) -> CommandResult: """ Lock object in container. @@ -196,6 +205,7 @@ class NeofsCliObject(CliCommand): ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + timeout: Timeout for the operation (default 15s). Returns: Command's result. @@ -222,6 +232,7 @@ class NeofsCliObject(CliCommand): session: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, + timeout: Optional[str] = None, ) -> CommandResult: """ Put object to NeoFS. @@ -243,6 +254,7 @@ class NeofsCliObject(CliCommand): ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + timeout: Timeout for the operation (default 15s). Returns: Command's result. @@ -267,6 +279,7 @@ class NeofsCliObject(CliCommand): session: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, + timeout: Optional[str] = None, ) -> CommandResult: """ Get payload range data of an object. @@ -285,6 +298,7 @@ class NeofsCliObject(CliCommand): ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + timeout: Timeout for the operation (default 15s). Returns: Command's result. @@ -308,6 +322,7 @@ class NeofsCliObject(CliCommand): session: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, + timeout: Optional[str] = None, ) -> CommandResult: """ Search object. 
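
# A sketch of the per-call timeout control added throughout the object
# commands above; presumably the value is forwarded to the CLI as its
# timeout option, so a duration string such as "30s" is used. Endpoint,
# wallet and IDs are placeholders.
from neofs_testlib.cli.neofs_cli.object import NeofsCliObject
from neofs_testlib.shell import LocalShell

neofs_object = NeofsCliObject(LocalShell(), "neofs-cli")
neofs_object.delete(
    rpc_endpoint="s01.neofs.devenv:8080",
    wallet="wallet.json",
    cid="<container-id>",
    oid="<object-id>",
    timeout="30s",
)
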
@@ -325,6 +340,7 @@ class NeofsCliObject(CliCommand): ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + timeout: Timeout for the operation (default 15s). Returns: Command's result. diff --git a/src/neofs_testlib/defaults.py b/src/neofs_testlib/defaults.py new file mode 100644 index 0000000..687fbd6 --- /dev/null +++ b/src/neofs_testlib/defaults.py @@ -0,0 +1,10 @@ +class Options: + DEFAULT_SHELL_TIMEOUT = 90 + + @staticmethod + def get_default_shell_timeout(): + return Options.DEFAULT_SHELL_TIMEOUT + + @staticmethod + def set_default_shell_timeout(value: int): + Options.DEFAULT_SHELL_TIMEOUT = value diff --git a/src/neofs_testlib/hosting/docker_host.py b/src/neofs_testlib/hosting/docker_host.py index cba70ef..257847b 100644 --- a/src/neofs_testlib/hosting/docker_host.py +++ b/src/neofs_testlib/hosting/docker_host.py @@ -1,6 +1,7 @@ import json import logging import os +import re import time from dataclasses import dataclass from datetime import datetime @@ -143,6 +144,7 @@ class DockerHost(Host): directory_path: str, since: Optional[datetime] = None, until: Optional[datetime] = None, + filter_regex: Optional[str] = None, ) -> None: client = self._get_docker_client() for service_config in self._config.services: @@ -153,6 +155,12 @@ class DockerHost(Host): logger.info(f"Got exception while dumping logs of '{container_name}': {exc}") continue + if filter_regex: + logs = ( + "\n".join(match[0] for match in re.findall(filter_regex, logs, re.IGNORECASE)) + or f"No matches found in logs based on given filter '{filter_regex}'" + ) + # Save logs to the directory file_path = os.path.join( directory_path, @@ -161,6 +169,28 @@ class DockerHost(Host): with open(file_path, "wb") as file: file.write(logs) + def is_message_in_logs( + self, + message_regex: str, + since: Optional[datetime] = None, + until: Optional[datetime] = None, + ) -> bool: + client = self._get_docker_client() + for service_config in self._config.services: + container_name = self._get_service_attributes(service_config.name).container_name + try: + logs = client.logs(container_name, since=since, until=until) + except HTTPError as exc: + logger.info(f"Got exception while dumping logs of '{container_name}': {exc}") + continue + + if message_regex: + matches = re.findall(message_regex, logs, re.IGNORECASE) + if matches: + return True + + return False + def _get_service_attributes(self, service_name) -> ServiceAttributes: service_config = self.get_service_config(service_name) return ServiceAttributes.parse(service_config.attributes) diff --git a/src/neofs_testlib/hosting/interfaces.py b/src/neofs_testlib/hosting/interfaces.py index b90eb3d..ddb07b7 100644 --- a/src/neofs_testlib/hosting/interfaces.py +++ b/src/neofs_testlib/hosting/interfaces.py @@ -118,11 +118,11 @@ class Host(ABC): """Detaches disk device to simulate disk offline/failover scenario. Args: - device: Device name to detach + device: Device name to detach. Returns: internal service disk info related to host plugin (i.e. volume id for cloud devices), - which may be used to identify or re-attach existing volume back + which may be used to identify or re-attach existing volume back. """ @abstractmethod @@ -130,8 +130,8 @@ class Host(ABC): """Attaches disk device back. Args: - device: Device name to attach - service_info: any info required for host plugin to identify/attach disk + device: Device name to attach. 
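
# A sketch of the other two additions in this patch: a process-wide default
# shell timeout, and regex-based log inspection (`host` is assumed as in
# the earlier sketches; the directory and patterns are placeholders).
from neofs_testlib.defaults import Options

Options.set_default_shell_timeout(120)  # CommandOptions without an explicit
                                        # timeout now default to 120 seconds

host.dump_logs("/tmp/node-logs", filter_regex=r"panic|fatal")
if host.is_message_in_logs(r"blobstor.*error"):
    print("found suspicious log lines")
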
+ service_info: any info required for host plugin to identify/attach disk. """ @abstractmethod @@ -139,12 +139,12 @@ class Host(ABC): """Checks if disk device is attached. Args: - device: Device name to check - service_info: any info required for host plugin to identify disk + device: Device name to check. + service_info: any info required for host plugin to identify disk. Returns: - True if attached - False if detached + True if attached. + False if detached. """ @abstractmethod @@ -153,6 +153,7 @@ class Host(ABC): directory_path: str, since: Optional[datetime] = None, until: Optional[datetime] = None, + filter_regex: Optional[str] = None, ) -> None: """Dumps logs of all services on the host to specified directory. @@ -160,4 +161,24 @@ class Host(ABC): directory_path: Path to the directory where logs should be stored. since: If set, limits the time from which logs should be collected. Must be in UTC. until: If set, limits the time until which logs should be collected. Must be in UTC. + filter_regex: regex to filter output + """ + + @abstractmethod + def is_message_in_logs( + self, + message_regex: str, + since: Optional[datetime] = None, + until: Optional[datetime] = None, + ) -> bool: + """Checks logs on host for specified message regex. + + Args: + message_regex: message to find. + since: If set, limits the time from which logs should be collected. Must be in UTC. + until: If set, limits the time until which logs should be collected. Must be in UTC. + + Returns: + True if message found in logs in the given time frame. + False otherwise. """ diff --git a/src/neofs_testlib/shell/interfaces.py b/src/neofs_testlib/shell/interfaces.py index ca02582..e4f7dea 100644 --- a/src/neofs_testlib/shell/interfaces.py +++ b/src/neofs_testlib/shell/interfaces.py @@ -2,6 +2,8 @@ from abc import ABC, abstractmethod from dataclasses import dataclass from typing import Optional +from neofs_testlib.defaults import Options + @dataclass class InteractiveInput: @@ -49,10 +51,14 @@ class CommandOptions: interactive_inputs: Optional[list[InteractiveInput]] = None close_stdin: bool = False - timeout: int = 30 + timeout: Optional[int] = None check: bool = True no_log: bool = False + def __post_init__(self): + if self.timeout is None: + self.timeout = Options.get_default_shell_timeout() + @dataclass class CommandResult: From 11d65091ad6f593eea3487a397b151a06f0a8b69 Mon Sep 17 00:00:00 2001 From: Vladislav Karakozov Date: Tue, 7 Feb 2023 13:13:57 +0300 Subject: [PATCH 049/363] Service restart Signed-off-by: Vladislav Karakozov --- .pre-commit-config.yaml | 2 +- requirements.txt | 2 +- src/neofs_testlib/hosting/docker_host.py | 16 ++++++++++++++-- src/neofs_testlib/hosting/interfaces.py | 8 ++++++++ 4 files changed, 24 insertions(+), 4 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ad9846a..0603040 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -5,7 +5,7 @@ repos: - id: black language_version: python3.9 - repo: https://github.com/pycqa/isort - rev: 5.10.1 + rev: 5.12.0 hooks: - id: isort name: isort (python) diff --git a/requirements.txt b/requirements.txt index adca8f9..af0ee32 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,7 +9,7 @@ requests==2.28.1 # Dev dependencies black==22.8.0 bumpver==2022.1118 -isort==5.10.1 +isort==5.12.0 pre-commit==2.20.0 # Packaging dependencies diff --git a/src/neofs_testlib/hosting/docker_host.py b/src/neofs_testlib/hosting/docker_host.py index 257847b..3b36e31 100644 --- a/src/neofs_testlib/hosting/docker_host.py +++ 
b/src/neofs_testlib/hosting/docker_host.py @@ -54,8 +54,8 @@ class ServiceAttributes(ParsedAttributes): container_name: str volume_name: Optional[str] = None - start_timeout: int = 60 - stop_timeout: int = 60 + start_timeout: int = 90 + stop_timeout: int = 90 class DockerHost(Host): @@ -117,6 +117,18 @@ class DockerHost(Host): timeout=service_attributes.stop_timeout, ) + def restart_service(self, service_name: str) -> None: + service_attributes = self._get_service_attributes(service_name) + + client = self._get_docker_client() + client.restart(service_attributes.container_name) + + self._wait_for_container_to_be_in_state( + container_name=service_attributes.container_name, + expected_state="running", + timeout=service_attributes.start_timeout, + ) + def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None: service_attributes = self._get_service_attributes(service_name) diff --git a/src/neofs_testlib/hosting/interfaces.py b/src/neofs_testlib/hosting/interfaces.py index ddb07b7..2d36045 100644 --- a/src/neofs_testlib/hosting/interfaces.py +++ b/src/neofs_testlib/hosting/interfaces.py @@ -104,6 +104,14 @@ class Host(ABC): service_name: Name of the service to stop. """ + @abstractmethod + def restart_service(self, service_name: str) -> None: + """Restarts the service with specified name and waits until it starts. + The service must be hosted on this host. + Args: + service_name: Name of the service to restart. + """ + @abstractmethod def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None: """Erases all data of the storage node with specified name. From 54ac125f40dead64a84b6f552b77927346c646fd Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 7 Feb 2023 13:35:26 +0300 Subject: [PATCH 050/363] Bump version 0.9.0 -> 0.10.0 Signed-off-by: Andrey Berezin --- pyproject.toml | 4 ++-- src/neofs_testlib/__init__.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index bd14edf..0abc3ac 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "neofs-testlib" -version = "0.9.0" +version = "0.10.0" description = "Building blocks and utilities to facilitate development of automated tests for NeoFS system" readme = "README.md" authors = [{ name = "NSPCC", email = "info@nspcc.ru" }] @@ -48,7 +48,7 @@ line-length = 100 target-version = ["py39"] [tool.bumpver] -current_version = "0.9.0" +current_version = "0.10.0" version_pattern = "MAJOR.MINOR.PATCH" commit_message = "Bump version {old_version} -> {new_version}" commit = true diff --git a/src/neofs_testlib/__init__.py b/src/neofs_testlib/__init__.py index 3e2f46a..61fb31c 100644 --- a/src/neofs_testlib/__init__.py +++ b/src/neofs_testlib/__init__.py @@ -1 +1 @@ -__version__ = "0.9.0" +__version__ = "0.10.0" From cea3ab5126e9967e14536c20f0def2301f9f1e5e Mon Sep 17 00:00:00 2001 From: "a.chetaev" Date: Mon, 6 Feb 2023 18:12:04 +0100 Subject: [PATCH 051/363] Change mamba version with update imports Signed-off-by: Aleskei Chetaev --- .gitignore | 1 + pyproject.toml | 4 ++-- requirements.txt | 2 +- src/neofs_testlib/utils/converters.py | 2 +- src/neofs_testlib/utils/wallet.py | 5 +++-- tests/test_ssh_shell.py | 1 - tests/test_wallet.py | 2 +- 7 files changed, 9 insertions(+), 8 deletions(-) diff --git a/.gitignore b/.gitignore index c3d58f7..a7f7de0 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ # ignore IDE files .vscode +.idea # ignore temp files under any path 
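
# A one-line sketch of the restart primitive above; for a DockerHost it
# restarts the container and blocks until it reports "running" again
# (`host` is assumed as in the earlier sketches):
host.restart_service("storage-node-1")
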
.DS_Store diff --git a/pyproject.toml b/pyproject.toml index 0abc3ac..29e8571 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,12 +19,12 @@ dependencies = [ "allure-python-commons>=2.9.45", "docker>=4.4.0", "importlib_metadata>=5.0; python_version < '3.10'", - "neo-mamba==0.10.0", + "neo-mamba==1.0.0", "paramiko>=2.10.3", "pexpect>=4.8.0", "requests>=2.28.0", ] -requires-python = ">=3.9" +requires-python = ">=3.10" [project.optional-dependencies] dev = ["black", "bumpver", "isort", "pre-commit"] diff --git a/requirements.txt b/requirements.txt index af0ee32..a75b94f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ allure-python-commons==2.9.45 docker==4.4.0 importlib_metadata==5.0.0 -neo-mamba==0.10.0 +neo-mamba==1.0.0 paramiko==2.10.3 pexpect==4.8.0 requests==2.28.1 diff --git a/src/neofs_testlib/utils/converters.py b/src/neofs_testlib/utils/converters.py index 64cef1a..65ea366 100644 --- a/src/neofs_testlib/utils/converters.py +++ b/src/neofs_testlib/utils/converters.py @@ -3,7 +3,7 @@ import binascii import json import base58 -from neo3 import wallet as neo3_wallet +from neo3.wallet import wallet as neo3_wallet def str_to_ascii_hex(input: str) -> str: diff --git a/src/neofs_testlib/utils/wallet.py b/src/neofs_testlib/utils/wallet.py index 9cd248b..5d4ff4a 100644 --- a/src/neofs_testlib/utils/wallet.py +++ b/src/neofs_testlib/utils/wallet.py @@ -1,7 +1,8 @@ import json import logging -from neo3 import wallet as neo3_wallet +from neo3.wallet import wallet as neo3_wallet +from neo3.wallet import account as neo3_account logger = logging.getLogger("neofs.testlib.utils") @@ -14,7 +15,7 @@ def init_wallet(wallet_path: str, wallet_password: str): wallet_password: The password for new wallet. """ wallet = neo3_wallet.Wallet() - account = neo3_wallet.Account.create_new(wallet_password) + account = neo3_account.Account.create_new(wallet_password) wallet.account_add(account) with open(wallet_path, "w") as out: json.dump(wallet.to_json(), out) diff --git a/tests/test_ssh_shell.py b/tests/test_ssh_shell.py index a57b479..0ffeb4d 100644 --- a/tests/test_ssh_shell.py +++ b/tests/test_ssh_shell.py @@ -3,7 +3,6 @@ from unittest import SkipTest, TestCase from neofs_testlib.shell.interfaces import CommandOptions, InteractiveInput from neofs_testlib.shell.ssh_shell import SSHShell - from tests.helpers import format_error_details, get_output_lines diff --git a/tests/test_wallet.py b/tests/test_wallet.py index b9352e9..7e688ab 100644 --- a/tests/test_wallet.py +++ b/tests/test_wallet.py @@ -3,7 +3,7 @@ import os from unittest import TestCase from uuid import uuid4 -from neo3.wallet import Wallet +from neo3.wallet.wallet import Wallet from neofs_testlib.utils.wallet import init_wallet, get_last_address_from_wallet From f04ca55cdefbbf379848e5e95dc0b25fad58e602 Mon Sep 17 00:00:00 2001 From: Aleskei Chetaev Date: Tue, 7 Feb 2023 14:41:00 +0100 Subject: [PATCH 052/363] Bump version 0.10.0 -> 1.0.0 Signed-off-by: Aleskei Chetaev --- pyproject.toml | 4 ++-- src/neofs_testlib/__init__.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 29e8571..6925ef8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "neofs-testlib" -version = "0.10.0" +version = "1.0.0" description = "Building blocks and utilities to facilitate development of automated tests for NeoFS system" readme = "README.md" authors = [{ name = "NSPCC", email = "info@nspcc.ru" }] @@ -48,7 +48,7 @@ 
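
# Under neo-mamba 1.0.0 the wallet classes live in submodules, hence the
# import updates above; the old flat imports no longer work:
from neo3.wallet.wallet import Wallet    # previously: from neo3 import wallet
from neo3.wallet.account import Account  # previously: neo3_wallet.Account
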
line-length = 100 target-version = ["py39"] [tool.bumpver] -current_version = "0.10.0" +current_version = "1.0.0" version_pattern = "MAJOR.MINOR.PATCH" commit_message = "Bump version {old_version} -> {new_version}" commit = true diff --git a/src/neofs_testlib/__init__.py b/src/neofs_testlib/__init__.py index 61fb31c..5becc17 100644 --- a/src/neofs_testlib/__init__.py +++ b/src/neofs_testlib/__init__.py @@ -1 +1 @@ -__version__ = "0.10.0" +__version__ = "1.0.0" From 0b42dbaab18ba9598bbe1c71aca8ffbcb485393e Mon Sep 17 00:00:00 2001 From: Aleskei Chetaev Date: Tue, 7 Feb 2023 21:38:40 +0100 Subject: [PATCH 053/363] Bump version 1.1.0 -> 1.1.1 Signed-off-by: Aleskei Chetaev --- .pre-commit-config.yaml | 2 +- CONTRIBUTING.md | 2 +- pyproject.toml | 6 +++--- src/neofs_testlib/__init__.py | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0603040..6a9716a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -3,7 +3,7 @@ repos: rev: 22.8.0 hooks: - id: black - language_version: python3.9 + language_version: python3.10 - repo: https://github.com/pycqa/isort rev: 5.12.0 hooks: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c034916..5426511 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -46,7 +46,7 @@ To setup development environment for `neofs-testlib`, please, take the following 1. Prepare virtualenv ```shell -$ virtualenv --python=python3.9 venv +$ virtualenv --python=python3.10 venv $ source venv/bin/activate ``` diff --git a/pyproject.toml b/pyproject.toml index 6925ef8..1b796b4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "neofs-testlib" -version = "1.0.0" +version = "1.1.1" description = "Building blocks and utilities to facilitate development of automated tests for NeoFS system" readme = "README.md" authors = [{ name = "NSPCC", email = "info@nspcc.ru" }] @@ -45,10 +45,10 @@ line_length = 100 [tool.black] line-length = 100 -target-version = ["py39"] +target-version = ["py310"] [tool.bumpver] -current_version = "1.0.0" +current_version = "1.1.1" version_pattern = "MAJOR.MINOR.PATCH" commit_message = "Bump version {old_version} -> {new_version}" commit = true diff --git a/src/neofs_testlib/__init__.py b/src/neofs_testlib/__init__.py index 5becc17..a82b376 100644 --- a/src/neofs_testlib/__init__.py +++ b/src/neofs_testlib/__init__.py @@ -1 +1 @@ -__version__ = "1.0.0" +__version__ = "1.1.1" From 5a2c7ac98df678dafc8d3cadcfe42edf992bf235 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 25 Jan 2023 17:41:48 +0300 Subject: [PATCH 054/363] Update codeowners Signed-off-by: Andrey Berezin --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index c2a7a3b..1422062 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1 +1 @@ -* @aprasolova @vdomnich-yadro @dansingjulia @yadro-vavdeev +* @aprasolova @vdomnich-yadro @dansingjulia @yadro-vavdeev @abereziny From 6d3b6f0f2fac94e9869a80e0b7333de41892b3e3 Mon Sep 17 00:00:00 2001 From: Yulia Kovshova Date: Tue, 10 Jan 2023 16:02:24 +0300 Subject: [PATCH 055/363] Rename neofs to frostfs Signed-off-by: Yulia Kovshova --- CONTRIBUTING.md | 20 ++++----- README.md | 36 +++++++-------- pyproject.toml | 18 ++++---- .../__init__.py | 0 src/frostfs_testlib/blockchain/__init__.py | 2 + .../blockchain/multisig.py | 2 +- .../blockchain/role_designation.py | 2 +- .../blockchain/rpc_client.py | 2 +- 
src/frostfs_testlib/cli/__init__.py | 4 ++ .../cli/cli_command.py | 2 +- .../cli/frostfs_adm/__init__.py | 1 + src/frostfs_testlib/cli/frostfs_adm/adm.py | 22 ++++++++++ src/frostfs_testlib/cli/frostfs_adm/config.py | 22 ++++++++++ .../cli/frostfs_adm}/morph.py | 20 ++++----- .../cli/frostfs_adm}/storage_config.py | 8 ++-- .../cli/frostfs_adm}/subnet.py | 24 +++++----- .../cli/frostfs_adm}/version.py | 6 +-- .../cli/frostfs_authmate/__init__.py | 1 + .../cli/frostfs_authmate/authmate.py | 14 ++++++ .../cli/frostfs_authmate}/secret.py | 16 +++---- .../cli/frostfs_authmate}/version.py | 6 +-- .../cli/frostfs_cli/__init__.py | 1 + .../cli/frostfs_cli}/accounting.py | 8 ++-- .../cli/frostfs_cli}/acl.py | 8 ++-- src/frostfs_testlib/cli/frostfs_cli/cli.py | 38 ++++++++++++++++ .../cli/frostfs_cli}/container.py | 8 ++-- .../cli/frostfs_cli}/netmap.py | 8 ++-- .../cli/frostfs_cli}/object.py | 12 ++--- .../cli/frostfs_cli}/session.py | 6 +-- .../cli/frostfs_cli}/shards.py | 6 +-- .../cli/frostfs_cli}/storagegroup.py | 14 +++--- .../cli/frostfs_cli}/util.py | 6 +-- .../cli/frostfs_cli/version.py | 13 ++++++ src/frostfs_testlib/cli/neogo/__init__.py | 2 + .../cli/neogo/candidate.py | 4 +- .../cli/neogo/contract.py | 4 +- .../cli/neogo/db.py | 6 +-- .../cli/neogo/go.py | 18 ++++---- .../cli/neogo/nep17.py | 4 +- .../cli/neogo/network_type.py | 0 .../cli/neogo/node.py | 6 +-- .../cli/neogo/query.py | 4 +- .../cli/neogo/version.py | 4 +- .../cli/neogo/wallet.py | 4 +- src/frostfs_testlib/hosting/__init__.py | 3 ++ .../hosting/config.py | 0 .../hosting/docker_host.py | 19 +++----- .../hosting/hosting.py | 8 ++-- .../hosting/interfaces.py | 4 +- .../plugins/__init__.py | 0 .../reporter/__init__.py | 6 +-- .../reporter/allure_handler.py | 2 +- .../reporter/interfaces.py | 0 .../reporter/reporter.py | 6 +-- src/frostfs_testlib/shell/__init__.py | 3 ++ .../shell/command_inspectors.py | 2 +- .../shell/interfaces.py | 0 .../shell/local_shell.py | 6 +-- .../shell/ssh_shell.py | 6 +-- .../utils/__init__.py | 0 .../utils/converters.py | 0 .../utils/wallet.py | 2 +- src/neofs_testlib/blockchain/__init__.py | 2 - src/neofs_testlib/cli/__init__.py | 4 -- src/neofs_testlib/cli/neofs_adm/__init__.py | 1 - src/neofs_testlib/cli/neofs_adm/adm.py | 22 ---------- src/neofs_testlib/cli/neofs_adm/config.py | 22 ---------- .../cli/neofs_authmate/__init__.py | 1 - .../cli/neofs_authmate/authmate.py | 14 ------ src/neofs_testlib/cli/neofs_cli/__init__.py | 1 - src/neofs_testlib/cli/neofs_cli/cli.py | 38 ---------------- src/neofs_testlib/cli/neofs_cli/version.py | 13 ------ src/neofs_testlib/cli/neogo/__init__.py | 2 - src/neofs_testlib/hosting/__init__.py | 3 -- src/neofs_testlib/shell/__init__.py | 3 -- tests/helpers.py | 2 +- tests/test_cli.py | 44 +++++++++---------- tests/test_converters.py | 2 +- tests/test_hosting.py | 2 +- tests/test_local_shell.py | 4 +- tests/test_reporter.py | 2 +- tests/test_ssh_shell.py | 5 ++- tests/test_wallet.py | 2 +- 83 files changed, 330 insertions(+), 338 deletions(-) rename src/{neofs_testlib => frostfs_testlib}/__init__.py (100%) create mode 100644 src/frostfs_testlib/blockchain/__init__.py rename src/{neofs_testlib => frostfs_testlib}/blockchain/multisig.py (97%) rename src/{neofs_testlib => frostfs_testlib}/blockchain/role_designation.py (98%) rename src/{neofs_testlib => frostfs_testlib}/blockchain/rpc_client.py (97%) create mode 100644 src/frostfs_testlib/cli/__init__.py rename src/{neofs_testlib => frostfs_testlib}/cli/cli_command.py (96%) create mode 100644 
src/frostfs_testlib/cli/frostfs_adm/__init__.py create mode 100644 src/frostfs_testlib/cli/frostfs_adm/adm.py create mode 100644 src/frostfs_testlib/cli/frostfs_adm/config.py rename src/{neofs_testlib/cli/neofs_adm => frostfs_testlib/cli/frostfs_adm}/morph.py (94%) rename src/{neofs_testlib/cli/neofs_adm => frostfs_testlib/cli/frostfs_adm}/storage_config.py (68%) rename src/{neofs_testlib/cli/neofs_adm => frostfs_testlib/cli/frostfs_adm}/subnet.py (92%) rename src/{neofs_testlib/cli/neofs_adm => frostfs_testlib/cli/frostfs_adm}/version.py (55%) create mode 100644 src/frostfs_testlib/cli/frostfs_authmate/__init__.py create mode 100644 src/frostfs_testlib/cli/frostfs_authmate/authmate.py rename src/{neofs_testlib/cli/neofs_authmate => frostfs_testlib/cli/frostfs_authmate}/secret.py (86%) rename src/{neofs_testlib/cli/neofs_authmate => frostfs_testlib/cli/frostfs_authmate}/version.py (54%) create mode 100644 src/frostfs_testlib/cli/frostfs_cli/__init__.py rename src/{neofs_testlib/cli/neofs_cli => frostfs_testlib/cli/frostfs_cli}/accounting.py (79%) rename src/{neofs_testlib/cli/neofs_cli => frostfs_testlib/cli/frostfs_cli}/acl.py (88%) create mode 100644 src/frostfs_testlib/cli/frostfs_cli/cli.py rename src/{neofs_testlib/cli/neofs_cli => frostfs_testlib/cli/frostfs_cli}/container.py (97%) rename src/{neofs_testlib/cli/neofs_cli => frostfs_testlib/cli/frostfs_cli}/netmap.py (95%) rename src/{neofs_testlib/cli/neofs_cli => frostfs_testlib/cli/frostfs_cli}/object.py (98%) rename src/{neofs_testlib/cli/neofs_cli => frostfs_testlib/cli/frostfs_cli}/session.py (88%) rename src/{neofs_testlib/cli/neofs_cli => frostfs_testlib/cli/frostfs_cli}/shards.py (96%) rename src/{neofs_testlib/cli/neofs_cli => frostfs_testlib/cli/frostfs_cli}/storagegroup.py (93%) rename src/{neofs_testlib/cli/neofs_cli => frostfs_testlib/cli/frostfs_cli}/util.py (92%) create mode 100644 src/frostfs_testlib/cli/frostfs_cli/version.py create mode 100644 src/frostfs_testlib/cli/neogo/__init__.py rename src/{neofs_testlib => frostfs_testlib}/cli/neogo/candidate.py (97%) rename src/{neofs_testlib => frostfs_testlib}/cli/neogo/contract.py (99%) rename src/{neofs_testlib => frostfs_testlib}/cli/neogo/db.py (92%) rename src/{neofs_testlib => frostfs_testlib}/cli/neogo/go.py (70%) rename src/{neofs_testlib => frostfs_testlib}/cli/neogo/nep17.py (98%) rename src/{neofs_testlib => frostfs_testlib}/cli/neogo/network_type.py (100%) rename src/{neofs_testlib => frostfs_testlib}/cli/neogo/node.py (67%) rename src/{neofs_testlib => frostfs_testlib}/cli/neogo/query.py (96%) rename src/{neofs_testlib => frostfs_testlib}/cli/neogo/version.py (66%) rename src/{neofs_testlib => frostfs_testlib}/cli/neogo/wallet.py (99%) create mode 100644 src/frostfs_testlib/hosting/__init__.py rename src/{neofs_testlib => frostfs_testlib}/hosting/config.py (100%) rename src/{neofs_testlib => frostfs_testlib}/hosting/docker_host.py (93%) rename src/{neofs_testlib => frostfs_testlib}/hosting/hosting.py (92%) rename src/{neofs_testlib => frostfs_testlib}/hosting/interfaces.py (97%) rename src/{neofs_testlib => frostfs_testlib}/plugins/__init__.py (100%) rename src/{neofs_testlib => frostfs_testlib}/reporter/__init__.py (67%) rename src/{neofs_testlib => frostfs_testlib}/reporter/allure_handler.py (94%) rename src/{neofs_testlib => frostfs_testlib}/reporter/interfaces.py (100%) rename src/{neofs_testlib => frostfs_testlib}/reporter/reporter.py (93%) create mode 100644 src/frostfs_testlib/shell/__init__.py rename src/{neofs_testlib => 
frostfs_testlib}/shell/command_inspectors.py (82%) rename src/{neofs_testlib => frostfs_testlib}/shell/interfaces.py (100%) rename src/{neofs_testlib => frostfs_testlib}/shell/local_shell.py (96%) rename src/{neofs_testlib => frostfs_testlib}/shell/ssh_shell.py (98%) rename src/{neofs_testlib => frostfs_testlib}/utils/__init__.py (100%) rename src/{neofs_testlib => frostfs_testlib}/utils/converters.py (100%) rename src/{neofs_testlib => frostfs_testlib}/utils/wallet.py (95%) delete mode 100644 src/neofs_testlib/blockchain/__init__.py delete mode 100644 src/neofs_testlib/cli/__init__.py delete mode 100644 src/neofs_testlib/cli/neofs_adm/__init__.py delete mode 100644 src/neofs_testlib/cli/neofs_adm/adm.py delete mode 100644 src/neofs_testlib/cli/neofs_adm/config.py delete mode 100644 src/neofs_testlib/cli/neofs_authmate/__init__.py delete mode 100644 src/neofs_testlib/cli/neofs_authmate/authmate.py delete mode 100644 src/neofs_testlib/cli/neofs_cli/__init__.py delete mode 100644 src/neofs_testlib/cli/neofs_cli/cli.py delete mode 100644 src/neofs_testlib/cli/neofs_cli/version.py delete mode 100644 src/neofs_testlib/cli/neogo/__init__.py delete mode 100644 src/neofs_testlib/hosting/__init__.py delete mode 100644 src/neofs_testlib/shell/__init__.py diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5426511..f290592 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -3,8 +3,8 @@ First, thank you for contributing! We love and encourage pull requests from everyone. Please follow the guidelines: -- Check the open [issues](https://github.com/nspcc-dev/neofs-testlib/issues) and - [pull requests](https://github.com/nspcc-dev/neofs-testlib/pulls) for existing +- Check the open [issues](https://github.com/TrueCloudLab/frostfs-testlib/issues) and + [pull requests](https://github.com/TrueCloudLab/frostfs-testlib/pulls) for existing discussions. - Open an issue first, to discuss a new feature or enhancement. @@ -22,12 +22,12 @@ everyone. Please follow the guidelines: ## Development Workflow -Start by forking the `neofs-testlib` repository, make changes in a branch and then +Start by forking the `frostfs-testlib` repository, make changes in a branch and then send a pull request. We encourage pull requests to discuss code changes. Here are the steps in details: ### Set up your GitHub Repository -Fork [NeoFS testlib upstream](https://github.com/nspcc-dev/neofs-testlib/fork) source +Fork [FrostFS testlib upstream](https://github.com/TrueCloudLab/frostfs-testlib/fork) source repository to your own personal repository. Copy the URL of your fork and clone it: ```shell @@ -36,13 +36,13 @@ $ git clone ### Set up git remote as ``upstream`` ```shell -$ cd neofs-testlib -$ git remote add upstream https://github.com/nspcc-dev/neofs-testlib +$ cd frostfs-testlib +$ git remote add upstream https://github.com/TrueCloudLab/frostfs-testlib $ git fetch upstream ``` ### Set up development environment -To setup development environment for `neofs-testlib`, please, take the following steps: +To setup development environment for `frostfs-testlib`, please, take the following steps: 1. Prepare virtualenv ```shell @@ -183,9 +183,9 @@ Do not use relative imports. Even if the module is in the same package, use the To format docstrings, please, use [Google Style Docstrings](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html). 
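For illustration, a hypothetical helper documented in this style might look like the sketch below (the function and its parameters are invented for this example):

```python
from typing import Optional

from frostfs_testlib.shell import Shell


def transfer_gas(shell: Shell, amount: int, wallet_to_path: Optional[str] = None) -> None:
    """Transfers GAS between wallets.

    Args:
        shell: Shell instance on which the transfer command is executed.
        amount: Amount of GAS to transfer.
        wallet_to_path: Path to the target wallet.
    """
    ...
```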
Type annotations should be specified in the code and not in docstrings (please, refer to [this sample](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/index.html#type-annotations)). ## Editable installation -If you would like to modify code of the library in the integration with your test suite, you can use editable installation. For that, in virtual environment of your test suite (not in the virtual environment of the testlib itself!) run the following command (path to `neofs-testlib` directory might be different on your machine): +If you would like to modify code of the library in the integration with your test suite, you can use editable installation. For that, in virtual environment of your test suite (not in the virtual environment of the testlib itself!) run the following command (path to `frostfs-testlib` directory might be different on your machine): ```shell -$ pip install -e ../neofs-testlib +$ pip install -e ../frostfs-testlib ``` # Maintaining guide @@ -225,7 +225,7 @@ To check that package description will be correctly rendered at PyPI, please, us $ twine check dist/* ``` -To upload package to [test PyPI](https://test.pypi.org/project/neofs-testlib/), please, use command: +To upload package to [test PyPI](https://test.pypi.org/project/frostfs-testlib/), please, use command: ```shell $ twine upload -r testpypi dist/* ``` diff --git a/README.md b/README.md index 3cb43fb..ddd2620 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,10 @@ -# neofs-testlib -This library provides building blocks and utilities to facilitate development of automated tests for NeoFS system. +# frostfs-testlib +This library provides building blocks and utilities to facilitate development of automated tests for FrostFS system. ## Installation Library can be installed via pip: ```shell -$ pip install neofs-testlib +$ pip install frostfs-testlib ``` ## Configuration @@ -16,7 +16,7 @@ Reporter is a singleton component that is used by the library to store test artifacts. Reporter sends artifacts to handlers that are responsible for actual storing in particular system. By default reporter is initialized without any handlers and won't take any actions to store the artifacts. To add handlers directly via code you can use method `register_handler`: ```python -from neofs_testlib.reporter import AllureHandler, get_reporter +from frostfs_testlib.reporter import AllureHandler, get_reporter get_reporter().register_handler(AllureHandler()) ``` @@ -30,10 +30,10 @@ get_reporter().configure({ "handlers": [{"plugin_name": "allure"}] }) ``` ### Hosting Configuration -Hosting component is a class that represents infrastructure (machines/containers/services) where neoFS is hosted. Interaction with specific infrastructure instance (host) is encapsulated in classes that implement interface `neofs_testlib.hosting.Host`. To pass information about hosts to the `Hosting` class in runtime we use method `configure`: +Hosting component is a class that represents infrastructure (machines/containers/services) where FrostFS is hosted. Interaction with specific infrastructure instance (host) is encapsulated in classes that implement interface `frostfs_testlib.hosting.Host`. To pass information about hosts to the `Hosting` class in runtime we use method `configure`: ```python -from neofs_testlib.hosting import Hosting +from frostfs_testlib.hosting import Hosting hosting = Hosting() hosting.configure({ "hosts": [{ "address": "localhost", "plugin_name": "docker" ...
}]}) @@ -41,18 +41,18 @@ hosting.configure({ "hosts": [{ "address": "localhost", "plugin_name": "docker" ## Plugins Testlib uses [entrypoint specification](https://docs.python.org/3/library/importlib.metadata.html) for plugins. Testlib supports the following entrypoint groups for plugins: - - `neofs.testlib.reporter` - group for reporter handler plugins. Plugin should be a class that implements interface `neofs_testlib.reporter.interfaces.ReporterHandler`. + - `frostfs.testlib.reporter` - group for reporter handler plugins. Plugin should be a class that implements interface `frostfs_testlib.reporter.interfaces.ReporterHandler`. ### Example reporter plugin In this example we will consider two Python projects: - - Project "my_neofs_plugins" where we will build a plugin that extends testlib functionality. - - Project "my_neofs_tests" that uses "neofs_testlib" and "my_neofs_plugins" to build some tests. + - Project "my_frostfs_plugins" where we will build a plugin that extends testlib functionality. + - Project "my_frostfs_tests" that uses "frostfs_testlib" and "my_frostfs_plugins" to build some tests. Let's say we want to implement some custom reporter handler that can be used as a plugin for testlib. Pseudo-code of implementation can look like that: ```python -# File my_neofs_plugins/src/foo/bar/custom_handler.py +# File my_frostfs_plugins/src/foo/bar/custom_handler.py from contextlib import AbstractContextManager -from neofs_testlib.reporter import ReporterHandler +from frostfs_testlib.reporter import ReporterHandler class CustomHandler(ReporterHandler): @@ -63,18 +63,18 @@ class CustomHandler(ReporterHandler): ... some implementation ... ``` -Then in the file `pyproject.toml` of "my_neofs_plugins" we should register entrypoint for this plugin. Entrypoint must belong to the group `neofs.testlib.reporter`: +Then in the file `pyproject.toml` of "my_frostfs_plugins" we should register entrypoint for this plugin. Entrypoint must belong to the group `frostfs.testlib.reporter`: ```yaml -# File my_neofs_plugins/pyproject.toml -[project.entry-points."neofs.testlib.reporter"] +# File my_frostfs_plugins/pyproject.toml +[project.entry-points."frostfs.testlib.reporter"] my_custom_handler = "foo.bar.custom_handler:CustomHandler" ``` -Finally, to use this handler in our test project "my_neofs_tests", we should configure reporter with name of the handler plugin: +Finally, to use this handler in our test project "my_frostfs_tests", we should configure reporter with name of the handler plugin: ```python -# File my_neofs_tests/src/conftest.py -from neofs_testlib.reporter import get_reporter +# File my_frostfs_tests/src/conftest.py +from frostfs_testlib.reporter import get_reporter get_reporter().configure({ "handlers": [{"plugin_name": "my_custom_handler"}] }) ``` @@ -92,4 +92,4 @@ The library provides the following primary components: ## Contributing -Any contributions to the library should conform to the [contribution guideline](https://github.com/nspcc-dev/neofs-testlib/blob/master/CONTRIBUTING.md). +Any contributions to the library should conform to the [contribution guideline](https://github.com/TrueCloudLab/frostfs-testlib/blob/master/CONTRIBUTING.md). 
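To make the plugin walkthrough above concrete, here is a minimal sketch of a complete handler, assuming the `ReporterHandler` interface consists of `step` and `attach` as used by the bundled `AllureHandler` (the logger-based behavior is invented for the example):

```python
# File my_frostfs_plugins/src/foo/bar/custom_handler.py (hypothetical)
import logging
from contextlib import AbstractContextManager, contextmanager
from typing import Any

from frostfs_testlib.reporter import ReporterHandler

logger = logging.getLogger("my_frostfs_plugins")


class CustomHandler(ReporterHandler):
    """Toy handler that mirrors steps and attachments into a logger."""

    def step(self, name: str) -> AbstractContextManager:
        @contextmanager
        def _step():
            logger.info("step started: %s", name)
            try:
                yield
            finally:
                logger.info("step finished: %s", name)

        return _step()

    def attach(self, body: Any, file_name: str) -> None:
        logger.info("attachment %s: %s", file_name, body)
```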
diff --git a/pyproject.toml b/pyproject.toml index 1b796b4..e9d69ab 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,9 +3,9 @@ requires = ["setuptools>=65.0.0", "wheel"] build-backend = "setuptools.build_meta" [project] -name = "neofs-testlib" +name = "frostfs-testlib" version = "1.1.1" -description = "Building blocks and utilities to facilitate development of automated tests for NeoFS system" +description = "Building blocks and utilities to facilitate development of automated tests for FrostFS system" readme = "README.md" authors = [{ name = "NSPCC", email = "info@nspcc.ru" }] license = { text = "GNU General Public License v3 (GPLv3)" } @@ -14,7 +14,7 @@ classifiers = [ "Programming Language :: Python", "Programming Language :: Python :: 3", ] -keywords = ["neofs", "test"] +keywords = ["frostfs", "test"] dependencies = [ "allure-python-commons>=2.9.45", "docker>=4.4.0", @@ -30,13 +30,13 @@ requires-python = ">=3.10" dev = ["black", "bumpver", "isort", "pre-commit"] [project.urls] -Homepage = "https://github.com/nspcc-dev/neofs-testlib" +Homepage = "https://github.com/TrueCloudLab/frostfs-testlib" -[project.entry-points."neofs.testlib.reporter"] -allure = "neofs_testlib.reporter.allure_handler:AllureHandler" +[project.entry-points."frostfs.testlib.reporter"] +allure = "frostfs_testlib.reporter.allure_handler:AllureHandler" -[project.entry-points."neofs.testlib.hosting"] -docker = "neofs_testlib.hosting.docker_host:DockerHost" +[project.entry-points."frostfs.testlib.hosting"] +docker = "frostfs_testlib.hosting.docker_host:DockerHost" [tool.isort] profile = "black" @@ -57,4 +57,4 @@ push = false [tool.bumpver.file_patterns] "pyproject.toml" = ['current_version = "{version}"', 'version = "{version}"'] -"src/neofs_testlib/__init__.py" = ["{version}"] +"src/frostfs_testlib/__init__.py" = ["{version}"] diff --git a/src/neofs_testlib/__init__.py b/src/frostfs_testlib/__init__.py similarity index 100% rename from src/neofs_testlib/__init__.py rename to src/frostfs_testlib/__init__.py diff --git a/src/frostfs_testlib/blockchain/__init__.py b/src/frostfs_testlib/blockchain/__init__.py new file mode 100644 index 0000000..ceca6a2 --- /dev/null +++ b/src/frostfs_testlib/blockchain/__init__.py @@ -0,0 +1,2 @@ +from frostfs_testlib.blockchain.multisig import Multisig +from frostfs_testlib.blockchain.rpc_client import RPCClient diff --git a/src/neofs_testlib/blockchain/multisig.py b/src/frostfs_testlib/blockchain/multisig.py similarity index 97% rename from src/neofs_testlib/blockchain/multisig.py rename to src/frostfs_testlib/blockchain/multisig.py index 229f2a0..cd71f68 100644 --- a/src/neofs_testlib/blockchain/multisig.py +++ b/src/frostfs_testlib/blockchain/multisig.py @@ -1,4 +1,4 @@ -from neofs_testlib.cli import NeoGo +from frostfs_testlib.cli import NeoGo class Multisig: diff --git a/src/neofs_testlib/blockchain/role_designation.py b/src/frostfs_testlib/blockchain/role_designation.py similarity index 98% rename from src/neofs_testlib/blockchain/role_designation.py rename to src/frostfs_testlib/blockchain/role_designation.py index cfbce29..14d321b 100644 --- a/src/neofs_testlib/blockchain/role_designation.py +++ b/src/frostfs_testlib/blockchain/role_designation.py @@ -6,7 +6,7 @@ from cli import NeoGo from shell import Shell from utils.converters import process_b64_bytearray -from neofs_testlib.blockchain import Multisig +from frostfs_testlib.blockchain import Multisig class RoleDesignation: diff --git a/src/neofs_testlib/blockchain/rpc_client.py b/src/frostfs_testlib/blockchain/rpc_client.py 
similarity index 97% rename from src/neofs_testlib/blockchain/rpc_client.py rename to src/frostfs_testlib/blockchain/rpc_client.py index 0ca0212..25286d0 100644 --- a/src/neofs_testlib/blockchain/rpc_client.py +++ b/src/frostfs_testlib/blockchain/rpc_client.py @@ -4,7 +4,7 @@ from typing import Any, Dict, Optional import requests -logger = logging.getLogger("neofs.testlib.blockchain") +logger = logging.getLogger("frostfs.testlib.blockchain") class NeoRPCException(Exception): diff --git a/src/frostfs_testlib/cli/__init__.py b/src/frostfs_testlib/cli/__init__.py new file mode 100644 index 0000000..3799be9 --- /dev/null +++ b/src/frostfs_testlib/cli/__init__.py @@ -0,0 +1,4 @@ +from frostfs_testlib.cli.frostfs_adm import FrostfsAdm +from frostfs_testlib.cli.frostfs_authmate import FrostfsAuthmate +from frostfs_testlib.cli.frostfs_cli import FrostfsCli +from frostfs_testlib.cli.neogo import NeoGo, NetworkType diff --git a/src/neofs_testlib/cli/cli_command.py b/src/frostfs_testlib/cli/cli_command.py similarity index 96% rename from src/neofs_testlib/cli/cli_command.py rename to src/frostfs_testlib/cli/cli_command.py index d3ce86d..3600e77 100644 --- a/src/neofs_testlib/cli/cli_command.py +++ b/src/frostfs_testlib/cli/cli_command.py @@ -1,6 +1,6 @@ from typing import Optional -from neofs_testlib.shell import CommandOptions, CommandResult, InteractiveInput, Shell +from frostfs_testlib.shell import CommandOptions, CommandResult, InteractiveInput, Shell class CliCommand: diff --git a/src/frostfs_testlib/cli/frostfs_adm/__init__.py b/src/frostfs_testlib/cli/frostfs_adm/__init__.py new file mode 100644 index 0000000..d592eaf --- /dev/null +++ b/src/frostfs_testlib/cli/frostfs_adm/__init__.py @@ -0,0 +1 @@ +from frostfs_testlib.cli.frostfs_adm.adm import FrostfsAdm diff --git a/src/frostfs_testlib/cli/frostfs_adm/adm.py b/src/frostfs_testlib/cli/frostfs_adm/adm.py new file mode 100644 index 0000000..283069c --- /dev/null +++ b/src/frostfs_testlib/cli/frostfs_adm/adm.py @@ -0,0 +1,22 @@ +from typing import Optional + +from frostfs_testlib.cli.frostfs_adm.config import FrostfsAdmConfig +from frostfs_testlib.cli.frostfs_adm.morph import FrostfsAdmMorph +from frostfs_testlib.cli.frostfs_adm.storage_config import FrostfsAdmStorageConfig +from frostfs_testlib.cli.frostfs_adm.subnet import FrostfsAdmMorphSubnet +from frostfs_testlib.cli.frostfs_adm.version import FrostfsAdmVersion +from frostfs_testlib.shell import Shell + + +class FrostfsAdm: + morph: Optional[FrostfsAdmMorph] = None + subnet: Optional[FrostfsAdmMorphSubnet] = None + storage_config: Optional[FrostfsAdmStorageConfig] = None + version: Optional[FrostfsAdmVersion] = None + + def __init__(self, shell: Shell, frostfs_adm_exec_path: str, config_file: Optional[str] = None): + self.config = FrostfsAdmConfig(shell, frostfs_adm_exec_path, config=config_file) + self.morph = FrostfsAdmMorph(shell, frostfs_adm_exec_path, config=config_file) + self.subnet = FrostfsAdmMorphSubnet(shell, frostfs_adm_exec_path, config=config_file) + self.storage_config = FrostfsAdmStorageConfig(shell, frostfs_adm_exec_path, config=config_file) + self.version = FrostfsAdmVersion(shell, frostfs_adm_exec_path, config=config_file) diff --git a/src/frostfs_testlib/cli/frostfs_adm/config.py b/src/frostfs_testlib/cli/frostfs_adm/config.py new file mode 100644 index 0000000..a29b3ac --- /dev/null +++ b/src/frostfs_testlib/cli/frostfs_adm/config.py @@ -0,0 +1,22 @@ +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult + + +class 
FrostfsAdmConfig(CliCommand): + def init(self, path: str = "~/.frostfs/adm/config.yml") -> CommandResult: + """Initialize basic frostfs-adm configuration file. + + Args: + path: Path to config (default ~/.frostfs/adm/config.yml). + + Returns: + Command's result. + """ + return self._execute( + "config init", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self"] + }, + ) diff --git a/src/neofs_testlib/cli/neofs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py similarity index 94% rename from src/neofs_testlib/cli/neofs_adm/morph.py rename to src/frostfs_testlib/cli/frostfs_adm/morph.py index 6c67d79..aba147b 100644 --- a/src/neofs_testlib/cli/neofs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -1,10 +1,10 @@ from typing import Optional -from neofs_testlib.cli.cli_command import CliCommand -from neofs_testlib.shell import CommandResult +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult -class NeofsAdmMorph(CliCommand): +class FrostfsAdmMorph(CliCommand): def deposit_notary( self, rpc_endpoint: str, @@ -88,7 +88,7 @@ class NeofsAdmMorph(CliCommand): container_contract: Optional[str] = None, dump: str = "./testlib_dump_container", ) -> CommandResult: - """Dump NeoFS containers to file. + """Dump FrostFS containers to file. Args: cid: Containers to dump. @@ -129,7 +129,7 @@ class NeofsAdmMorph(CliCommand): def force_new_epoch( self, rpc_endpoint: Optional[str] = None, alphabet: Optional[str] = None ) -> CommandResult: - """Create new NeoFS epoch event in the side chain. + """Create new FrostFS epoch event in the side chain. Args: alphabet: Path to alphabet wallets dir. @@ -218,9 +218,9 @@ class NeofsAdmMorph(CliCommand): alphabet_wallets: Path to alphabet wallets dir. container_alias_fee: Container alias fee (default 500). container_fee: Container registration fee (default 1000). - contracts: Path to archive with compiled NeoFS contracts + contracts: Path to archive with compiled FrostFS contracts (default fetched from latest github release). - epoch_duration: Amount of side chain blocks in one NeoFS epoch (default 240). + epoch_duration: Amount of side chain blocks in one FrostFS epoch (default 240). homomorphic_disabled: Disable object homomorphic hashing. local_dump: Path to the blocks dump file. max_object_size: Max single object size in bytes (default 67108864). @@ -273,7 +273,7 @@ class NeofsAdmMorph(CliCommand): cid: str, dump: str, ) -> CommandResult: - """Restore NeoFS containers from file. + """Restore FrostFS containers from file. Args: alphabet_wallets: Path to alphabet wallets dir. @@ -335,11 +335,11 @@ class NeofsAdmMorph(CliCommand): alphabet_wallets: str, contracts: Optional[str] = None, ) -> CommandResult: - """Update NeoFS contracts. + """Update FrostFS contracts. Args: alphabet_wallets: Path to alphabet wallets dir. - contracts: Path to archive with compiled NeoFS contracts + contracts: Path to archive with compiled FrostFS contracts (default fetched from latest github release). rpc_endpoint: N3 RPC node endpoint. 
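Taken together, the renamed admin wrapper can be exercised roughly like this (a usage sketch; the binary name, config path, and RPC endpoint are placeholders, not values from this repository):

```python
from frostfs_testlib.cli import FrostfsAdm
from frostfs_testlib.shell import LocalShell

# Placeholder paths/endpoints: adjust to the environment under test.
adm = FrostfsAdm(LocalShell(), "frostfs-adm", config_file="/etc/frostfs/adm.yml")

# Trigger a new FrostFS epoch through the morph subcommand shown above.
result = adm.morph.force_new_epoch(rpc_endpoint="http://morph-chain.example:30333")
print(result.stdout)
```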
diff --git a/src/neofs_testlib/cli/neofs_adm/storage_config.py b/src/frostfs_testlib/cli/frostfs_adm/storage_config.py similarity index 68% rename from src/neofs_testlib/cli/neofs_adm/storage_config.py rename to src/frostfs_testlib/cli/frostfs_adm/storage_config.py index 75e3e06..81bf210 100644 --- a/src/neofs_testlib/cli/neofs_adm/storage_config.py +++ b/src/frostfs_testlib/cli/frostfs_adm/storage_config.py @@ -1,10 +1,10 @@ -from neofs_testlib.cli.cli_command import CliCommand -from neofs_testlib.shell import CommandResult +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult -class NeofsAdmStorageConfig(CliCommand): +class FrostfsAdmStorageConfig(CliCommand): def set(self, account: str, wallet: str) -> CommandResult: - """Initialize basic neofs-adm configuration file. + """Initialize basic frostfs-adm configuration file. Args: account: Wallet account. diff --git a/src/neofs_testlib/cli/neofs_adm/subnet.py b/src/frostfs_testlib/cli/frostfs_adm/subnet.py similarity index 92% rename from src/neofs_testlib/cli/neofs_adm/subnet.py rename to src/frostfs_testlib/cli/frostfs_adm/subnet.py index 127136d..fb5935e 100644 --- a/src/neofs_testlib/cli/neofs_adm/subnet.py +++ b/src/frostfs_testlib/cli/frostfs_adm/subnet.py @@ -1,14 +1,14 @@ from typing import Optional -from neofs_testlib.cli.cli_command import CliCommand -from neofs_testlib.shell import CommandResult +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult -class NeofsAdmMorphSubnet(CliCommand): +class FrostfsAdmMorphSubnet(CliCommand): def create( self, rpc_endpoint: str, address: str, wallet: str, notary: bool = False ) -> CommandResult: - """Create NeoFS subnet. + """Create FrostFS subnet. Args: address: Address in the wallet, optional. @@ -29,7 +29,7 @@ class NeofsAdmMorphSubnet(CliCommand): ) def get(self, rpc_endpoint: str, subnet: str) -> CommandResult: - """Read information about the NeoFS subnet. + """Read information about the FrostFS subnet. Args: rpc_endpoint: N3 RPC node endpoint. @@ -50,7 +50,7 @@ class NeofsAdmMorphSubnet(CliCommand): def remove( self, rpc_endpoint: str, wallet: str, subnet: str, address: Optional[str] = None ) -> CommandResult: - """Remove NeoFS subnet. + """Remove FrostFS subnet. Args: address: Address in the wallet, optional. @@ -80,7 +80,7 @@ class NeofsAdmMorphSubnet(CliCommand): group: Optional[str] = None, address: Optional[str] = None, ) -> CommandResult: - """Add admin to the NeoFS subnet. + """Add admin to the FrostFS subnet. Args: address: Address in the wallet, optional. @@ -112,7 +112,7 @@ class NeofsAdmMorphSubnet(CliCommand): client: Optional[str] = None, address: Optional[str] = None, ) -> CommandResult: - """Remove admin of the NeoFS subnet. + """Remove admin of the FrostFS subnet. Args: address: Address in the wallet, optional. @@ -143,7 +143,7 @@ class NeofsAdmMorphSubnet(CliCommand): group: Optional[str] = None, address: Optional[str] = None, ) -> CommandResult: - """Add client to the NeoFS subnet. + """Add client to the FrostFS subnet. Args: address: Address in the wallet, optional. @@ -174,7 +174,7 @@ class NeofsAdmMorphSubnet(CliCommand): subnet: str, address: Optional[str] = None, ) -> CommandResult: - """Remove client of the NeoFS subnet. + """Remove client of the FrostFS subnet. Args: address: Address in the wallet, optional. 
@@ -197,7 +197,7 @@ class NeofsAdmMorphSubnet(CliCommand): ) def node_add(self, rpc_endpoint: str, wallet: str, node: str, subnet: str) -> CommandResult: - """Add node to the NeoFS subnet. + """Add node to the FrostFS subnet. Args: node: Hex-encoded public key of the node. @@ -218,7 +218,7 @@ class NeofsAdmMorphSubnet(CliCommand): ) def node_remove(self, rpc_endpoint: str, wallet: str, node: str, subnet: str) -> CommandResult: - """Remove node from the NeoFS subnet. + """Remove node from the FrostFS subnet. Args: node: Hex-encoded public key of the node. diff --git a/src/neofs_testlib/cli/neofs_adm/version.py b/src/frostfs_testlib/cli/frostfs_adm/version.py similarity index 55% rename from src/neofs_testlib/cli/neofs_adm/version.py rename to src/frostfs_testlib/cli/frostfs_adm/version.py index 502d578..7d09afc 100644 --- a/src/neofs_testlib/cli/neofs_adm/version.py +++ b/src/frostfs_testlib/cli/frostfs_adm/version.py @@ -1,8 +1,8 @@ -from neofs_testlib.cli.cli_command import CliCommand -from neofs_testlib.shell import CommandResult +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult -class NeofsAdmVersion(CliCommand): +class FrostfsAdmVersion(CliCommand): def get(self) -> CommandResult: """Application version diff --git a/src/frostfs_testlib/cli/frostfs_authmate/__init__.py b/src/frostfs_testlib/cli/frostfs_authmate/__init__.py new file mode 100644 index 0000000..6f2d765 --- /dev/null +++ b/src/frostfs_testlib/cli/frostfs_authmate/__init__.py @@ -0,0 +1 @@ +from frostfs_testlib.cli.frostfs_authmate.authmate import FrostfsAuthmate diff --git a/src/frostfs_testlib/cli/frostfs_authmate/authmate.py b/src/frostfs_testlib/cli/frostfs_authmate/authmate.py new file mode 100644 index 0000000..ba3a3b0 --- /dev/null +++ b/src/frostfs_testlib/cli/frostfs_authmate/authmate.py @@ -0,0 +1,14 @@ +from typing import Optional + +from frostfs_testlib.cli.frostfs_authmate.secret import FrostfsAuthmateSecret +from frostfs_testlib.cli.frostfs_authmate.version import FrostfsAuthmateVersion +from frostfs_testlib.shell import Shell + + +class FrostfsAuthmate: + secret: Optional[FrostfsAuthmateSecret] = None + version: Optional[FrostfsAuthmateVersion] = None + + def __init__(self, shell: Shell, frostfs_authmate_exec_path: str): + self.secret = FrostfsAuthmateSecret(shell, frostfs_authmate_exec_path) + self.version = FrostfsAuthmateVersion(shell, frostfs_authmate_exec_path) diff --git a/src/neofs_testlib/cli/neofs_authmate/secret.py b/src/frostfs_testlib/cli/frostfs_authmate/secret.py similarity index 86% rename from src/neofs_testlib/cli/neofs_authmate/secret.py rename to src/frostfs_testlib/cli/frostfs_authmate/secret.py index 40cfc25..ba5b5f5 100644 --- a/src/neofs_testlib/cli/neofs_authmate/secret.py +++ b/src/frostfs_testlib/cli/frostfs_authmate/secret.py @@ -1,10 +1,10 @@ from typing import Optional, Union -from neofs_testlib.cli.cli_command import CliCommand -from neofs_testlib.shell import CommandResult +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult -class NeofsAuthmateSecret(CliCommand): +class FrostfsAuthmateSecret(CliCommand): def obtain( self, wallet: str, @@ -15,13 +15,13 @@ class NeofsAuthmateSecret(CliCommand): address: Optional[str] = None, gate_address: Optional[str] = None, ) -> CommandResult: - """Obtain a secret from NeoFS network. + """Obtain a secret from FrostFS network. Args: wallet: Path to the wallet. wallet_password: Wallet password. address: Address of wallet account. 
- peer: Address of neofs peer to connect to. + peer: Address of frostfs peer to connect to. gate_wallet: Path to the wallet. gate_address: Address of wallet account. access_key_id: Access key id for s3. @@ -55,13 +55,13 @@ class NeofsAuthmateSecret(CliCommand): container_policy: Optional[str] = None, aws_cli_credentials: Optional[str] = None, ) -> CommandResult: - """Obtain a secret from NeoFS network + """Obtain a secret from FrostFS network Args: wallet: Path to the wallet. wallet_password: Wallet password. address: Address of wallet account. - peer: Address of a neofs peer to connect to. + peer: Address of a frostfs peer to connect to. bearer_rules: Rules for bearer token as plain json string. gate_public_key: Public 256r1 key of a gate (send list[str] of keys to use multiple gates). container_id: Auth container id to put the secret into. @@ -73,7 +73,7 @@ class NeofsAuthmateSecret(CliCommand): lifetime: Lifetime of tokens. For example 50h30m (note: max time unit is an hour so to set a day you should use 24h). It will be ceil rounded to the nearest amount of epoch. (default: 720h0m0s). - container_policy: Mapping AWS storage class to NeoFS storage policy as plain json string + container_policy: Mapping AWS storage class to FrostFS storage policy as plain json string or path to json file. aws_cli_credentials: Path to the aws cli credential file. diff --git a/src/neofs_testlib/cli/neofs_authmate/version.py b/src/frostfs_testlib/cli/frostfs_authmate/version.py similarity index 54% rename from src/neofs_testlib/cli/neofs_authmate/version.py rename to src/frostfs_testlib/cli/frostfs_authmate/version.py index ec336dc..d459a06 100644 --- a/src/neofs_testlib/cli/neofs_authmate/version.py +++ b/src/frostfs_testlib/cli/frostfs_authmate/version.py @@ -1,8 +1,8 @@ -from neofs_testlib.cli.cli_command import CliCommand -from neofs_testlib.shell import CommandResult +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult -class NeofsAuthmateVersion(CliCommand): +class FrostfsAuthmateVersion(CliCommand): def get(self) -> CommandResult: """Application version diff --git a/src/frostfs_testlib/cli/frostfs_cli/__init__.py b/src/frostfs_testlib/cli/frostfs_cli/__init__.py new file mode 100644 index 0000000..e67f887 --- /dev/null +++ b/src/frostfs_testlib/cli/frostfs_cli/__init__.py @@ -0,0 +1 @@ +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli diff --git a/src/neofs_testlib/cli/neofs_cli/accounting.py b/src/frostfs_testlib/cli/frostfs_cli/accounting.py similarity index 79% rename from src/neofs_testlib/cli/neofs_cli/accounting.py rename to src/frostfs_testlib/cli/frostfs_cli/accounting.py index b8bdcc2..7b2b9f0 100644 --- a/src/neofs_testlib/cli/neofs_cli/accounting.py +++ b/src/frostfs_testlib/cli/frostfs_cli/accounting.py @@ -1,10 +1,10 @@ from typing import Optional -from neofs_testlib.cli.cli_command import CliCommand -from neofs_testlib.shell import CommandResult +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult -class NeofsCliAccounting(CliCommand): +class FrostfsCliAccounting(CliCommand): def balance( self, wallet: Optional[str] = None, @@ -12,7 +12,7 @@ class NeofsCliAccounting(CliCommand): address: Optional[str] = None, owner: Optional[str] = None, ) -> CommandResult: - """Get internal balance of NeoFS account + """Get internal balance of FrostFS account Args: address: Address of wallet account. 
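A usage sketch for the renamed authmate wrapper, built only from the constructor and the `obtain` parameters documented above (every concrete value below is a placeholder: binary name, paths, addresses, key id):

```python
from frostfs_testlib.cli import FrostfsAuthmate
from frostfs_testlib.shell import LocalShell

authmate = FrostfsAuthmate(LocalShell(), "frostfs-s3-authmate")  # binary name assumed

result = authmate.secret.obtain(
    wallet="/path/to/wallet.json",
    wallet_password="",
    peer="node.example:8080",
    gate_wallet="/path/to/gate-wallet.json",
    gate_address="<gate-account-address>",
    access_key_id="<access-key-id>",
)
print(result.stdout)
```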
diff --git a/src/neofs_testlib/cli/neofs_cli/acl.py b/src/frostfs_testlib/cli/frostfs_cli/acl.py similarity index 88% rename from src/neofs_testlib/cli/neofs_cli/acl.py rename to src/frostfs_testlib/cli/frostfs_cli/acl.py index 47f86cb..bd0f80e 100644 --- a/src/neofs_testlib/cli/neofs_cli/acl.py +++ b/src/frostfs_testlib/cli/frostfs_cli/acl.py @@ -1,10 +1,10 @@ from typing import Optional -from neofs_testlib.cli.cli_command import CliCommand -from neofs_testlib.shell import CommandResult +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult -class NeofsCliACL(CliCommand): +class FrostfsCliACL(CliCommand): def extended_create( self, cid: str, out: str, file: Optional[str] = None, rule: Optional[list] = None ) -> CommandResult: @@ -22,7 +22,7 @@ class NeofsCliACL(CliCommand): Well-known system object headers start with '$Object:' prefix. User defined headers start without prefix. Read more about filter keys at: - http://github.com/nspcc-dev/neofs-api/blob/master/proto-docs/acl.md#message-eaclrecordfilter + http://github.com/TrueCloudLab/frostfs-api/blob/master/proto-docs/acl.md#message-eaclrecordfilter Match is '=' for matching and '!=' for non-matching filter. Value is a valid unicode string corresponding to object or request header value. diff --git a/src/frostfs_testlib/cli/frostfs_cli/cli.py b/src/frostfs_testlib/cli/frostfs_cli/cli.py new file mode 100644 index 0000000..07986c2 --- /dev/null +++ b/src/frostfs_testlib/cli/frostfs_cli/cli.py @@ -0,0 +1,38 @@ +from typing import Optional + +from frostfs_testlib.cli.frostfs_cli.accounting import FrostfsCliAccounting +from frostfs_testlib.cli.frostfs_cli.acl import FrostfsCliACL +from frostfs_testlib.cli.frostfs_cli.container import FrostfsCliContainer +from frostfs_testlib.cli.frostfs_cli.netmap import FrostfsCliNetmap +from frostfs_testlib.cli.frostfs_cli.object import FrostfsCliObject +from frostfs_testlib.cli.frostfs_cli.session import FrostfsCliSession +from frostfs_testlib.cli.frostfs_cli.shards import FrostfsCliShards +from frostfs_testlib.cli.frostfs_cli.storagegroup import FrostfsCliStorageGroup +from frostfs_testlib.cli.frostfs_cli.util import FrostfsCliUtil +from frostfs_testlib.cli.frostfs_cli.version import FrostfsCliVersion +from frostfs_testlib.shell import Shell + + +class FrostfsCli: + accounting: Optional[FrostfsCliAccounting] = None + acl: Optional[FrostfsCliACL] = None + container: Optional[FrostfsCliContainer] = None + netmap: Optional[FrostfsCliNetmap] = None + object: Optional[FrostfsCliObject] = None + session: Optional[FrostfsCliSession] = None + shards: Optional[FrostfsCliShards] = None + storagegroup: Optional[FrostfsCliStorageGroup] = None + util: Optional[FrostfsCliUtil] = None + version: Optional[FrostfsCliVersion] = None + + def __init__(self, shell: Shell, frostfs_cli_exec_path: str, config_file: Optional[str] = None): + self.accounting = FrostfsCliAccounting(shell, frostfs_cli_exec_path, config=config_file) + self.acl = FrostfsCliACL(shell, frostfs_cli_exec_path, config=config_file) + self.container = FrostfsCliContainer(shell, frostfs_cli_exec_path, config=config_file) + self.netmap = FrostfsCliNetmap(shell, frostfs_cli_exec_path, config=config_file) + self.object = FrostfsCliObject(shell, frostfs_cli_exec_path, config=config_file) + self.session = FrostfsCliSession(shell, frostfs_cli_exec_path, config=config_file) + self.shards = FrostfsCliShards(shell, frostfs_cli_exec_path, config=config_file) + self.storagegroup = FrostfsCliStorageGroup(shell, 
frostfs_cli_exec_path, config=config_file) + self.util = FrostfsCliUtil(shell, frostfs_cli_exec_path, config=config_file) + self.version = FrostfsCliVersion(shell, frostfs_cli_exec_path, config=config_file) diff --git a/src/neofs_testlib/cli/neofs_cli/container.py b/src/frostfs_testlib/cli/frostfs_cli/container.py similarity index 97% rename from src/neofs_testlib/cli/neofs_cli/container.py rename to src/frostfs_testlib/cli/frostfs_cli/container.py index 1952448..533ff1a 100644 --- a/src/neofs_testlib/cli/neofs_cli/container.py +++ b/src/frostfs_testlib/cli/frostfs_cli/container.py @@ -1,10 +1,10 @@ from typing import Optional -from neofs_testlib.cli.cli_command import CliCommand -from neofs_testlib.shell import CommandResult +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult -class NeofsCliContainer(CliCommand): +class FrostfsCliContainer(CliCommand): def create( self, rpc_endpoint: str, @@ -24,7 +24,7 @@ class NeofsCliContainer(CliCommand): timeout: Optional[str] = None, ) -> CommandResult: """ - Create a new container and register it in the NeoFS. + Create a new container and register it in the FrostFS. It will be stored in the sidechain when the Inner Ring accepts it. Args: diff --git a/src/neofs_testlib/cli/neofs_cli/netmap.py b/src/frostfs_testlib/cli/frostfs_cli/netmap.py similarity index 95% rename from src/neofs_testlib/cli/neofs_cli/netmap.py rename to src/frostfs_testlib/cli/frostfs_cli/netmap.py index 0100fc4..7033912 100644 --- a/src/neofs_testlib/cli/neofs_cli/netmap.py +++ b/src/frostfs_testlib/cli/frostfs_cli/netmap.py @@ -1,10 +1,10 @@ from typing import Optional -from neofs_testlib.cli.cli_command import CliCommand -from neofs_testlib.shell import CommandResult +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult -class NeofsCliNetmap(CliCommand): +class FrostfsCliNetmap(CliCommand): def epoch( self, rpc_endpoint: str, @@ -43,7 +43,7 @@ class NeofsCliNetmap(CliCommand): xhdr: Optional[dict] = None, ) -> CommandResult: """ - Get information about NeoFS network. + Get information about FrostFS network. Args: address: Address of wallet account diff --git a/src/neofs_testlib/cli/neofs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py similarity index 98% rename from src/neofs_testlib/cli/neofs_cli/object.py rename to src/frostfs_testlib/cli/frostfs_cli/object.py index 164076c..1c1d0ac 100644 --- a/src/neofs_testlib/cli/neofs_cli/object.py +++ b/src/frostfs_testlib/cli/frostfs_cli/object.py @@ -1,10 +1,10 @@ from typing import Optional -from neofs_testlib.cli.cli_command import CliCommand -from neofs_testlib.shell import CommandResult +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult -class NeofsCliObject(CliCommand): +class FrostfsCliObject(CliCommand): def delete( self, rpc_endpoint: str, @@ -19,7 +19,7 @@ class NeofsCliObject(CliCommand): timeout: Optional[str] = None, ) -> CommandResult: """ - Delete object from NeoFS. + Delete object from FrostFS. Args: address: Address of wallet account. @@ -59,7 +59,7 @@ class NeofsCliObject(CliCommand): timeout: Optional[str] = None, ) -> CommandResult: """ - Get object from NeoFS. + Get object from FrostFS. Args: address: Address of wallet account. @@ -235,7 +235,7 @@ class NeofsCliObject(CliCommand): timeout: Optional[str] = None, ) -> CommandResult: """ - Put object to NeoFS. + Put object to FrostFS. Args: address: Address of wallet account. 
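Pulling the renamed pieces together, a hypothetical end-to-end call through the `FrostfsCli` facade (the endpoint, wallet path, container id, and payload path are placeholders, and `object.put` is assumed to accept the usual `wallet`/`cid`/`file` parameters of the CLI):

```python
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.shell import LocalShell

cli = FrostfsCli(LocalShell(), "frostfs-cli", config_file="/path/to/cli-config.yml")

# Internal balance of a FrostFS account (FrostfsCliAccounting.balance above).
balance = cli.accounting.balance(rpc_endpoint="node.example:8080", wallet="/path/to/wallet.json")

# Put an object into FrostFS (FrostfsCliObject.put above).
put_result = cli.object.put(
    rpc_endpoint="node.example:8080",
    wallet="/path/to/wallet.json",
    cid="<container-id>",
    file="/path/to/payload.bin",
)
```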
diff --git a/src/neofs_testlib/cli/neofs_cli/session.py b/src/frostfs_testlib/cli/frostfs_cli/session.py similarity index 88% rename from src/neofs_testlib/cli/neofs_cli/session.py rename to src/frostfs_testlib/cli/frostfs_cli/session.py index 4e33a7a..e21cc23 100644 --- a/src/neofs_testlib/cli/neofs_cli/session.py +++ b/src/frostfs_testlib/cli/frostfs_cli/session.py @@ -1,10 +1,10 @@ from typing import Optional -from neofs_testlib.cli.cli_command import CliCommand -from neofs_testlib.shell import CommandResult +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult -class NeofsCliSession(CliCommand): +class FrostfsCliSession(CliCommand): def create( self, rpc_endpoint: str, diff --git a/src/neofs_testlib/cli/neofs_cli/shards.py b/src/frostfs_testlib/cli/frostfs_cli/shards.py similarity index 96% rename from src/neofs_testlib/cli/neofs_cli/shards.py rename to src/frostfs_testlib/cli/frostfs_cli/shards.py index dd57827..bfab6b6 100644 --- a/src/neofs_testlib/cli/neofs_cli/shards.py +++ b/src/frostfs_testlib/cli/frostfs_cli/shards.py @@ -1,10 +1,10 @@ from typing import Optional -from neofs_testlib.cli.cli_command import CliCommand -from neofs_testlib.shell import CommandResult +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult -class NeofsCliShards(CliCommand): +class FrostfsCliShards(CliCommand): def flush_cache( self, endpoint: str, diff --git a/src/neofs_testlib/cli/neofs_cli/storagegroup.py b/src/frostfs_testlib/cli/frostfs_cli/storagegroup.py similarity index 93% rename from src/neofs_testlib/cli/neofs_cli/storagegroup.py rename to src/frostfs_testlib/cli/frostfs_cli/storagegroup.py index 514abf5..10f724b 100644 --- a/src/neofs_testlib/cli/neofs_cli/storagegroup.py +++ b/src/frostfs_testlib/cli/frostfs_cli/storagegroup.py @@ -1,10 +1,10 @@ from typing import Optional -from neofs_testlib.cli.cli_command import CliCommand -from neofs_testlib.shell import CommandResult +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult -class NeofsCliStorageGroup(CliCommand): +class FrostfsCliStorageGroup(CliCommand): def put( self, rpc_endpoint: str, @@ -18,7 +18,7 @@ class NeofsCliStorageGroup(CliCommand): xhdr: Optional[dict] = None, ) -> CommandResult: """ - Put storage group to NeoFS. + Put storage group to FrostFS. Args: address: Address of wallet account. @@ -54,7 +54,7 @@ class NeofsCliStorageGroup(CliCommand): xhdr: Optional[dict] = None, ) -> CommandResult: """ - Get storage group from NeoFS. + Get storage group from FrostFS. Args: address: Address of wallet account. @@ -89,7 +89,7 @@ class NeofsCliStorageGroup(CliCommand): xhdr: Optional[dict] = None, ) -> CommandResult: """ - List storage groups in NeoFS container. + List storage groups in FrostFS container. Args: address: Address of wallet account. @@ -124,7 +124,7 @@ class NeofsCliStorageGroup(CliCommand): xhdr: Optional[dict] = None, ) -> CommandResult: """ - Delete storage group from NeoFS. + Delete storage group from FrostFS. Args: address: Address of wallet account. 
diff --git a/src/neofs_testlib/cli/neofs_cli/util.py b/src/frostfs_testlib/cli/frostfs_cli/util.py similarity index 92% rename from src/neofs_testlib/cli/neofs_cli/util.py rename to src/frostfs_testlib/cli/frostfs_cli/util.py index 786c156..99acd0a 100644 --- a/src/neofs_testlib/cli/neofs_cli/util.py +++ b/src/frostfs_testlib/cli/frostfs_cli/util.py @@ -1,10 +1,10 @@ from typing import Optional -from neofs_testlib.cli.cli_command import CliCommand -from neofs_testlib.shell import CommandResult +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult -class NeofsCliUtil(CliCommand): +class FrostfsCliUtil(CliCommand): def sign_bearer_token( self, wallet: str, diff --git a/src/frostfs_testlib/cli/frostfs_cli/version.py b/src/frostfs_testlib/cli/frostfs_cli/version.py new file mode 100644 index 0000000..9d22859 --- /dev/null +++ b/src/frostfs_testlib/cli/frostfs_cli/version.py @@ -0,0 +1,13 @@ +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult + + +class FrostfsCliVersion(CliCommand): + def get(self) -> CommandResult: + """ + Application version and FrostFS API compatibility. + + Returns: + Command's result. + """ + return self._execute("", version=True) diff --git a/src/frostfs_testlib/cli/neogo/__init__.py b/src/frostfs_testlib/cli/neogo/__init__.py new file mode 100644 index 0000000..43d305b --- /dev/null +++ b/src/frostfs_testlib/cli/neogo/__init__.py @@ -0,0 +1,2 @@ +from frostfs_testlib.cli.neogo.go import NeoGo +from frostfs_testlib.cli.neogo.network_type import NetworkType diff --git a/src/neofs_testlib/cli/neogo/candidate.py b/src/frostfs_testlib/cli/neogo/candidate.py similarity index 97% rename from src/neofs_testlib/cli/neogo/candidate.py rename to src/frostfs_testlib/cli/neogo/candidate.py index f5e4f33..e4bf6b7 100644 --- a/src/neofs_testlib/cli/neogo/candidate.py +++ b/src/frostfs_testlib/cli/neogo/candidate.py @@ -1,7 +1,7 @@ from typing import Optional -from neofs_testlib.cli.cli_command import CliCommand -from neofs_testlib.shell import CommandResult +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult class NeoGoCandidate(CliCommand): diff --git a/src/neofs_testlib/cli/neogo/contract.py b/src/frostfs_testlib/cli/neogo/contract.py similarity index 99% rename from src/neofs_testlib/cli/neogo/contract.py rename to src/frostfs_testlib/cli/neogo/contract.py index 61f4edb..bc56dd9 100644 --- a/src/neofs_testlib/cli/neogo/contract.py +++ b/src/frostfs_testlib/cli/neogo/contract.py @@ -1,7 +1,7 @@ from typing import Optional -from neofs_testlib.cli.cli_command import CliCommand -from neofs_testlib.shell import CommandResult +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult class NeoGoContract(CliCommand): diff --git a/src/neofs_testlib/cli/neogo/db.py b/src/frostfs_testlib/cli/neogo/db.py similarity index 92% rename from src/neofs_testlib/cli/neogo/db.py rename to src/frostfs_testlib/cli/neogo/db.py index ae3185d..4b456c3 100644 --- a/src/neofs_testlib/cli/neogo/db.py +++ b/src/frostfs_testlib/cli/neogo/db.py @@ -1,8 +1,8 @@ from typing import Optional -from neofs_testlib.cli.cli_command import CliCommand -from neofs_testlib.cli.neogo.network_type import NetworkType -from neofs_testlib.shell import CommandResult +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.cli.neogo.network_type import NetworkType +from frostfs_testlib.shell import CommandResult 
class NeoGoDb(CliCommand): diff --git a/src/neofs_testlib/cli/neogo/go.py b/src/frostfs_testlib/cli/neogo/go.py similarity index 70% rename from src/neofs_testlib/cli/neogo/go.py rename to src/frostfs_testlib/cli/neogo/go.py index 5f216ce..a0634a4 100644 --- a/src/neofs_testlib/cli/neogo/go.py +++ b/src/frostfs_testlib/cli/neogo/go.py @@ -1,14 +1,14 @@ from typing import Optional -from neofs_testlib.cli.neogo.candidate import NeoGoCandidate -from neofs_testlib.cli.neogo.contract import NeoGoContract -from neofs_testlib.cli.neogo.db import NeoGoDb -from neofs_testlib.cli.neogo.nep17 import NeoGoNep17 -from neofs_testlib.cli.neogo.node import NeoGoNode -from neofs_testlib.cli.neogo.query import NeoGoQuery -from neofs_testlib.cli.neogo.version import NeoGoVersion -from neofs_testlib.cli.neogo.wallet import NeoGoWallet -from neofs_testlib.shell import Shell +from frostfs_testlib.cli.neogo.candidate import NeoGoCandidate +from frostfs_testlib.cli.neogo.contract import NeoGoContract +from frostfs_testlib.cli.neogo.db import NeoGoDb +from frostfs_testlib.cli.neogo.nep17 import NeoGoNep17 +from frostfs_testlib.cli.neogo.node import NeoGoNode +from frostfs_testlib.cli.neogo.query import NeoGoQuery +from frostfs_testlib.cli.neogo.version import NeoGoVersion +from frostfs_testlib.cli.neogo.wallet import NeoGoWallet +from frostfs_testlib.shell import Shell class NeoGo: diff --git a/src/neofs_testlib/cli/neogo/nep17.py b/src/frostfs_testlib/cli/neogo/nep17.py similarity index 98% rename from src/neofs_testlib/cli/neogo/nep17.py rename to src/frostfs_testlib/cli/neogo/nep17.py index 7cc00b6..a3dcb12 100644 --- a/src/neofs_testlib/cli/neogo/nep17.py +++ b/src/frostfs_testlib/cli/neogo/nep17.py @@ -1,7 +1,7 @@ from typing import Optional -from neofs_testlib.cli.cli_command import CliCommand -from neofs_testlib.shell import CommandResult +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult class NeoGoNep17(CliCommand): diff --git a/src/neofs_testlib/cli/neogo/network_type.py b/src/frostfs_testlib/cli/neogo/network_type.py similarity index 100% rename from src/neofs_testlib/cli/neogo/network_type.py rename to src/frostfs_testlib/cli/neogo/network_type.py diff --git a/src/neofs_testlib/cli/neogo/node.py b/src/frostfs_testlib/cli/neogo/node.py similarity index 67% rename from src/neofs_testlib/cli/neogo/node.py rename to src/frostfs_testlib/cli/neogo/node.py index 8fe4d28..ccc833d 100644 --- a/src/neofs_testlib/cli/neogo/node.py +++ b/src/frostfs_testlib/cli/neogo/node.py @@ -1,6 +1,6 @@ -from neofs_testlib.cli.cli_command import CliCommand -from neofs_testlib.cli.neogo.network_type import NetworkType -from neofs_testlib.shell import CommandResult +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.cli.neogo.network_type import NetworkType +from frostfs_testlib.shell import CommandResult class NeoGoNode(CliCommand): diff --git a/src/neofs_testlib/cli/neogo/query.py b/src/frostfs_testlib/cli/neogo/query.py similarity index 96% rename from src/neofs_testlib/cli/neogo/query.py rename to src/frostfs_testlib/cli/neogo/query.py index 945cd6c..6627790 100644 --- a/src/neofs_testlib/cli/neogo/query.py +++ b/src/frostfs_testlib/cli/neogo/query.py @@ -1,5 +1,5 @@ -from neofs_testlib.cli.cli_command import CliCommand -from neofs_testlib.shell import CommandResult +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult class NeoGoQuery(CliCommand): diff --git 
a/src/neofs_testlib/cli/neogo/version.py b/src/frostfs_testlib/cli/neogo/version.py similarity index 66% rename from src/neofs_testlib/cli/neogo/version.py rename to src/frostfs_testlib/cli/neogo/version.py index 0d56bdb..3f6ce3f 100644 --- a/src/neofs_testlib/cli/neogo/version.py +++ b/src/frostfs_testlib/cli/neogo/version.py @@ -1,5 +1,5 @@ -from neofs_testlib.cli.cli_command import CliCommand -from neofs_testlib.shell import CommandResult +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult class NeoGoVersion(CliCommand): diff --git a/src/neofs_testlib/cli/neogo/wallet.py b/src/frostfs_testlib/cli/neogo/wallet.py similarity index 99% rename from src/neofs_testlib/cli/neogo/wallet.py rename to src/frostfs_testlib/cli/neogo/wallet.py index c3a44b6..2f3e518 100644 --- a/src/neofs_testlib/cli/neogo/wallet.py +++ b/src/frostfs_testlib/cli/neogo/wallet.py @@ -1,7 +1,7 @@ from typing import Optional -from neofs_testlib.cli.cli_command import CliCommand -from neofs_testlib.shell import CommandResult +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult class NeoGoWallet(CliCommand): diff --git a/src/frostfs_testlib/hosting/__init__.py b/src/frostfs_testlib/hosting/__init__.py new file mode 100644 index 0000000..22a55c7 --- /dev/null +++ b/src/frostfs_testlib/hosting/__init__.py @@ -0,0 +1,3 @@ +from frostfs_testlib.hosting.config import CLIConfig, HostConfig, ServiceConfig +from frostfs_testlib.hosting.hosting import Hosting +from frostfs_testlib.hosting.interfaces import Host diff --git a/src/neofs_testlib/hosting/config.py b/src/frostfs_testlib/hosting/config.py similarity index 100% rename from src/neofs_testlib/hosting/config.py rename to src/frostfs_testlib/hosting/config.py diff --git a/src/neofs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py similarity index 93% rename from src/neofs_testlib/hosting/docker_host.py rename to src/frostfs_testlib/hosting/docker_host.py index 3b36e31..e4c61ac 100644 --- a/src/neofs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -10,12 +10,12 @@ from typing import Any, Optional import docker from requests import HTTPError -from neofs_testlib.hosting.config import ParsedAttributes -from neofs_testlib.hosting.interfaces import DiskInfo, Host -from neofs_testlib.shell import LocalShell, Shell, SSHShell -from neofs_testlib.shell.command_inspectors import SudoInspector +from frostfs_testlib.hosting.config import ParsedAttributes +from frostfs_testlib.hosting.interfaces import Host +from frostfs_testlib.shell import LocalShell, Shell, SSHShell +from frostfs_testlib.shell.command_inspectors import SudoInspector -logger = logging.getLogger("neofs.testlib.hosting") +logger = logging.getLogger("frostfs.testlib.hosting") @dataclass @@ -142,15 +142,6 @@ class DockerHost(Host): cmd = f"{meta_clean_cmd}{data_clean_cmd}" shell.exec(cmd) - def attach_disk(self, device: str, disk_info: DiskInfo) -> None: - raise NotImplementedError("Not supported for docker") - - def detach_disk(self, device: str) -> DiskInfo: - raise NotImplementedError("Not supported for docker") - - def is_disk_attached(self, device: str, disk_info: DiskInfo) -> bool: - raise NotImplementedError("Not supported for docker") - def dump_logs( self, directory_path: str, diff --git a/src/neofs_testlib/hosting/hosting.py b/src/frostfs_testlib/hosting/hosting.py similarity index 92% rename from src/neofs_testlib/hosting/hosting.py rename to 
src/frostfs_testlib/hosting/hosting.py index d127f25..d432135 100644 --- a/src/neofs_testlib/hosting/hosting.py +++ b/src/frostfs_testlib/hosting/hosting.py @@ -1,9 +1,9 @@ import re from typing import Any -from neofs_testlib.hosting.config import HostConfig, ServiceConfig -from neofs_testlib.hosting.interfaces import Host -from neofs_testlib.plugins import load_plugin +from frostfs_testlib.hosting.config import HostConfig, ServiceConfig +from frostfs_testlib.hosting.interfaces import Host +from frostfs_testlib.plugins import load_plugin class Hosting: @@ -36,7 +36,7 @@ class Hosting: host_configs = [HostConfig(**host_config) for host_config in config["hosts"]] for host_config in host_configs: - host_class = load_plugin("neofs.testlib.hosting", host_config.plugin_name) + host_class = load_plugin("frostfs.testlib.hosting", host_config.plugin_name) host = host_class(host_config) hosts.append(host) diff --git a/src/neofs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py similarity index 97% rename from src/neofs_testlib/hosting/interfaces.py rename to src/frostfs_testlib/hosting/interfaces.py index 2d36045..269a04e 100644 --- a/src/neofs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -2,8 +2,8 @@ from abc import ABC, abstractmethod from datetime import datetime from typing import Any, Optional -from neofs_testlib.hosting.config import CLIConfig, HostConfig, ServiceConfig -from neofs_testlib.shell.interfaces import Shell +from frostfs_testlib.hosting.config import CLIConfig, HostConfig, ServiceConfig +from frostfs_testlib.shell.interfaces import Shell class DiskInfo(dict): diff --git a/src/neofs_testlib/plugins/__init__.py b/src/frostfs_testlib/plugins/__init__.py similarity index 100% rename from src/neofs_testlib/plugins/__init__.py rename to src/frostfs_testlib/plugins/__init__.py diff --git a/src/neofs_testlib/reporter/__init__.py b/src/frostfs_testlib/reporter/__init__.py similarity index 67% rename from src/neofs_testlib/reporter/__init__.py rename to src/frostfs_testlib/reporter/__init__.py index ebfb9fd..10e4146 100644 --- a/src/neofs_testlib/reporter/__init__.py +++ b/src/frostfs_testlib/reporter/__init__.py @@ -1,6 +1,6 @@ -from neofs_testlib.reporter.allure_handler import AllureHandler -from neofs_testlib.reporter.interfaces import ReporterHandler -from neofs_testlib.reporter.reporter import Reporter +from frostfs_testlib.reporter.allure_handler import AllureHandler +from frostfs_testlib.reporter.interfaces import ReporterHandler +from frostfs_testlib.reporter.reporter import Reporter __reporter = Reporter() diff --git a/src/neofs_testlib/reporter/allure_handler.py b/src/frostfs_testlib/reporter/allure_handler.py similarity index 94% rename from src/neofs_testlib/reporter/allure_handler.py rename to src/frostfs_testlib/reporter/allure_handler.py index 0fceffb..92a295a 100644 --- a/src/neofs_testlib/reporter/allure_handler.py +++ b/src/frostfs_testlib/reporter/allure_handler.py @@ -6,7 +6,7 @@ from typing import Any import allure from allure import attachment_type -from neofs_testlib.reporter.interfaces import ReporterHandler +from frostfs_testlib.reporter.interfaces import ReporterHandler class AllureHandler(ReporterHandler): diff --git a/src/neofs_testlib/reporter/interfaces.py b/src/frostfs_testlib/reporter/interfaces.py similarity index 100% rename from src/neofs_testlib/reporter/interfaces.py rename to src/frostfs_testlib/reporter/interfaces.py diff --git a/src/neofs_testlib/reporter/reporter.py 
b/src/frostfs_testlib/reporter/reporter.py similarity index 93% rename from src/neofs_testlib/reporter/reporter.py rename to src/frostfs_testlib/reporter/reporter.py index d12cb05..ea8330b 100644 --- a/src/neofs_testlib/reporter/reporter.py +++ b/src/frostfs_testlib/reporter/reporter.py @@ -2,8 +2,8 @@ from contextlib import AbstractContextManager, contextmanager from types import TracebackType from typing import Any, Optional -from neofs_testlib.plugins import load_plugin -from neofs_testlib.reporter.interfaces import ReporterHandler +from frostfs_testlib.plugins import load_plugin +from frostfs_testlib.reporter.interfaces import ReporterHandler @contextmanager @@ -42,7 +42,7 @@ class Reporter: # Setup handlers from the specified config handler_configs = config.get("handlers", []) for handler_config in handler_configs: - handler_class = load_plugin("neofs.testlib.reporter", handler_config["plugin_name"]) + handler_class = load_plugin("frostfs.testlib.reporter", handler_config["plugin_name"]) self.register_handler(handler_class()) def step(self, name: str) -> AbstractContextManager: diff --git a/src/frostfs_testlib/shell/__init__.py b/src/frostfs_testlib/shell/__init__.py new file mode 100644 index 0000000..0300ff8 --- /dev/null +++ b/src/frostfs_testlib/shell/__init__.py @@ -0,0 +1,3 @@ +from frostfs_testlib.shell.interfaces import CommandOptions, CommandResult, InteractiveInput, Shell +from frostfs_testlib.shell.local_shell import LocalShell +from frostfs_testlib.shell.ssh_shell import SSHShell diff --git a/src/neofs_testlib/shell/command_inspectors.py b/src/frostfs_testlib/shell/command_inspectors.py similarity index 82% rename from src/neofs_testlib/shell/command_inspectors.py rename to src/frostfs_testlib/shell/command_inspectors.py index 9537549..8486f43 100644 --- a/src/neofs_testlib/shell/command_inspectors.py +++ b/src/frostfs_testlib/shell/command_inspectors.py @@ -1,4 +1,4 @@ -from neofs_testlib.shell.interfaces import CommandInspector +from frostfs_testlib.shell.interfaces import CommandInspector class SudoInspector(CommandInspector): diff --git a/src/neofs_testlib/shell/interfaces.py b/src/frostfs_testlib/shell/interfaces.py similarity index 100% rename from src/neofs_testlib/shell/interfaces.py rename to src/frostfs_testlib/shell/interfaces.py diff --git a/src/neofs_testlib/shell/local_shell.py b/src/frostfs_testlib/shell/local_shell.py similarity index 96% rename from src/neofs_testlib/shell/local_shell.py rename to src/frostfs_testlib/shell/local_shell.py index f16339f..12f450a 100644 --- a/src/neofs_testlib/shell/local_shell.py +++ b/src/frostfs_testlib/shell/local_shell.py @@ -6,10 +6,10 @@ from typing import IO, Optional import pexpect -from neofs_testlib.reporter import get_reporter -from neofs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell -logger = logging.getLogger("neofs.testlib.shell") +logger = logging.getLogger("frostfs.testlib.shell") reporter = get_reporter() diff --git a/src/neofs_testlib/shell/ssh_shell.py b/src/frostfs_testlib/shell/ssh_shell.py similarity index 98% rename from src/neofs_testlib/shell/ssh_shell.py rename to src/frostfs_testlib/shell/ssh_shell.py index f4870b4..04d42ee 100644 --- a/src/neofs_testlib/shell/ssh_shell.py +++ b/src/frostfs_testlib/shell/ssh_shell.py @@ -19,10 +19,10 @@ from paramiko import ( ) from paramiko.ssh_exception import 
AuthenticationException -from neofs_testlib.reporter import get_reporter -from neofs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell -logger = logging.getLogger("neofs.testlib.shell") +logger = logging.getLogger("frostfs.testlib.shell") reporter = get_reporter() diff --git a/src/neofs_testlib/utils/__init__.py b/src/frostfs_testlib/utils/__init__.py similarity index 100% rename from src/neofs_testlib/utils/__init__.py rename to src/frostfs_testlib/utils/__init__.py diff --git a/src/neofs_testlib/utils/converters.py b/src/frostfs_testlib/utils/converters.py similarity index 100% rename from src/neofs_testlib/utils/converters.py rename to src/frostfs_testlib/utils/converters.py diff --git a/src/neofs_testlib/utils/wallet.py b/src/frostfs_testlib/utils/wallet.py similarity index 95% rename from src/neofs_testlib/utils/wallet.py rename to src/frostfs_testlib/utils/wallet.py index 5d4ff4a..60cd2c3 100644 --- a/src/neofs_testlib/utils/wallet.py +++ b/src/frostfs_testlib/utils/wallet.py @@ -4,7 +4,7 @@ import logging from neo3.wallet import wallet as neo3_wallet from neo3.wallet import account as neo3_account -logger = logging.getLogger("neofs.testlib.utils") +logger = logging.getLogger("frostfs.testlib.utils") def init_wallet(wallet_path: str, wallet_password: str): diff --git a/src/neofs_testlib/blockchain/__init__.py b/src/neofs_testlib/blockchain/__init__.py deleted file mode 100644 index 006e8f1..0000000 --- a/src/neofs_testlib/blockchain/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from neofs_testlib.blockchain.multisig import Multisig -from neofs_testlib.blockchain.rpc_client import RPCClient diff --git a/src/neofs_testlib/cli/__init__.py b/src/neofs_testlib/cli/__init__.py deleted file mode 100644 index 63cd5bc..0000000 --- a/src/neofs_testlib/cli/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from neofs_testlib.cli.neofs_adm import NeofsAdm -from neofs_testlib.cli.neofs_authmate import NeofsAuthmate -from neofs_testlib.cli.neofs_cli import NeofsCli -from neofs_testlib.cli.neogo import NeoGo, NetworkType diff --git a/src/neofs_testlib/cli/neofs_adm/__init__.py b/src/neofs_testlib/cli/neofs_adm/__init__.py deleted file mode 100644 index dd91220..0000000 --- a/src/neofs_testlib/cli/neofs_adm/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from neofs_testlib.cli.neofs_adm.adm import NeofsAdm diff --git a/src/neofs_testlib/cli/neofs_adm/adm.py b/src/neofs_testlib/cli/neofs_adm/adm.py deleted file mode 100644 index 4fff981..0000000 --- a/src/neofs_testlib/cli/neofs_adm/adm.py +++ /dev/null @@ -1,22 +0,0 @@ -from typing import Optional - -from neofs_testlib.cli.neofs_adm.config import NeofsAdmConfig -from neofs_testlib.cli.neofs_adm.morph import NeofsAdmMorph -from neofs_testlib.cli.neofs_adm.storage_config import NeofsAdmStorageConfig -from neofs_testlib.cli.neofs_adm.subnet import NeofsAdmMorphSubnet -from neofs_testlib.cli.neofs_adm.version import NeofsAdmVersion -from neofs_testlib.shell import Shell - - -class NeofsAdm: - morph: Optional[NeofsAdmMorph] = None - subnet: Optional[NeofsAdmMorphSubnet] = None - storage_config: Optional[NeofsAdmStorageConfig] = None - version: Optional[NeofsAdmVersion] = None - - def __init__(self, shell: Shell, neofs_adm_exec_path: str, config_file: Optional[str] = None): - self.config = NeofsAdmConfig(shell, neofs_adm_exec_path, config=config_file) - self.morph = NeofsAdmMorph(shell, 
neofs_adm_exec_path, config=config_file) - self.subnet = NeofsAdmMorphSubnet(shell, neofs_adm_exec_path, config=config_file) - self.storage_config = NeofsAdmStorageConfig(shell, neofs_adm_exec_path, config=config_file) - self.version = NeofsAdmVersion(shell, neofs_adm_exec_path, config=config_file) diff --git a/src/neofs_testlib/cli/neofs_adm/config.py b/src/neofs_testlib/cli/neofs_adm/config.py deleted file mode 100644 index 86d684b..0000000 --- a/src/neofs_testlib/cli/neofs_adm/config.py +++ /dev/null @@ -1,22 +0,0 @@ -from neofs_testlib.cli.cli_command import CliCommand -from neofs_testlib.shell import CommandResult - - -class NeofsAdmConfig(CliCommand): - def init(self, path: str = "~/.neofs/adm/config.yml") -> CommandResult: - """Initialize basic neofs-adm configuration file. - - Args: - path: Path to config (default ~/.neofs/adm/config.yml). - - Returns: - Command's result. - """ - return self._execute( - "config init", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, - ) diff --git a/src/neofs_testlib/cli/neofs_authmate/__init__.py b/src/neofs_testlib/cli/neofs_authmate/__init__.py deleted file mode 100644 index 5d43b3e..0000000 --- a/src/neofs_testlib/cli/neofs_authmate/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from neofs_testlib.cli.neofs_authmate.authmate import NeofsAuthmate diff --git a/src/neofs_testlib/cli/neofs_authmate/authmate.py b/src/neofs_testlib/cli/neofs_authmate/authmate.py deleted file mode 100644 index 5f86a74..0000000 --- a/src/neofs_testlib/cli/neofs_authmate/authmate.py +++ /dev/null @@ -1,14 +0,0 @@ -from typing import Optional - -from neofs_testlib.cli.neofs_authmate.secret import NeofsAuthmateSecret -from neofs_testlib.cli.neofs_authmate.version import NeofsAuthmateVersion -from neofs_testlib.shell import Shell - - -class NeofsAuthmate: - secret: Optional[NeofsAuthmateSecret] = None - version: Optional[NeofsAuthmateVersion] = None - - def __init__(self, shell: Shell, neofs_authmate_exec_path: str): - self.secret = NeofsAuthmateSecret(shell, neofs_authmate_exec_path) - self.version = NeofsAuthmateVersion(shell, neofs_authmate_exec_path) diff --git a/src/neofs_testlib/cli/neofs_cli/__init__.py b/src/neofs_testlib/cli/neofs_cli/__init__.py deleted file mode 100644 index 9911fe2..0000000 --- a/src/neofs_testlib/cli/neofs_cli/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from neofs_testlib.cli.neofs_cli.cli import NeofsCli diff --git a/src/neofs_testlib/cli/neofs_cli/cli.py b/src/neofs_testlib/cli/neofs_cli/cli.py deleted file mode 100644 index cfe8e5d..0000000 --- a/src/neofs_testlib/cli/neofs_cli/cli.py +++ /dev/null @@ -1,38 +0,0 @@ -from typing import Optional - -from neofs_testlib.cli.neofs_cli.accounting import NeofsCliAccounting -from neofs_testlib.cli.neofs_cli.acl import NeofsCliACL -from neofs_testlib.cli.neofs_cli.container import NeofsCliContainer -from neofs_testlib.cli.neofs_cli.netmap import NeofsCliNetmap -from neofs_testlib.cli.neofs_cli.object import NeofsCliObject -from neofs_testlib.cli.neofs_cli.session import NeofsCliSession -from neofs_testlib.cli.neofs_cli.shards import NeofsCliShards -from neofs_testlib.cli.neofs_cli.storagegroup import NeofsCliStorageGroup -from neofs_testlib.cli.neofs_cli.util import NeofsCliUtil -from neofs_testlib.cli.neofs_cli.version import NeofsCliVersion -from neofs_testlib.shell import Shell - - -class NeofsCli: - accounting: Optional[NeofsCliAccounting] = None - acl: Optional[NeofsCliACL] = None - container: Optional[NeofsCliContainer] = None - netmap: 
Optional[NeofsCliNetmap] = None - object: Optional[NeofsCliObject] = None - session: Optional[NeofsCliSession] = None - shards: Optional[NeofsCliShards] = None - storagegroup: Optional[NeofsCliStorageGroup] = None - util: Optional[NeofsCliUtil] = None - version: Optional[NeofsCliVersion] = None - - def __init__(self, shell: Shell, neofs_cli_exec_path: str, config_file: Optional[str] = None): - self.accounting = NeofsCliAccounting(shell, neofs_cli_exec_path, config=config_file) - self.acl = NeofsCliACL(shell, neofs_cli_exec_path, config=config_file) - self.container = NeofsCliContainer(shell, neofs_cli_exec_path, config=config_file) - self.netmap = NeofsCliNetmap(shell, neofs_cli_exec_path, config=config_file) - self.object = NeofsCliObject(shell, neofs_cli_exec_path, config=config_file) - self.session = NeofsCliSession(shell, neofs_cli_exec_path, config=config_file) - self.shards = NeofsCliShards(shell, neofs_cli_exec_path, config=config_file) - self.storagegroup = NeofsCliStorageGroup(shell, neofs_cli_exec_path, config=config_file) - self.util = NeofsCliUtil(shell, neofs_cli_exec_path, config=config_file) - self.version = NeofsCliVersion(shell, neofs_cli_exec_path, config=config_file) diff --git a/src/neofs_testlib/cli/neofs_cli/version.py b/src/neofs_testlib/cli/neofs_cli/version.py deleted file mode 100644 index 6f22613..0000000 --- a/src/neofs_testlib/cli/neofs_cli/version.py +++ /dev/null @@ -1,13 +0,0 @@ -from neofs_testlib.cli.cli_command import CliCommand -from neofs_testlib.shell import CommandResult - - -class NeofsCliVersion(CliCommand): - def get(self) -> CommandResult: - """ - Application version and NeoFS API compatibility. - - Returns: - Command's result. - """ - return self._execute("", version=True) diff --git a/src/neofs_testlib/cli/neogo/__init__.py b/src/neofs_testlib/cli/neogo/__init__.py deleted file mode 100644 index 585be9e..0000000 --- a/src/neofs_testlib/cli/neogo/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from neofs_testlib.cli.neogo.go import NeoGo -from neofs_testlib.cli.neogo.network_type import NetworkType diff --git a/src/neofs_testlib/hosting/__init__.py b/src/neofs_testlib/hosting/__init__.py deleted file mode 100644 index d3f1f8f..0000000 --- a/src/neofs_testlib/hosting/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from neofs_testlib.hosting.config import CLIConfig, HostConfig, ServiceConfig -from neofs_testlib.hosting.hosting import Hosting -from neofs_testlib.hosting.interfaces import Host diff --git a/src/neofs_testlib/shell/__init__.py b/src/neofs_testlib/shell/__init__.py deleted file mode 100644 index d0f22d6..0000000 --- a/src/neofs_testlib/shell/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from neofs_testlib.shell.interfaces import CommandOptions, CommandResult, InteractiveInput, Shell -from neofs_testlib.shell.local_shell import LocalShell -from neofs_testlib.shell.ssh_shell import SSHShell diff --git a/tests/helpers.py b/tests/helpers.py index 8ee11b0..8391002 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -1,6 +1,6 @@ import traceback -from neofs_testlib.shell.interfaces import CommandResult +from frostfs_testlib.shell.interfaces import CommandResult def format_error_details(error: Exception) -> str: diff --git a/tests/test_cli.py b/tests/test_cli.py index f607121..6f4d791 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -1,15 +1,15 @@ from unittest import TestCase from unittest.mock import Mock -from neofs_testlib.cli import NeofsAdm, NeofsCli, NeoGo -from neofs_testlib.cli.cli_command import CliCommand -from 
neofs_testlib.shell.interfaces import CommandOptions, InteractiveInput +from frostfs_testlib.cli import FrostfsAdm, FrostfsCli, NeoGo +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput class TestCli(TestCase): - neofs_adm_exec_path = "neo-adm-exec" - neofs_go_exec_path = "neo-go-exec" - neofs_cli_exec_path = "neo-cli-exec" + frostfs_adm_exec_path = "neo-adm-exec" + frostfs_go_exec_path = "neo-go-exec" + frostfs_cli_exec_path = "neo-cli-exec" address = "0x0000000000000000000" addresses = ["0x000000", "0xDEADBEEF", "0xBABECAFE"] @@ -30,12 +30,12 @@ class TestCli(TestCase): def test_container_create(self): shell = Mock() - neofs_cli = NeofsCli( + frostfs_cli = FrostfsCli( config_file=self.config_file, - neofs_cli_exec_path=self.neofs_cli_exec_path, + frostfs_cli_exec_path=self.frostfs_cli_exec_path, shell=shell, ) - neofs_cli.container.create( + frostfs_cli.container.create( rpc_endpoint=self.rpc_endpoint, wallet=self.wallet, basic_acl=self.basic_acl, @@ -46,7 +46,7 @@ class TestCli(TestCase): xhdr = ",".join(f"{param}={value}" for param, value in self.xhdr.items()) expected_command = ( - f"{self.neofs_cli_exec_path} --config {self.config_file} container create " + f"{self.frostfs_cli_exec_path} --config {self.config_file} container create " f"--rpc-endpoint '{self.rpc_endpoint}' --wallet '{self.wallet}' " f"--basic-acl '{self.basic_acl}' --await --policy '{self.policy}' " f"--xhdr '{xhdr}'" @@ -57,7 +57,7 @@ class TestCli(TestCase): def test_bad_wallet_argument(self): shell = Mock() neo_go = NeoGo( - shell=shell, config_path=self.config_file, neo_go_exec_path=self.neofs_go_exec_path + shell=shell, config_path=self.config_file, neo_go_exec_path=self.frostfs_go_exec_path ) with self.assertRaises(Exception) as exc_msg: neo_go.contract.add_group( @@ -88,7 +88,7 @@ class TestCli(TestCase): def test_wallet_sign(self): shell = Mock() neo_go = NeoGo( - shell=shell, config_path=self.config_file, neo_go_exec_path=self.neofs_go_exec_path + shell=shell, config_path=self.config_file, neo_go_exec_path=self.frostfs_go_exec_path ) neo_go.wallet.sign( input_file=self.file1, @@ -101,7 +101,7 @@ class TestCli(TestCase): ) expected_command = ( - f"{self.neofs_go_exec_path} --config_path {self.config_file} wallet sign " + f"{self.frostfs_go_exec_path} --config_path {self.config_file} wallet sign " f"--input-file '{self.file1}' --address '{self.address}' " f"--rpc-endpoint '{self.rpc_endpoint}' --wallet '{self.wallet}' " f"--out '{self.file2}' --timeout '{self.timeout}s'" @@ -118,12 +118,12 @@ class TestCli(TestCase): def test_subnet_create(self): shell = Mock() - neofs_adm = NeofsAdm( + frostfs_adm = FrostfsAdm( config_file=self.config_file, - neofs_adm_exec_path=self.neofs_adm_exec_path, + frostfs_adm_exec_path=self.frostfs_adm_exec_path, shell=shell, ) - neofs_adm.subnet.create( + frostfs_adm.subnet.create( address=self.address, rpc_endpoint=self.rpc_endpoint, wallet=self.wallet, @@ -131,7 +131,7 @@ class TestCli(TestCase): ) expected_command = ( - f"{self.neofs_adm_exec_path} --config {self.config_file} morph subnet create " + f"{self.frostfs_adm_exec_path} --config {self.config_file} morph subnet create " f"--rpc-endpoint '{self.rpc_endpoint}' --address '{self.address}' " f"--wallet '{self.wallet}' --notary" ) @@ -141,7 +141,7 @@ class TestCli(TestCase): def test_wallet_nep17_multitransfer(self): shell = Mock() neo_go = NeoGo( - shell=shell, config_path=self.config_file, neo_go_exec_path=self.neofs_go_exec_path + shell=shell, 
config_path=self.config_file, neo_go_exec_path=self.frostfs_go_exec_path ) neo_go.nep17.multitransfer( wallet=self.wallet, @@ -157,7 +157,7 @@ class TestCli(TestCase): to_address = "".join(f" --to '{address}'" for address in self.addresses) expected_command = ( - f"{self.neofs_go_exec_path} --config_path {self.config_file} " + f"{self.frostfs_go_exec_path} --config_path {self.config_file} " f"wallet nep17 multitransfer --token '{self.token}'" f"{to_address} --sysgas '{self.sysgas}' --rpc-endpoint '{self.rpc_endpoint}' " f"--wallet '{self.wallet}' --from '{self.address}' --force --amount {self.amount} " @@ -168,7 +168,7 @@ class TestCli(TestCase): def test_version(self): shell = Mock() - neofs_adm = NeofsAdm(shell=shell, neofs_adm_exec_path=self.neofs_adm_exec_path) - neofs_adm.version.get() + frostfs_adm = FrostfsAdm(shell=shell, frostfs_adm_exec_path=self.frostfs_adm_exec_path) + frostfs_adm.version.get() - shell.exec.assert_called_once_with(f"{self.neofs_adm_exec_path} --version") + shell.exec.assert_called_once_with(f"{self.frostfs_adm_exec_path} --version") diff --git a/tests/test_converters.py b/tests/test_converters.py index f453c42..7600a5d 100644 --- a/tests/test_converters.py +++ b/tests/test_converters.py @@ -1,6 +1,6 @@ from unittest import TestCase -from neofs_testlib.utils import converters +from frostfs_testlib.utils import converters class TestConverters(TestCase): diff --git a/tests/test_hosting.py b/tests/test_hosting.py index 17cacb4..14be8c5 100644 --- a/tests/test_hosting.py +++ b/tests/test_hosting.py @@ -1,6 +1,6 @@ from unittest import TestCase -from neofs_testlib.hosting import CLIConfig, Hosting, ServiceConfig +from frostfs_testlib.hosting import CLIConfig, Hosting, ServiceConfig class TestHosting(TestCase): diff --git a/tests/test_local_shell.py b/tests/test_local_shell.py index de1e22f..3d05e5b 100644 --- a/tests/test_local_shell.py +++ b/tests/test_local_shell.py @@ -1,7 +1,7 @@ from unittest import TestCase -from neofs_testlib.shell.interfaces import CommandOptions, InteractiveInput -from neofs_testlib.shell.local_shell import LocalShell +from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput +from frostfs_testlib.shell.local_shell import LocalShell from tests.helpers import format_error_details, get_output_lines diff --git a/tests/test_reporter.py b/tests/test_reporter.py index 2dec8fb..f5e29d0 100644 --- a/tests/test_reporter.py +++ b/tests/test_reporter.py @@ -4,7 +4,7 @@ from typing import Optional from unittest import TestCase from unittest.mock import MagicMock -from neofs_testlib.reporter import Reporter +from frostfs_testlib.reporter import Reporter class TestLocalShellInteractive(TestCase): diff --git a/tests/test_ssh_shell.py b/tests/test_ssh_shell.py index 0ffeb4d..021014a 100644 --- a/tests/test_ssh_shell.py +++ b/tests/test_ssh_shell.py @@ -1,8 +1,9 @@ import os from unittest import SkipTest, TestCase -from neofs_testlib.shell.interfaces import CommandOptions, InteractiveInput -from neofs_testlib.shell.ssh_shell import SSHShell +from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput +from frostfs_testlib.shell.ssh_shell import SSHShell + from tests.helpers import format_error_details, get_output_lines diff --git a/tests/test_wallet.py b/tests/test_wallet.py index 7e688ab..f00a6af 100644 --- a/tests/test_wallet.py +++ b/tests/test_wallet.py @@ -5,7 +5,7 @@ from uuid import uuid4 from neo3.wallet.wallet import Wallet -from neofs_testlib.utils.wallet import init_wallet, get_last_address_from_wallet +from 
frostfs_testlib.utils.wallet import init_wallet, get_last_address_from_wallet class TestWallet(TestCase): From adb06aa158343b7cd9a44127dd7f5261861b03e0 Mon Sep 17 00:00:00 2001 From: Yulia Kovshova Date: Thu, 16 Feb 2023 17:41:23 +0300 Subject: [PATCH 056/363] Delete neofs_test_lib path Signed-off-by: Yulia Kovshova --- src/{neofs_testlib => frostfs_testlib}/defaults.py | 0 src/frostfs_testlib/shell/interfaces.py | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename src/{neofs_testlib => frostfs_testlib}/defaults.py (100%) diff --git a/src/neofs_testlib/defaults.py b/src/frostfs_testlib/defaults.py similarity index 100% rename from src/neofs_testlib/defaults.py rename to src/frostfs_testlib/defaults.py diff --git a/src/frostfs_testlib/shell/interfaces.py b/src/frostfs_testlib/shell/interfaces.py index e4f7dea..4c87a78 100644 --- a/src/frostfs_testlib/shell/interfaces.py +++ b/src/frostfs_testlib/shell/interfaces.py @@ -2,7 +2,7 @@ from abc import ABC, abstractmethod from dataclasses import dataclass from typing import Optional -from neofs_testlib.defaults import Options +from frostfs_testlib.defaults import Options @dataclass From ad8fd930c8600ad55e2524c285aac28154ea6f44 Mon Sep 17 00:00:00 2001 From: Aleksei Chetaev Date: Thu, 16 Feb 2023 15:50:09 +0100 Subject: [PATCH 057/363] Remove neoFS from comments and __doc__ Signed-off-by: Aleksei Chetaev --- README.md | 6 +++--- src/frostfs_testlib/hosting/config.py | 4 ++-- src/frostfs_testlib/hosting/docker_host.py | 4 ++-- src/frostfs_testlib/hosting/hosting.py | 2 +- src/frostfs_testlib/hosting/interfaces.py | 4 ++-- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index ddd2620..94e2709 100644 --- a/README.md +++ b/README.md @@ -30,7 +30,7 @@ get_reporter().configure({ "handlers": [{"plugin_name": "allure"}] }) ``` ### Hosting Configuration -Hosting component is a class that represents infrastructure (machines/containers/services) where neoFS is hosted. Interaction with specific infrastructure instance (host) is encapsulated in classes that implement interface `frostfs_testlib.hosting.Host`. To pass information about hosts to the `Hosting` class in runtime we use method `configure`: +Hosting component is a class that represents infrastructure (machines/containers/services) where frostFS is hosted. Interaction with specific infrastructure instance (host) is encapsulated in classes that implement interface `frostfs_testlib.hosting.Host`. To pass information about hosts to the `Hosting` class in runtime we use method `configure`: ```python from frostfs_testlib.hosting import Hosting @@ -84,8 +84,8 @@ Detailed information about registering entrypoints can be found at [setuptools d ## Library structure The library provides the following primary components: * `blockchain` - Contains helpers that allow to interact with neo blockchain, smart contracts, gas transfers, etc. - * `cli` - wrappers on top of neoFS command-line tools. These wrappers execute on a shell and provide type-safe interface for interacting with the tools. - * `hosting` - management of infrastructure (docker, virtual machines, services where neoFS is hosted). The library provides host implementation for docker environment (when neoFS services are running as docker containers). Support for other hosts is provided via plugins. + * `cli` - wrappers on top of frostFS command-line tools. These wrappers execute on a shell and provide type-safe interface for interacting with the tools. 
+ * `hosting` - management of infrastructure (docker, virtual machines, services where frostFS is hosted). The library provides host implementation for docker environment (when neoFS services are running as docker containers). Support for other hosts is provided via plugins. * `reporter` - abstraction on top of test reporting tool like Allure. Components of the library will report their steps and attach artifacts to the configured reporter instance. * `shell` - shells that can be used to execute commands. Currently library provides local shell (on machine that runs the code) or SSH shell that connects to a remote machine via SSH. * `utils` - Support functions. diff --git a/src/frostfs_testlib/hosting/config.py b/src/frostfs_testlib/hosting/config.py index febc848..dd8b4b9 100644 --- a/src/frostfs_testlib/hosting/config.py +++ b/src/frostfs_testlib/hosting/config.py @@ -33,7 +33,7 @@ class CLIConfig: @dataclass class ServiceConfig: - """Describes neoFS service on some host. + """Describes frostFS service on some host. Attributes: name: Name of the service that uniquely identifies it across all hosts. @@ -48,7 +48,7 @@ class ServiceConfig: @dataclass class HostConfig: - """Describes machine that hosts neoFS services. + """Describes machine that hosts frostFS services. Attributes: plugin_name: Name of plugin that should be used to manage the host. diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index e4c61ac..1f7b545 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -20,11 +20,11 @@ logger = logging.getLogger("frostfs.testlib.hosting") @dataclass class HostAttributes(ParsedAttributes): - """Represents attributes of host where Docker with neoFS runs. + """Represents attributes of host where Docker with frostFS runs. Attributes: sudo_shell: Specifies whether shell commands should be auto-prefixed with sudo. - docker_endpoint: Protocol, address and port of docker where neoFS runs. Recommended format + docker_endpoint: Protocol, address and port of docker where frostFS runs. Recommended format is tcp socket (https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-socket-option), for example: tcp://{address}:2375 (where 2375 is default docker port). ssh_login: Login for SSH connection to the machine where docker runs. diff --git a/src/frostfs_testlib/hosting/hosting.py b/src/frostfs_testlib/hosting/hosting.py index d432135..eddf03c 100644 --- a/src/frostfs_testlib/hosting/hosting.py +++ b/src/frostfs_testlib/hosting/hosting.py @@ -7,7 +7,7 @@ from frostfs_testlib.plugins import load_plugin class Hosting: - """Hosting manages infrastructure where neoFS runs (machines and neoFS services).""" + """Hosting manages infrastructure where frostFS runs (machines and frostFS services).""" _hosts: list[Host] _host_by_address: dict[str, Host] diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index 269a04e..93e0304 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -11,9 +11,9 @@ class DiskInfo(dict): class Host(ABC): - """Interface of a host machine where neoFS services are running. + """Interface of a host machine where frostFS services are running. - Allows to manage the machine and neoFS services that are hosted on it. + Allows to manage the machine and frostFS services that are hosted on it. 
""" def __init__(self, config: HostConfig) -> None: From 5d2963faeab18f4500808a867cefac0b05876025 Mon Sep 17 00:00:00 2001 From: Aleksei Chetaev Date: Thu, 16 Feb 2023 15:58:58 +0100 Subject: [PATCH 058/363] Remove neoFS from README Signed-off-by: Aleksei Chetaev --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 94e2709..c194df9 100644 --- a/README.md +++ b/README.md @@ -85,7 +85,7 @@ Detailed information about registering entrypoints can be found at [setuptools d The library provides the following primary components: * `blockchain` - Contains helpers that allow to interact with neo blockchain, smart contracts, gas transfers, etc. * `cli` - wrappers on top of frostFS command-line tools. These wrappers execute on a shell and provide type-safe interface for interacting with the tools. - * `hosting` - management of infrastructure (docker, virtual machines, services where frostFS is hosted). The library provides host implementation for docker environment (when neoFS services are running as docker containers). Support for other hosts is provided via plugins. + * `hosting` - management of infrastructure (docker, virtual machines, services where frostFS is hosted). The library provides host implementation for docker environment (when frostFS services are running as docker containers). Support for other hosts is provided via plugins. * `reporter` - abstraction on top of test reporting tool like Allure. Components of the library will report their steps and attach artifacts to the configured reporter instance. * `shell` - shells that can be used to execute commands. Currently library provides local shell (on machine that runs the code) or SSH shell that connects to a remote machine via SSH. * `utils` - Support functions. 
From 9d21d1c143f7231b3c257125dcf794846dc77550 Mon Sep 17 00:00:00 2001 From: Aleksei Chetaev Date: Wed, 15 Feb 2023 19:24:14 +0100 Subject: [PATCH 059/363] Implement test analytics export into TMS systems Signed-off-by: Aleksei Chetaev --- requirements.txt | 2 + src/frostfs_testlib/analytics/__init__.py | 4 + src/frostfs_testlib/analytics/test_case.py | 82 ++++++++ .../analytics/test_collector.py | 190 ++++++++++++++++++ .../analytics/test_exporter.py | 71 +++++++ .../analytics/testrail_exporter.py | 178 ++++++++++++++++ 6 files changed, 527 insertions(+) create mode 100644 src/frostfs_testlib/analytics/__init__.py create mode 100644 src/frostfs_testlib/analytics/test_case.py create mode 100644 src/frostfs_testlib/analytics/test_collector.py create mode 100644 src/frostfs_testlib/analytics/test_exporter.py create mode 100644 src/frostfs_testlib/analytics/testrail_exporter.py diff --git a/requirements.txt b/requirements.txt index a75b94f..eee5a85 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,6 +5,8 @@ neo-mamba==1.0.0 paramiko==2.10.3 pexpect==4.8.0 requests==2.28.1 +docstring_parser==0.15 +testrail-api==1.12.0 # Dev dependencies black==22.8.0 diff --git a/src/frostfs_testlib/analytics/__init__.py b/src/frostfs_testlib/analytics/__init__.py new file mode 100644 index 0000000..1eb8a74 --- /dev/null +++ b/src/frostfs_testlib/analytics/__init__.py @@ -0,0 +1,4 @@ +from test_case import id, suite_name, suite_section, title +from test_collector import TestCase, TestCaseCollector +from testrail_exporter import TestrailExporter +from testrail_exporter import TestrailExporter \ No newline at end of file diff --git a/src/frostfs_testlib/analytics/test_case.py b/src/frostfs_testlib/analytics/test_case.py new file mode 100644 index 0000000..c6e7ff5 --- /dev/null +++ b/src/frostfs_testlib/analytics/test_case.py @@ -0,0 +1,82 @@ +import allure + +from enum import Enum +from types import FunctionType + +class TestCasePriority(Enum): + HIGHEST = 0 + HIGH = 1 + MEDIUM = 2 + LOW = 3 + +def __set_label__(name: str, value: str, allure_decorator: FunctionType = None): + """ + Generic helper that lets us avoid duplicating the label-setting code in each decorator. + We get the decorated function as an object and set the needed attribute on it. + + Args: + name: attribute name to set on the function object + value: attribute value to set on the function object + allure_decorator: allure decorator used to decorate the function, so that we do not duplicate decorators with the same value + """ + def wrapper(decorated_func): + if allure_decorator: + decorated_func = allure_decorator(value)(decorated_func) + setattr(decorated_func, name, value) + return decorated_func + + return wrapper + + +def id(uuid: str): + """ + Decorator to set the test case ID, which is used as a unique value during export into a TMS. + + We prefer a UUID4-format string for the ID. + The ID has to be generated manually for each new test. + + Args: + uuid: ID to set as the test case ID on the test function + """ + return __set_label__("__test_case_id__", uuid) + + +def title(title: str): + """ + Decorator to set the test case title / name / summary / short description of what we do. + + Args: + title: string with the title to set on the test function + """ + + return __set_label__("__test_case_title__", title, allure.title) + +def priority(priority: str): + """ + Decorator to set the test case priority. 
+ + Args: + priority: string with the priority to set on the test function + """ + + return __set_label__("__test_case_priority__", priority) + + +def suite_name(name: str): + """ + Decorator to set the test case suite name. + The suite name is usually used in a TMS to create the structure of test cases. + + Args: + name: string with the test suite name to set on the test function + """ + + return __set_label__("__test_case_suite_name__", name, allure.story) + + +def suite_section(name: str): + """ + Decorator to set the test case suite section. + The suite section is usually used in a TMS to create a deeper test case structure. + """ + return __set_label__("__test_case_suite_section__", name) diff --git a/src/frostfs_testlib/analytics/test_collector.py b/src/frostfs_testlib/analytics/test_collector.py new file mode 100644 index 0000000..0f5398e --- /dev/null +++ b/src/frostfs_testlib/analytics/test_collector.py @@ -0,0 +1,190 @@ +import re + +from docstring_parser import parse +from docstring_parser.common import DocstringStyle +from docstring_parser.google import DEFAULT_SECTIONS, Section, SectionType + +DEFAULT_SECTIONS.append(Section("Steps", "steps", SectionType.MULTIPLE)) + +class TestCase: + """ + Test case object implementation for use in the collector and exporters + """ + + def __init__( + self, + uuid_id: str, + title: str, + description: str, + priority: int, + steps: dict, + params: str, + suite_name: str, + suite_section_name: str, + ): + """ + Base constructor for the TestCase object + + Args: + uuid_id: uuid from the id decorator + title: test case title from the title decorator + description: test case description read from the function __doc__ + priority: test case priority value (0-3) + steps: dict of test case steps read from the function __doc__ + params: string with test case params read from the pytest Function (test) object + suite_name: test case suite name from the suite_name decorator + suite_section_name: test case suite section from the suite_section decorator + """ + + # It can be confusing, but we rewrite the id to an "id [params]" string + # We do it because one function can produce a lot of tests if we use test params + if params: + self.id = f"{uuid_id} [{params}]" + else: + self.id: str = uuid_id + self.title: str = title + self.description: str = description + self.priority: int = priority + self.steps: dict = steps + self.params: str = params + self.suite_name: str = suite_name + self.suite_section_name: str = suite_section_name + + +class TestCaseCollector: + """ + Collector that works like a pytest plugin and can be used in a collect-only call to get the list of tests from pytest. + Additionally, we have several functions to filter the tests that can be exported. + """ + + pytest_tests = [] + + def __format_string_with_params__(self, source_string: str, test_params: dict) -> str: + """ + Helper function to format test case string arguments using test params. + A param name can be deep, like a.b.c, so we walk the value down through the test params. + Additionally, we check whether the next object is a dict or a real object, to use the right call to get the next attribute. 
+ + Args: + source_string: string to format using test params (if needed) + test_params: dictionary with test params taken from the pytest test object + Returns: + (str): formatted string with param names replaced by param values + """ + + target_string: str = source_string + for match in re.findall(r"\{(.*?)}", source_string): + nestings_attrs = match.split(".") + param = None + for nesting_attr in nestings_attrs: + if not param: + param = test_params.get(nesting_attr) + else: + if isinstance(param, dict): + param = param.get(nesting_attr) + else: + param = getattr(param, nesting_attr) + target_string = target_string.replace(f"{{{match}}}", str(param)) + return target_string + + def __get_test_case_from_pytest_test__(self, test) -> TestCase: + """ + Parse test metadata and return a test case if there is enough information for that. + + Args: + test: pytest Function object + Returns: + (TestCase): the test case if there is enough information for it, or None if not + """ + + # Default values used below + suite_name: str = None + suite_section_name: str = None + test_case_steps = dict() + test_case_params: str = "" + test_case_description: str = "" + + # Read the test case suite and section name from the test class if possible and get the test function from the class + if test.cls: + suite_name = test.cls.__dict__.get("__test_case_suite_name__", suite_name) + suite_section_name = test.cls.__dict__.get("__test_case_suite_section__", suite_section_name) + test_function = test.cls.__dict__[test.originalname] + else: + # If there is no test class, read the test function from the module + test_function = test.module.__dict__[test.originalname] + + # Read base values from the test function attributes + test_case_id = test_function.__dict__.get("__test_case_id__", None) + test_case_title = test_function.__dict__.get("__test_case_title__", None) + test_case_priority = test_function.__dict__.get("__test_case_priority__", None) + suite_name = test_function.__dict__.get("__test_case_suite_name__", suite_name) + suite_section_name = test_function.__dict__.get("__test_case_suite_section__", suite_section_name) + + # Parse test steps if they are defined in __doc__ + doc_string = parse(test_function.__doc__, style=DocstringStyle.GOOGLE) + + if doc_string.short_description: + test_case_description = doc_string.short_description + if doc_string.long_description: + test_case_description = f"{doc_string.short_description}\r\n{doc_string.long_description}" + + if doc_string.meta: + for meta in doc_string.meta: + if meta.args[0] == "steps": + test_case_steps[meta.args[1]] = meta.description + + # Read params from the test function if they exist + test_case_call_spec = getattr(test, "callspec", "") + + if test_case_call_spec: + # Set the test case params string value + test_case_params = test_case_call_spec.id + # Format the title with params + if test_case_title: + test_case_title = self.__format_string_with_params__(test_case_title, test_case_call_spec.params) + # Format the steps with params + if test_case_steps: + for key, value in test_case_steps.items(): + value = self.__format_string_with_params__(value, test_case_call_spec.params) + test_case_steps[key] = value + + # If the basic test case attributes are set, create the TestCase and return it + if test_case_id and test_case_title and suite_name: + test_case = TestCase( + uuid_id=test_case_id, + title=test_case_title, + description=test_case_description, + priority=test_case_priority, + steps=test_case_steps, + params=test_case_params, + suite_name=suite_name, + suite_section_name=suite_section_name, + ) + 
return test_case + # Return None if there is not enough information to build a test case + return None + + def pytest_report_collectionfinish(self, pytest_tests: list) -> None: + """ + !!! DO NOT CHANGE THE NAME, IT IS NOT A MISTAKE + This implements a specific pytest hook with a specific name. + Pytest will call this function when it uses the plugin in a collect-only call. + + Args: + pytest_tests: list of pytest tests + """ + self.pytest_tests.extend(pytest_tests) + + def collect_test_cases(self) -> list[TestCase]: + """ + Collect test cases from the pytest tests list and return them in the test case representation. + + Returns: + (list[TestCase]): list of test cases that we found in the pytest tests code + """ + test_cases = [] + + for test in self.pytest_tests: + test_case = self.__get_test_case_from_pytest_test__(test) + if test_case: + test_cases.append(test_case) + return test_cases \ No newline at end of file diff --git a/src/frostfs_testlib/analytics/test_exporter.py b/src/frostfs_testlib/analytics/test_exporter.py new file mode 100644 index 0000000..dd57478 --- /dev/null +++ b/src/frostfs_testlib/analytics/test_exporter.py @@ -0,0 +1,71 @@ +from abc import ABC, abstractmethod + +from test_collector import TestCase + +class TestExporter(ABC): + test_cases_cache = [] + test_suites_cache = [] + test_case_id_field_name = "" + + @abstractmethod + def fill_suite_cache(self) -> None: + """ + Fill test_suites_cache with all test suites in the TMS + It helps us avoid calling the TMS every time we search for a test suite + """ + + @abstractmethod + def fill_cases_cache(self) -> None: + """ + Fill test_cases_cache with all test cases in the TMS + It helps us avoid calling the TMS every time we search for a test case + """ + + @abstractmethod + def search_test_case_id(self, test_case_id: str) -> object: + """ + Find a test case in the TMS by ID + """ + + @abstractmethod + def get_or_create_test_suite(self, test_suite_name) -> object: + """ + Get the suite with the exact name or create it if it does not exist + """ + + @abstractmethod + def get_or_create_suite_section(self, test_rail_suite, section_name) -> object: + """ + Get the suite section with the exact name or create a new one if it does not exist + """ + + @abstractmethod + def create_test_case(self, test_case: TestCase, test_suite, test_suite_section) -> None: + """ + Create a test case in the TMS + """ + + @abstractmethod + def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None: + """ + Update a test case in the TMS + """ + + def export_test_cases(self, test_cases: list[TestCase]): + # Fill the caches before starting the import + self.fill_suite_cache() + self.fill_cases_cache() + + for test_case in test_cases: + test_suite = self.get_or_create_test_suite(test_case.suite_name) + test_section = self.get_or_create_suite_section(test_suite, test_case.suite_section_name) + test_case_in_tms = self.search_test_case_id(test_case.id) + + if test_case_in_tms: + self.update_test_case(test_case, test_case_in_tms, test_suite, test_section) + else: + self.create_test_case(test_case, test_suite, test_section) \ No newline at end of file diff --git a/src/frostfs_testlib/analytics/testrail_exporter.py b/src/frostfs_testlib/analytics/testrail_exporter.py new file mode 100644 index 0000000..cefbf2e --- /dev/null +++ b/src/frostfs_testlib/analytics/testrail_exporter.py @@ -0,0 +1,178 @@ +from testrail_api import TestRailAPI + +from test_collector import TestCase +from test_exporter import TestExporter + + +class TestrailExporter(TestExporter): + def __init__( 
self, + tr_url: str, + tr_username: str, + tr_password: str, + tr_project_id: int, + tr_template_id_without_steps: int, + tr_template_id_with_steps: int, + tr_priority_map: dict, + tr_id_field: str, + tr_description_fields: str, + tr_steps_field: str, + ): + """ + Override the base exporter's init to receive the TestRail credentials and project when the exporter is created + + Args: + tr_url: API URL used to create the TestRailAPI object. See the lib docs for details + tr_username: TestRail user login for API authentication + tr_password: TestRail user password for API authentication + tr_project_id: id of the TestRail project to export into + tr_template_id_with_steps: id of the test case template with steps + tr_template_id_without_steps: id of the test case template without steps + tr_priority_map: mapping of TestCasePriority to priority ids in TestRail + tr_id_field: name of the custom TestRail field that stores the autotest ID + tr_description_fields: name of the custom TestRail field that stores the description + tr_steps_field: name of the custom TestRail field that stores the steps + """ + + self.api: TestRailAPI = TestRailAPI(tr_url, tr_username, tr_password) + self.tr_project_id: int = tr_project_id + self.tr_template_id_without_steps = tr_template_id_without_steps + self.tr_template_id_with_steps = tr_template_id_with_steps + self.tr_priority_map = tr_priority_map + self.tr_id_field = tr_id_field + self.tr_description_fields = tr_description_fields + self.tr_steps_field = tr_steps_field + + def fill_suite_cache(self) -> None: + """ + Fill test_suites_cache with all test suites in TestRail + It helps us avoid calling the TMS every time we search for a test suite + """ + project_suites = self.api.suites.get_suites(project_id=self.tr_project_id) + + for test_suite in project_suites: + test_suite_sections = self.api.sections.get_sections( + project_id=self.tr_project_id, + suite_id=test_suite["id"], + ) + test_suite["sections"] = test_suite_sections + + self.test_suites_cache.append(test_suite) + + def fill_cases_cache(self) -> None: + """ + Fill test_cases_cache with all test cases in TestRail + It helps us avoid calling the TMS every time we search for a test case + """ + for test_suite in self.test_suites_cache: + self.test_cases_cache.extend( + self.api.cases.get_cases(self.tr_project_id, suite_id=test_suite["id"]) + ) + + def search_test_case_id(self, test_case_id: str) -> object: + """ + Find a test case in the TestRail cache by ID + """ + test_cases = [ + test_case + for test_case in self.test_cases_cache + if test_case["custom_autotest_name"] == test_case_id + ] + + if len(test_cases) > 1: + raise RuntimeError(f"Too many results found in test rail for id {test_case_id}") + elif len(test_cases) == 1: + return test_cases.pop() + else: + return None + + def get_or_create_test_suite(self, test_suite_name) -> object: + """ + Get the suite with the exact name from TestRail or create it if it does not exist + """ + test_rail_suites = [ + suite for suite in self.test_suites_cache if suite["name"] == test_suite_name + ] + + if not test_rail_suites: + test_rail_suite = self.api.suites.add_suite( + project_id=self.tr_project_id, + name=test_suite_name, + ) + test_rail_suite["sections"] = list() + self.test_suites_cache.append(test_rail_suite) + return test_rail_suite + elif len(test_rail_suites) == 1: + return test_rail_suites.pop() + else: + raise RuntimeError(f"Too many results found in test rail for suite name {test_suite_name}") + + def get_or_create_suite_section(self, test_rail_suite, section_name) -> object: + """ + Get the suite section with the exact name from TestRail or create a new one if it does not exist + """ + test_rail_sections = [ + section for section in test_rail_suite["sections"] if section["name"] == section_name + ] + + if not test_rail_sections: + test_rail_section = self.api.sections.add_section( + project_id=self.tr_project_id, + 
suite_id=test_rail_suite["id"], + name=section_name, + ) + # !!!!!! BAD !!!!!! Do we really change the object from the cache, or a copy of the suite object? + # !!!!!! We have to update the object in the cache, + # !!!!! otherwise we will try to create the section twice and get an error from the API + test_rail_suite["sections"].append(test_rail_section) + return test_rail_section + elif len(test_rail_sections) == 1: + return test_rail_sections.pop() + else: + raise RuntimeError( + f"Too many results found in test rail for section name {section_name}" + ) + + def prepare_request_body(self, test_case: TestCase, test_suite, test_suite_section) -> dict: + """ + Helper to prepare the request body for adding or updating a test case from a TestCase object + """ + request_body = { + "title": test_case.title, + "section_id": test_suite_section["id"], + self.tr_id_field: test_case.id, + } + + if test_case.priority: + request_body["priority_id"] = self.tr_priority_map.get(test_case.priority) + + if test_case.steps: + steps = [ + {"content": value, "expected": " "} + for key, value in test_case.steps.items() + ] + request_body[self.tr_steps_field] = steps + request_body["template_id"] = self.tr_template_id_with_steps + else: + request_body["template_id"] = self.tr_template_id_without_steps + if test_case.description: + request_body[self.tr_description_fields] = test_case.description + + return request_body + + + def create_test_case(self, test_case: TestCase, test_suite, test_suite_section) -> None: + """ + Create a test case in TestRail + """ + request_body = self.prepare_request_body(test_case, test_suite, test_suite_section) + + self.api.cases.add_case(**request_body) + + + def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None: + """ + Update a test case in TestRail + """ + request_body = self.prepare_request_body(test_case, test_suite, test_suite_section) + + self.api.cases.update_case(case_id=test_case_in_tms["id"], **request_body) + + From 9021c5c0dfa3776b6bde4a7c1a4c41f33f81a9c7 Mon Sep 17 00:00:00 2001 From: Aleksei Chetaev Date: Thu, 16 Feb 2023 16:13:25 +0100 Subject: [PATCH 060/363] Add missed hints Signed-off-by: Aleksei Chetaev --- src/frostfs_testlib/analytics/test_exporter.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/analytics/test_exporter.py b/src/frostfs_testlib/analytics/test_exporter.py index dd57478..9deda8a 100644 --- a/src/frostfs_testlib/analytics/test_exporter.py +++ b/src/frostfs_testlib/analytics/test_exporter.py @@ -28,13 +28,13 @@ class TestExporter(ABC): """ @abstractmethod - def get_or_create_test_suite(self, test_suite_name) -> object: + def get_or_create_test_suite(self, test_suite_name: str) -> object: """ Get the suite with the exact name or create it if it does not exist """ @abstractmethod - def get_or_create_suite_section(self, test_rail_suite, section_name) -> object: + def get_or_create_suite_section(self, test_rail_suite, section_name: str) -> object: """ Get the suite section with the exact name or create a new one if it does not exist """ From 004c5fb00afc35c1e223c81bb13ca60c0e92d3fe Mon Sep 17 00:00:00 2001 From: Aleksei Chetaev Date: Thu, 16 Feb 2023 16:15:13 +0100 Subject: [PATCH 061/363] Remove superfluous fields Signed-off-by: Aleksei Chetaev --- src/frostfs_testlib/analytics/test_exporter.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/frostfs_testlib/analytics/test_exporter.py b/src/frostfs_testlib/analytics/test_exporter.py index 9deda8a..77db0a2 --- 
a/src/frostfs_testlib/analytics/test_exporter.py +++ b/src/frostfs_testlib/analytics/test_exporter.py @@ -5,7 +5,6 @@ from test_collector import TestCase class TestExporter(ABC): test_cases_cache = [] test_suites_cache = [] - test_case_id_field_name = "" @abstractmethod def fill_suite_cache(self) -> None: From 255c6b5eecdfee780a6b0108fddd1ef64e03286b Mon Sep 17 00:00:00 2001 From: Aleksei Chetaev Date: Thu, 16 Feb 2023 17:23:29 +0100 Subject: [PATCH 062/363] Fix issue in imports with analytics module --- src/frostfs_testlib/analytics/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/analytics/__init__.py b/src/frostfs_testlib/analytics/__init__.py index 1eb8a74..c670d1d 100644 --- a/src/frostfs_testlib/analytics/__init__.py +++ b/src/frostfs_testlib/analytics/__init__.py @@ -1,4 +1,4 @@ from test_case import id, suite_name, suite_section, title from test_collector import TestCase, TestCaseCollector -from testrail_exporter import TestrailExporter +from test_exporter import TestExporter from testrail_exporter import TestrailExporter \ No newline at end of file From a1635f46c09973132c40e499b302b0ee29f96ae0 Mon Sep 17 00:00:00 2001 From: Aleksei Chetaev Date: Thu, 16 Feb 2023 17:31:39 +0100 Subject: [PATCH 063/363] Fix issue with dependencies in pyproject.toml Signed-off-by: Aleksei Chetaev --- pyproject.toml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index e9d69ab..7ffd8d2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,6 +23,8 @@ dependencies = [ "paramiko>=2.10.3", "pexpect>=4.8.0", "requests>=2.28.0", + "docstring_parser>=0.15", + "testrail-api>=1.12.0", ] requires-python = ">=3.10" From e891f3804ce9081eedf5c2e9104d8406d83a95a9 Mon Sep 17 00:00:00 2001 From: Aleksei Chetaev Date: Thu, 16 Feb 2023 17:32:04 +0100 Subject: [PATCH 064/363] Bump version 1.1.1 -> 1.2.0 --- pyproject.toml | 4 ++-- src/frostfs_testlib/__init__.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 7ffd8d2..0a6ed78 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "frostfs-testlib" -version = "1.1.1" +version = "1.2.0" description = "Building blocks and utilities to facilitate development of automated tests for FrostFS system" readme = "README.md" authors = [{ name = "NSPCC", email = "info@nspcc.ru" }] @@ -50,7 +50,7 @@ line-length = 100 target-version = ["py310"] [tool.bumpver] -current_version = "1.1.1" +current_version = "1.2.0" version_pattern = "MAJOR.MINOR.PATCH" commit_message = "Bump version {old_version} -> {new_version}" commit = true diff --git a/src/frostfs_testlib/__init__.py b/src/frostfs_testlib/__init__.py index a82b376..c68196d 100644 --- a/src/frostfs_testlib/__init__.py +++ b/src/frostfs_testlib/__init__.py @@ -1 +1 @@ -__version__ = "1.1.1" +__version__ = "1.2.0" From 44e9a8480ae4877a330b6e06e1f7de2816ecaadb Mon Sep 17 00:00:00 2001 From: Aleksei Chetaev Date: Fri, 17 Feb 2023 14:00:16 +0100 Subject: [PATCH 065/363] Move errors patterns and check functions to testlib Signed-off-by: Aleksei Chetaev --- src/frostfs_testlib/resources/__init__.py | 0 src/frostfs_testlib/resources/common.py | 36 +++++++++++++++++++++++ src/frostfs_testlib/utils/errors.py | 11 +++++++ 3 files changed, 47 insertions(+) create mode 100644 src/frostfs_testlib/resources/__init__.py create mode 100644 src/frostfs_testlib/resources/common.py create mode 100644 src/frostfs_testlib/utils/errors.py diff 
--git a/src/frostfs_testlib/resources/__init__.py b/src/frostfs_testlib/resources/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/frostfs_testlib/resources/common.py b/src/frostfs_testlib/resources/common.py new file mode 100644 index 0000000..44f8c77 --- /dev/null +++ b/src/frostfs_testlib/resources/common.py @@ -0,0 +1,36 @@ +# ACLs with final flag +PUBLIC_ACL_F = "1FBFBFFF" +PRIVATE_ACL_F = "1C8C8CCC" +READONLY_ACL_F = "1FBF8CFF" + +# ACLs without final flag set +PUBLIC_ACL = "0FBFBFFF" +INACCESSIBLE_ACL = "40000000" +STICKY_BIT_PUB_ACL = "3FFFFFFF" + +EACL_PUBLIC_READ_WRITE = "eacl-public-read-write" + +# Regex patterns of status codes of Container service +CONTAINER_NOT_FOUND = "code = 3072.*message = container not found" + + +# Regex patterns of status codes of Object service +MALFORMED_REQUEST = "code = 1024.*message = malformed request" +OBJECT_ACCESS_DENIED = "code = 2048.*message = access to object operation denied" +OBJECT_NOT_FOUND = "code = 2049.*message = object not found" +OBJECT_ALREADY_REMOVED = "code = 2052.*message = object already removed" +SESSION_NOT_FOUND = "code = 4096.*message = session token not found" +OUT_OF_RANGE = "code = 2053.*message = out of range" +# TODO: Due to https://github.com/nspcc-dev/neofs-node/issues/2092 we have to check only codes until fixed +# OBJECT_IS_LOCKED = "code = 2050.*message = object is locked" +# LOCK_NON_REGULAR_OBJECT = "code = 2051.*message = ..." will be available once 2092 is fixed +OBJECT_IS_LOCKED = "code = 2050" +LOCK_NON_REGULAR_OBJECT = "code = 2051" + +LIFETIME_REQUIRED = "either expiration epoch of a lifetime is required" +LOCK_OBJECT_REMOVAL = "lock object removal" +LOCK_OBJECT_EXPIRATION = "lock object expiration: {expiration_epoch}; current: {current_epoch}" +INVALID_RANGE_ZERO_LENGTH = "invalid '{range}' range: zero length" +INVALID_RANGE_OVERFLOW = "invalid '{range}' range: uint64 overflow" +INVALID_OFFSET_SPECIFIER = "invalid '{range}' range offset specifier" +INVALID_LENGTH_SPECIFIER = "invalid '{range}' range length specifier" diff --git a/src/frostfs_testlib/utils/errors.py b/src/frostfs_testlib/utils/errors.py new file mode 100644 index 0000000..5a7bee8 --- /dev/null +++ b/src/frostfs_testlib/utils/errors.py @@ -0,0 +1,11 @@ +import re + + +def error_matches_status(error: Exception, status_pattern: str) -> bool: + """ + Determines whether exception matches specified status pattern. + + We use re.search() to be consistent with pytest.raises. 
+ """ + match = re.search(status_pattern, str(error)) + return match is not None From 9c7c28e761c01e8c4e7ad8fd1000ec808b63af9f Mon Sep 17 00:00:00 2001 From: Aleksei Chetaev Date: Fri, 17 Feb 2023 14:02:14 +0100 Subject: [PATCH 066/363] Add imports for the new modules Signed-off-by: Aleksei Chetaev --- src/frostfs_testlib/resources/__init__.py | 1 + src/frostfs_testlib/utils/__init__.py | 3 +++ 2 files changed, 4 insertions(+) diff --git a/src/frostfs_testlib/resources/__init__.py b/src/frostfs_testlib/resources/__init__.py index e69de29..641b47e 100644 --- a/src/frostfs_testlib/resources/__init__.py +++ b/src/frostfs_testlib/resources/__init__.py @@ -0,0 +1 @@ +import common diff --git a/src/frostfs_testlib/utils/__init__.py b/src/frostfs_testlib/utils/__init__.py index e69de29..dd354a7 100644 --- a/src/frostfs_testlib/utils/__init__.py +++ b/src/frostfs_testlib/utils/__init__.py @@ -0,0 +1,3 @@ +import converters +import errors +import wallet From 8223e99ec88ae5059a2b7929ae539695ca4d28b6 Mon Sep 17 00:00:00 2001 From: Aleksei Chetaev Date: Fri, 17 Feb 2023 14:02:37 +0100 Subject: [PATCH 067/363] Bump version 1.2.0 -> 1.3.0 Signed-off-by: Aleksei Chetaev --- pyproject.toml | 4 ++-- src/frostfs_testlib/__init__.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 0a6ed78..646b002 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "frostfs-testlib" -version = "1.2.0" +version = "1.3.0" description = "Building blocks and utilities to facilitate development of automated tests for FrostFS system" readme = "README.md" authors = [{ name = "NSPCC", email = "info@nspcc.ru" }] @@ -50,7 +50,7 @@ line-length = 100 target-version = ["py310"] [tool.bumpver] -current_version = "1.2.0" +current_version = "1.3.0" version_pattern = "MAJOR.MINOR.PATCH" commit_message = "Bump version {old_version} -> {new_version}" commit = true diff --git a/src/frostfs_testlib/__init__.py b/src/frostfs_testlib/__init__.py index c68196d..67bc602 100644 --- a/src/frostfs_testlib/__init__.py +++ b/src/frostfs_testlib/__init__.py @@ -1 +1 @@ -__version__ = "1.2.0" +__version__ = "1.3.0" From 5568cbd0bfa70bced5a37c3c9ea3289d73983988 Mon Sep 17 00:00:00 2001 From: Aleksei Chetaev Date: Fri, 17 Feb 2023 16:20:00 +0100 Subject: [PATCH 068/363] Add new common erros templates Signed-off-by: Aleksei Chetaev --- pyproject.toml | 4 ++-- src/frostfs_testlib/__init__.py | 2 +- src/frostfs_testlib/resources/common.py | 1 + 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 646b002..5354e41 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "frostfs-testlib" -version = "1.3.0" +version = "1.3.1" description = "Building blocks and utilities to facilitate development of automated tests for FrostFS system" readme = "README.md" authors = [{ name = "NSPCC", email = "info@nspcc.ru" }] @@ -50,7 +50,7 @@ line-length = 100 target-version = ["py310"] [tool.bumpver] -current_version = "1.3.0" +current_version = "1.3.1" version_pattern = "MAJOR.MINOR.PATCH" commit_message = "Bump version {old_version} -> {new_version}" commit = true diff --git a/src/frostfs_testlib/__init__.py b/src/frostfs_testlib/__init__.py index 67bc602..9c73af2 100644 --- a/src/frostfs_testlib/__init__.py +++ b/src/frostfs_testlib/__init__.py @@ -1 +1 @@ -__version__ = "1.3.0" +__version__ = "1.3.1" diff --git 
a/src/frostfs_testlib/resources/common.py b/src/frostfs_testlib/resources/common.py index 44f8c77..7744c0c 100644 --- a/src/frostfs_testlib/resources/common.py +++ b/src/frostfs_testlib/resources/common.py @@ -21,6 +21,7 @@ OBJECT_NOT_FOUND = "code = 2049.*message = object not found" OBJECT_ALREADY_REMOVED = "code = 2052.*message = object already removed" SESSION_NOT_FOUND = "code = 4096.*message = session token not found" OUT_OF_RANGE = "code = 2053.*message = out of range" +EXPIRED_SESSION_TOKEN = "code = 4097.*message = expired session token" # TODO: Due to https://github.com/nspcc-dev/neofs-node/issues/2092 we have to check only codes until fixed # OBJECT_IS_LOCKED = "code = 2050.*message = object is locked" # LOCK_NON_REGULAR_OBJECT = "code = 2051.*message = ..." will be available once 2092 is fixed From 4fd9d697013366d029cc4e4417a54ff411f92136 Mon Sep 17 00:00:00 2001 From: Aleksei Chetaev Date: Mon, 20 Feb 2023 00:41:16 +0100 Subject: [PATCH 069/363] Refactor utils and add several new ones --- src/frostfs_testlib/utils/__init__.py | 8 +- .../{converters.py => converting_utils.py} | 7 - src/frostfs_testlib/utils/datetime_utils.py | 27 ++++ src/frostfs_testlib/utils/errors.py | 11 -- src/frostfs_testlib/utils/json_utils.py | 136 ++++++++++++++++++ src/frostfs_testlib/utils/string_utils.py | 31 ++++ src/frostfs_testlib/utils/wallet.py | 38 ----- src/frostfs_testlib/utils/wallet_utils.py | 75 ++++++++++ tests/test_converters.py | 2 +- tests/test_wallet.py | 2 +- 10 files changed, 276 insertions(+), 61 deletions(-) rename src/frostfs_testlib/utils/{converters.py => converting_utils.py} (86%) create mode 100644 src/frostfs_testlib/utils/datetime_utils.py delete mode 100644 src/frostfs_testlib/utils/errors.py create mode 100644 src/frostfs_testlib/utils/json_utils.py create mode 100644 src/frostfs_testlib/utils/string_utils.py delete mode 100644 src/frostfs_testlib/utils/wallet.py create mode 100644 src/frostfs_testlib/utils/wallet_utils.py diff --git a/src/frostfs_testlib/utils/__init__.py b/src/frostfs_testlib/utils/__init__.py index dd354a7..fbc4a8f 100644 --- a/src/frostfs_testlib/utils/__init__.py +++ b/src/frostfs_testlib/utils/__init__.py @@ -1,3 +1,5 @@ -import converters -import errors -import wallet +import frostfs_testlib.utils.converting_utils +import frostfs_testlib.utils.datetime_utils +import frostfs_testlib.utils.json_utils +import frostfs_testlib.utils.string_utils +import frostfs_testlib.utils.wallet_utils diff --git a/src/frostfs_testlib/utils/converters.py b/src/frostfs_testlib/utils/converting_utils.py similarity index 86% rename from src/frostfs_testlib/utils/converters.py rename to src/frostfs_testlib/utils/converting_utils.py index 65ea366..24b77ae 100644 --- a/src/frostfs_testlib/utils/converters.py +++ b/src/frostfs_testlib/utils/converting_utils.py @@ -3,7 +3,6 @@ import binascii import json import base58 -from neo3.wallet import wallet as neo3_wallet def str_to_ascii_hex(input: str) -> str: @@ -61,9 +60,3 @@ def get_wif_from_private_key(priv_key: bytes) -> str: compressed_flag = b"\x01" wif = base58.b58encode_check(wif_version + priv_key + compressed_flag) return wif.decode("utf-8") - - -def load_wallet(path: str, passwd: str = "") -> neo3_wallet.Wallet: - with open(path, "r") as wallet_file: - wlt_data = wallet_file.read() - return neo3_wallet.Wallet.from_json(json.loads(wlt_data), password=passwd) diff --git a/src/frostfs_testlib/utils/datetime_utils.py b/src/frostfs_testlib/utils/datetime_utils.py new file mode 100644 index 0000000..a357d8a ---
/dev/null +++ b/src/frostfs_testlib/utils/datetime_utils.py @@ -0,0 +1,27 @@ +# This is the place for date/time utility functions + + +def parse_time(value: str) -> int: + """Converts time interval in text form into time interval as number of seconds. + + Args: + value: time interval as text. + + Returns: + Number of seconds in the parsed time interval. + """ + value = value.lower() + + for suffix in ["s", "sec"]: + if value.endswith(suffix): + return int(value[: -len(suffix)]) + + for suffix in ["m", "min"]: + if value.endswith(suffix): + return int(value[: -len(suffix)]) * 60 + + for suffix in ["h", "hr", "hour"]: + if value.endswith(suffix): + return int(value[: -len(suffix)]) * 60 * 60 + + raise ValueError(f"Unknown units in time value '{value}'") diff --git a/src/frostfs_testlib/utils/errors.py b/src/frostfs_testlib/utils/errors.py deleted file mode 100644 index 5a7bee8..0000000 --- a/src/frostfs_testlib/utils/errors.py +++ /dev/null @@ -1,11 +0,0 @@ -import re - - -def error_matches_status(error: Exception, status_pattern: str) -> bool: - """ - Determines whether exception matches specified status pattern. - - We use re.search() to be consistent with pytest.raises. - """ - match = re.search(status_pattern, str(error)) - return match is not None diff --git a/src/frostfs_testlib/utils/json_utils.py b/src/frostfs_testlib/utils/json_utils.py new file mode 100644 index 0000000..5db989e --- /dev/null +++ b/src/frostfs_testlib/utils/json_utils.py @@ -0,0 +1,136 @@ +""" + When doing requests to FrostFS, we get JSON output as an automatically decoded + structure from protobuf. Some fields are decoded with boilerplate and binary + values are Base64-encoded. + + This module contains functions which rearrange the structure and reencode binary + data from Base64 to Base58. +""" + +import base64 + +import base58 + + +def decode_simple_header(data: dict) -> dict: + """ + This function reencodes the Simple Object header and its attributes. + """ + try: + data = decode_common_fields(data) + + # Normalize object attributes + data["header"]["attributes"] = { + attr["key"]: attr["value"] for attr in data["header"]["attributes"] + } + except Exception as exc: + raise ValueError(f"failed to decode JSON output: {exc}") from exc + + return data + + +def decode_split_header(data: dict) -> dict: + """ + This function rearranges the Complex Object header. + The header holds the SplitID, a random unique + number common among all split objects, and the IDs of the Linking + Object and the last split Object. + """ + try: + data["splitId"] = json_reencode(data["splitId"]) + data["lastPart"] = json_reencode(data["lastPart"]["value"]) if data["lastPart"] else None + data["link"] = json_reencode(data["link"]["value"]) if data["link"] else None + except Exception as exc: + raise ValueError(f"failed to decode JSON output: {exc}") from exc + + return data + + +def decode_linking_object(data: dict) -> dict: + """ + This function reencodes the Linking Object header. + It contains the IDs of child Objects and Split Chain data.
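+ + A sketch of the expected input, based on the fields accessed below + (the exact protobuf JSON layout is an assumption): + {"header": {"split": {"children": [...], "splitID": ..., + "previous": ..., "parent": ...}}}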
+ """ + try: + data = decode_simple_header(data) + split = data["header"]["split"] + split["children"] = [json_reencode(item["value"]) for item in split["children"]] + split["splitID"] = json_reencode(split["splitID"]) + split["previous"] = json_reencode(split["previous"]["value"]) if split["previous"] else None + split["parent"] = json_reencode(split["parent"]["value"]) if split["parent"] else None + except Exception as exc: + raise ValueError(f"failed to decode JSON output: {exc}") from exc + + return data + + +def decode_storage_group(data: dict) -> dict: + """ + This function reencodes Storage Group header. + """ + try: + data = decode_common_fields(data) + except Exception as exc: + raise ValueError(f"failed to decode JSON output: {exc}") from exc + + return data + + +def decode_tombstone(data: dict) -> dict: + """ + This function re-encodes Tombstone header. + """ + try: + data = decode_simple_header(data) + data["header"]["sessionToken"] = decode_session_token(data["header"]["sessionToken"]) + except Exception as exc: + raise ValueError(f"failed to decode JSON output: {exc}") from exc + return data + + +def decode_session_token(data: dict) -> dict: + """ + This function re-encodes a fragment of header which contains + information about session token. + """ + target = data["body"]["object"]["target"] + target["container"] = json_reencode(target["container"]["value"]) + target["objects"] = [json_reencode(obj["value"]) for obj in target["objects"]] + return data + + +def json_reencode(data: str) -> str: + """ + According to JSON protocol, binary data (Object/Container/Storage Group IDs, etc) + is converted to string via Base58 encoder. But we usually operate with Base64-encoded format. + This function reencodes given Base58 string into the Base64 one. + """ + return base58.b58encode(base64.b64decode(data)).decode("utf-8") + + +def encode_for_json(data: str) -> str: + """ + This function encodes binary data for sending them as protobuf + structures. + """ + return base64.b64encode(base58.b58decode(data)).decode("utf-8") + + +def decode_common_fields(data: dict) -> dict: + """ + Despite of type (simple/complex Object, Storage Group, etc) every Object + header contains several common fields. + This function rearranges these fields. 
+ """ + data["objectID"] = json_reencode(data["objectID"]["value"]) + + header = data["header"] + header["containerID"] = json_reencode(header["containerID"]["value"]) + header["ownerID"] = json_reencode(header["ownerID"]["value"]) + header["payloadHash"] = json_reencode(header["payloadHash"]["sum"]) + header["version"] = f"{header['version']['major']}{header['version']['minor']}" + # Homomorphic hash is optional and its calculation might be disabled in trusted network + if header.get("homomorphicHash"): + header["homomorphicHash"] = json_reencode(header["homomorphicHash"]["sum"]) + + return data diff --git a/src/frostfs_testlib/utils/string_utils.py b/src/frostfs_testlib/utils/string_utils.py new file mode 100644 index 0000000..490217d --- /dev/null +++ b/src/frostfs_testlib/utils/string_utils.py @@ -0,0 +1,31 @@ +import random +import re +import string + +ONLY_ASCII_LETTERS = string.ascii_letters +DIGITS_AND_ASCII_LETTERS = string.ascii_letters + string.digits + + +def random_string(length: int = 5, source: str = ONLY_ASCII_LETTERS): + """ + Generate random string from source letters list + + Args: + length: length for generated string + source: source string with letters for generate random string + Returns: + (str): random string with len == length + """ + + return "".join(random.choice(string.ascii_letters) for i in range(length)) + + +def is_str_match_pattern(error: Exception, status_pattern: str) -> bool: + """ + Determines whether exception matches specified status pattern. + + We use re.search() to be consistent with pytest.raises. + """ + match = re.search(status_pattern, str(error)) + + return match is not None diff --git a/src/frostfs_testlib/utils/wallet.py b/src/frostfs_testlib/utils/wallet.py deleted file mode 100644 index 60cd2c3..0000000 --- a/src/frostfs_testlib/utils/wallet.py +++ /dev/null @@ -1,38 +0,0 @@ -import json -import logging - -from neo3.wallet import wallet as neo3_wallet -from neo3.wallet import account as neo3_account - -logger = logging.getLogger("frostfs.testlib.utils") - - -def init_wallet(wallet_path: str, wallet_password: str): - """ - Create new wallet and new account. - Args: - wallet_path: The path to the wallet to save wallet. - wallet_password: The password for new wallet. - """ - wallet = neo3_wallet.Wallet() - account = neo3_account.Account.create_new(wallet_password) - wallet.account_add(account) - with open(wallet_path, "w") as out: - json.dump(wallet.to_json(), out) - logger.info(f"Init new wallet: {wallet_path}, address: {account.address}") - - -def get_last_address_from_wallet(wallet_path: str, wallet_password: str): - """ - Extracting the last address from the given wallet. - Args: - wallet_path: The path to the wallet to extract address from. - wallet_password: The password for the given wallet. - Returns: - The address for the wallet. 
- """ - with open(wallet_path) as wallet_file: - wallet = neo3_wallet.Wallet.from_json(json.load(wallet_file), password=wallet_password) - address = wallet.accounts[-1].address - logger.info(f"got address: {address}") - return address diff --git a/src/frostfs_testlib/utils/wallet_utils.py b/src/frostfs_testlib/utils/wallet_utils.py new file mode 100644 index 0000000..0c5ab1a --- /dev/null +++ b/src/frostfs_testlib/utils/wallet_utils.py @@ -0,0 +1,75 @@ +import base64 +import json +import logging + +import base58 +from neo3.wallet import account as neo3_account +from neo3.wallet import wallet as neo3_wallet + +logger = logging.getLogger("frostfs.testlib.utils") + + +def init_wallet(wallet_path: str, wallet_password: str): + """ + Create new wallet and new account. + Args: + wallet_path: The path to the wallet to save wallet. + wallet_password: The password for new wallet. + """ + wallet = neo3_wallet.Wallet() + account = neo3_account.Account.create_new(wallet_password) + wallet.account_add(account) + with open(wallet_path, "w") as out: + json.dump(wallet.to_json(), out) + logger.info(f"Init new wallet: {wallet_path}, address: {account.address}") + + +def get_last_address_from_wallet(wallet_path: str, wallet_password: str): + """ + Extracting the last address from the given wallet. + Args: + wallet_path: The path to the wallet to extract address from. + wallet_password: The password for the given wallet. + Returns: + The address for the wallet. + """ + with open(wallet_path) as wallet_file: + wallet = neo3_wallet.Wallet.from_json(json.load(wallet_file), password=wallet_password) + address = wallet.accounts[-1].address + logger.info(f"got address: {address}") + return address + + +def get_wallet_public_key(wallet_path: str, wallet_password: str, format: str = "hex") -> str: + def __fix_wallet_schema(wallet: dict) -> None: + # Temporary function to fix wallets that do not conform to the schema + # TODO: get rid of it once issue is solved + if "name" not in wallet: + wallet["name"] = None + for account in wallet["accounts"]: + if "extra" not in account: + account["extra"] = None + + # Get public key from wallet file + with open(wallet_path, "r") as file: + wallet_content = json.load(file) + __fix_wallet_schema(wallet_content) + wallet_from_json = neo3_wallet.Wallet.from_json(wallet_content, password=wallet_password) + public_key_hex = str(wallet_from_json.accounts[0].public_key) + + # Convert public key to specified format + if format == "hex": + return public_key_hex + if format == "base58": + public_key_base58 = base58.b58encode(bytes.fromhex(public_key_hex)) + return public_key_base58.decode("utf-8") + if format == "base64": + public_key_base64 = base64.b64encode(bytes.fromhex(public_key_hex)) + return public_key_base64.decode("utf-8") + raise ValueError(f"Invalid public key format: {format}") + + +def load_wallet(path: str, passwd: str = "") -> neo3_wallet.Wallet: + with open(path, "r") as wallet_file: + wlt_data = wallet_file.read() + return neo3_wallet.Wallet.from_json(json.loads(wlt_data), password=passwd) diff --git a/tests/test_converters.py b/tests/test_converters.py index 7600a5d..77be425 100644 --- a/tests/test_converters.py +++ b/tests/test_converters.py @@ -1,6 +1,6 @@ from unittest import TestCase -from frostfs_testlib.utils import converters +from frostfs_testlib.utils import converting_utils class TestConverters(TestCase): diff --git a/tests/test_wallet.py b/tests/test_wallet.py index f00a6af..13a7899 100644 --- a/tests/test_wallet.py +++ b/tests/test_wallet.py @@ -5,7 +5,7 @@ 
from uuid import uuid4 from neo3.wallet.wallet import Wallet -from frostfs_testlib.utils.wallet import init_wallet, get_last_address_from_wallet +from frostfs_testlib.utils.wallet_utils import get_last_address_from_wallet, init_wallet class TestWallet(TestCase): From 71b35d45c356a36a39634a76b580e72e4b511bae Mon Sep 17 00:00:00 2001 From: Aleksei Chetaev Date: Wed, 22 Feb 2023 15:45:36 +0100 Subject: [PATCH 070/363] Fix import issues after moving tests to pip install -e for testlib Signed-off-by: Aleksei Chetaev --- src/frostfs_testlib/analytics/__init__.py | 9 ++-- .../analytics/test_exporter.py | 18 ++++---- .../analytics/testrail_exporter.py | 46 +++++++++---------- .../blockchain/role_designation.py | 7 ++- src/frostfs_testlib/resources/__init__.py | 2 +- tests/test_converters.py | 18 ++++---- tests/test_local_shell.py | 3 +- tests/test_ssh_shell.py | 3 +- 8 files changed, 51 insertions(+), 55 deletions(-) diff --git a/src/frostfs_testlib/analytics/__init__.py b/src/frostfs_testlib/analytics/__init__.py index c670d1d..6995a08 100644 --- a/src/frostfs_testlib/analytics/__init__.py +++ b/src/frostfs_testlib/analytics/__init__.py @@ -1,4 +1,5 @@ -from test_case import id, suite_name, suite_section, title -from test_collector import TestCase, TestCaseCollector -from test_exporter import TestExporter -from testrail_exporter import TestrailExporter \ No newline at end of file +from frostfs_testlib.analytics import test_case +from frostfs_testlib.analytics.test_case import TestCasePriority +from frostfs_testlib.analytics.test_collector import TestCase, TestCaseCollector +from frostfs_testlib.analytics.test_exporter import TestExporter +from frostfs_testlib.analytics.testrail_exporter import TestrailExporter diff --git a/src/frostfs_testlib/analytics/test_exporter.py b/src/frostfs_testlib/analytics/test_exporter.py index 77db0a2..2af3f06 100644 --- a/src/frostfs_testlib/analytics/test_exporter.py +++ b/src/frostfs_testlib/analytics/test_exporter.py @@ -1,6 +1,7 @@ from abc import ABC, abstractmethod -from test_collector import TestCase +from frostfs_testlib.analytics.test_collector import TestCase + class TestExporter(ABC): test_cases_cache = [] @@ -45,7 +46,9 @@ class TestExporter(ABC): """ @abstractmethod - def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None: + def update_test_case( + self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section + ) -> None: """ Update test case in TMS """ @@ -57,14 +60,13 @@ class TestExporter(ABC): for test_case in test_cases: test_suite = self.get_or_create_test_suite(test_case.suite_name) - test_section = self.get_or_create_suite_section(test_suite, test_case.suite_section_name) + test_section = self.get_or_create_suite_section( + test_suite, test_case.suite_section_name + ) test_case_in_tms = self.search_test_case_id(test_case.id) - steps = [ - {"content": value, "expected": " "} - for key, value in test_case.steps.items() - ] + steps = [{"content": value, "expected": " "} for key, value in test_case.steps.items()] if test_case: self.update_test_case(test_case, test_case_in_tms) else: - self.create_test_case(test_case) \ No newline at end of file + self.create_test_case(test_case) diff --git a/src/frostfs_testlib/analytics/testrail_exporter.py b/src/frostfs_testlib/analytics/testrail_exporter.py index cefbf2e..1a7c850 100644 --- a/src/frostfs_testlib/analytics/testrail_exporter.py +++ b/src/frostfs_testlib/analytics/testrail_exporter.py @@ -1,22 +1,22 @@ from testrail_api import
TestRailAPI -from test_collector import TestCase -from test_exporter import TestExporter +from frostfs_testlib.analytics.test_collector import TestCase +from frostfs_testlib.analytics.test_exporter import TestExporter class TestrailExporter(TestExporter): def __init__( - self, - tr_url: str, - tr_username: str, - tr_password: str, - tr_project_id: int, - tr_template_id_without_steps: int, - tr_template_id_with_steps: int, - tr_priority_map: dict, - tr_id_field: str, - tr_description_fields: str, - tr_steps_field: str, + self, + tr_url: str, + tr_username: str, + tr_password: str, + tr_project_id: int, + tr_template_id_without_steps: int, + tr_template_id_with_steps: int, + tr_priority_map: dict, + tr_id_field: str, + tr_description_fields: str, + tr_steps_field: str, ): """ Redefine init for base exporter for get test rail credentials and project on create exporter @@ -101,7 +101,9 @@ class TestrailExporter(TestExporter): elif len(test_rail_suites) == 1: return test_rail_suites.pop() else: - raise RuntimeError(f"Too many results found in test rail for suite name {test_suite_name}") + raise RuntimeError( + f"Too many results found in test rail for suite name {test_suite_name}" + ) def get_or_create_suite_section(self, test_rail_suite, section_name) -> object: """ @@ -137,19 +139,15 @@ class TestrailExporter(TestExporter): "title": test_case.title, "section_id": test_suite_section["id"], self.test_case_id_field_name: test_case.id, - } if test_case.priority: request_body["priority_id"] = self.tr_priority_map.get(test_case.priority) if test_case.steps: - steps = [ - {"content": value, "expected": " "} - for key, value in test_case.steps.items() - ] + steps = [{"content": value, "expected": " "} for key, value in test_case.steps.items()] request_body[self.tr_steps_field] = steps - request_body["template_id"]=self.tr_template_id_with_steps + request_body["template_id"] = self.tr_template_id_with_steps else: request_body["template_id"] = self.tr_template_id_without_steps if test_case.description: @@ -157,7 +155,6 @@ class TestrailExporter(TestExporter): return request_body - def create_test_case(self, test_case: TestCase, test_suite, test_suite_section) -> None: """ Create test case in Testrail @@ -166,13 +163,12 @@ class TestrailExporter(TestExporter): self.api.cases.add_case(**request_body) - - def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None: + def update_test_case( + self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section + ) -> None: """ Update test case in Testrail """ request_body = self.prepare_request_body(test_case, test_suite, test_suite_section) self.api.cases.update_case(case_id=test_case_in_tms["id"], **request_body) - - diff --git a/src/frostfs_testlib/blockchain/role_designation.py b/src/frostfs_testlib/blockchain/role_designation.py index 14d321b..4535300 100644 --- a/src/frostfs_testlib/blockchain/role_designation.py +++ b/src/frostfs_testlib/blockchain/role_designation.py @@ -2,11 +2,10 @@ import json from time import sleep from typing import Optional -from cli import NeoGo -from shell import Shell -from utils.converters import process_b64_bytearray - from frostfs_testlib.blockchain import Multisig +from frostfs_testlib.cli import NeoGo +from frostfs_testlib.shell import Shell +from frostfs_testlib.utils.converting_utils import process_b64_bytearray class RoleDesignation: diff --git a/src/frostfs_testlib/resources/__init__.py b/src/frostfs_testlib/resources/__init__.py index 641b47e..71bb053 100644 --- 
a/src/frostfs_testlib/resources/__init__.py +++ b/src/frostfs_testlib/resources/__init__.py @@ -1 +1 @@ -import common +from frostfs_testlib.resources import common diff --git a/tests/test_converters.py b/tests/test_converters.py index 77be425..8ff923d 100644 --- a/tests/test_converters.py +++ b/tests/test_converters.py @@ -7,40 +7,40 @@ class TestConverters(TestCase): def test_str_to_ascii_hex(self): source_str = "" result_str = "" - self.assertEqual(converters.str_to_ascii_hex(source_str), result_str) + self.assertEqual(converting_utils.str_to_ascii_hex(source_str), result_str) source_str = '"test_data" f0r ^convert*' result_str = "22746573745f646174612220663072205e636f6e766572742a" - self.assertEqual(converters.str_to_ascii_hex(source_str), result_str) + self.assertEqual(converting_utils.str_to_ascii_hex(source_str), result_str) def test_ascii_hex_to_str(self): source_str = "" result_bytes = b"" - self.assertEqual(converters.ascii_hex_to_str(source_str), result_bytes) + self.assertEqual(converting_utils.ascii_hex_to_str(source_str), result_bytes) source_str = "22746573745f646174612220663072205e636f6e766572742a" result_bytes = b'"test_data" f0r ^convert*' - self.assertEqual(converters.ascii_hex_to_str(source_str), result_bytes) + self.assertEqual(converting_utils.ascii_hex_to_str(source_str), result_bytes) def test_process_b64_bytearray_reverse(self): source_str = "" result_bytes = b"" - self.assertEqual(converters.process_b64_bytearray_reverse(source_str), result_bytes) + self.assertEqual(converting_utils.process_b64_bytearray_reverse(source_str), result_bytes) source_str = "InRlc3RfZGF0YSIgZjByIF5jb252ZXJ0Kg==" result_bytes = b"2a747265766e6f635e207230662022617461645f7473657422" - self.assertEqual(converters.process_b64_bytearray_reverse(source_str), result_bytes) + self.assertEqual(converting_utils.process_b64_bytearray_reverse(source_str), result_bytes) def test_process_b64_bytearray(self): source_str = "" result_bytes = b"" - self.assertEqual(converters.process_b64_bytearray(source_str), result_bytes) + self.assertEqual(converting_utils.process_b64_bytearray(source_str), result_bytes) source_str = "InRlc3RfZGF0YSIgZjByIF5jb252ZXJ0Kg==" result_bytes = b"22746573745f646174612220663072205e636f6e766572742a" - self.assertEqual(converters.process_b64_bytearray(source_str), result_bytes) + self.assertEqual(converting_utils.process_b64_bytearray(source_str), result_bytes) def test_contract_hash_to_address(self): source_str = "d01a381aae45f1ed181db9d554cc5ccc69c69f4e" result_str = "NT5hJ5peVmvYdZCsFKUM5MTcEGw5TB4k89" - self.assertEqual(converters.contract_hash_to_address(source_str), result_str) + self.assertEqual(converting_utils.contract_hash_to_address(source_str), result_str) diff --git a/tests/test_local_shell.py b/tests/test_local_shell.py index 3d05e5b..6261919 100644 --- a/tests/test_local_shell.py +++ b/tests/test_local_shell.py @@ -2,8 +2,7 @@ from unittest import TestCase from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput from frostfs_testlib.shell.local_shell import LocalShell - -from tests.helpers import format_error_details, get_output_lines +from helpers import format_error_details, get_output_lines class TestLocalShellInteractive(TestCase): diff --git a/tests/test_ssh_shell.py b/tests/test_ssh_shell.py index 021014a..4d1c0fd 100644 --- a/tests/test_ssh_shell.py +++ b/tests/test_ssh_shell.py @@ -3,8 +3,7 @@ from unittest import SkipTest, TestCase from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput from 
frostfs_testlib.shell.ssh_shell import SSHShell - -from tests.helpers import format_error_details, get_output_lines +from helpers import format_error_details, get_output_lines def init_shell() -> SSHShell: From 8ed26cef7db4aeac844b46aed4d258c908f16d54 Mon Sep 17 00:00:00 2001 From: Aleksei Chetaev Date: Mon, 6 Mar 2023 17:39:03 +0100 Subject: [PATCH 071/363] Fix issue with random string generation Signed-off-by: Aleksei Chetaev --- src/frostfs_testlib/utils/string_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/utils/string_utils.py b/src/frostfs_testlib/utils/string_utils.py index 490217d..a80192c 100644 --- a/src/frostfs_testlib/utils/string_utils.py +++ b/src/frostfs_testlib/utils/string_utils.py @@ -4,6 +4,7 @@ import string ONLY_ASCII_LETTERS = string.ascii_letters DIGITS_AND_ASCII_LETTERS = string.ascii_letters + string.digits +NON_DIGITS_AND_LETTERS = string.punctuation def random_string(length: int = 5, source: str = ONLY_ASCII_LETTERS): @@ -17,7 +18,7 @@ def random_string(length: int = 5, source: str = ONLY_ASCII_LETTERS): (str): random string with len == length """ - return "".join(random.choice(string.ascii_letters) for i in range(length)) + return "".join(random.choice(source) for i in range(length)) def is_str_match_pattern(error: Exception, status_pattern: str) -> bool: From f2ecce44bfe4b372c609094eb6d81b18a85028c3 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Thu, 9 Mar 2023 14:25:39 +0300 Subject: [PATCH 072/363] Add timeout for more CLI actions Signed-off-by: Andrey Berezin --- CONTRIBUTING.md | 17 ++---------- Makefile | 27 +++++++++++++++++++ src/frostfs_testlib/cli/frostfs_cli/netmap.py | 8 ++++++ src/frostfs_testlib/cli/frostfs_cli/shards.py | 14 +++++++--- .../cli/frostfs_cli/storagegroup.py | 8 ++++++ src/frostfs_testlib/hosting/interfaces.py | 2 +- 6 files changed, 57 insertions(+), 19 deletions(-) create mode 100644 Makefile diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f290592..b0f2b58 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -43,23 +43,10 @@ $ git fetch upstream ### Set up development environment To setup development environment for `frostfs-testlib`, please, take the following steps: -1. Prepare virtualenv ```shell -$ virtualenv --python=python3.10 venv -$ source venv/bin/activate -``` - -2. Install all dependencies: - -```shell -$ pip install -r requirements.txt -``` - -3. Setup pre-commit hooks to run code formatters on staged files before you run a `git commit` command: - -```shell -$ pre-commit install +$ make venv +$ source venv.frostfs-testlib/bin/activate ``` Optionally you might want to integrate code formatters with your code editor to apply formatters to code files as you go: diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..c746608 --- /dev/null +++ b/Makefile @@ -0,0 +1,27 @@ +SHELL := /bin/bash +PYTHON_VERSION := 3.10 +VENV_DIR := venv.frostfs-testlib + +current_dir := $(shell pwd) + +venv: create requirements paths precommit + @echo Ready + +precommit: + @echo Installing pre-commit hooks + . ${VENV_DIR}/bin/activate && pre-commit install + +paths: + @echo Append paths for project + @echo Virtual environment: ${VENV_DIR} + @sudo rm -rf ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth + @sudo touch ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth + @echo ${current_dir}/src/frostfs_testlib | sudo tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth + +create: + @echo Create virtual environment + virtualenv --python=python${PYTHON_VERSION} --prompt=frostfs-testlib ${VENV_DIR} + +requirements: + @echo Installing pip requirements + . ${VENV_DIR}/bin/activate && pip install -Ur requirements.txt \ No newline at end of file diff --git a/src/frostfs_testlib/cli/frostfs_cli/netmap.py b/src/frostfs_testlib/cli/frostfs_cli/netmap.py index 7033912..8920893 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/netmap.py +++ b/src/frostfs_testlib/cli/frostfs_cli/netmap.py @@ -13,6 +13,7 @@ class FrostfsCliNetmap(CliCommand): generate_key: bool = False, ttl: Optional[int] = None, xhdr: Optional[dict] = None, + timeout: Optional[str] = None, ) -> CommandResult: """ Get current epoch number. @@ -24,6 +25,7 @@ class FrostfsCliNetmap(CliCommand): ttl: TTL value in request meta header (default 2). wallet: Path to the wallet or binary key. xhdr: Dict with request X-Headers. + timeout: Timeout for an operation (default 15s). Returns: Command's result. @@ -41,6 +43,7 @@ class FrostfsCliNetmap(CliCommand): generate_key: bool = False, ttl: Optional[int] = None, xhdr: Optional[dict] = None, + timeout: Optional[str] = None, ) -> CommandResult: """ Get information about FrostFS network. @@ -52,6 +55,7 @@ class FrostfsCliNetmap(CliCommand): ttl: TTL value in request meta header (default 2) wallet: Path to the wallet or binary key xhdr: Request X-Headers in form of Key=Value + timeout: Timeout for an operation (default 15s). Returns: Command's result. @@ -70,6 +74,7 @@ class FrostfsCliNetmap(CliCommand): json: bool = False, ttl: Optional[int] = None, xhdr: Optional[dict] = None, + timeout: Optional[str] = None, ) -> CommandResult: """ Get target node info. @@ -82,6 +87,7 @@ class FrostfsCliNetmap(CliCommand): ttl: TTL value in request meta header (default 2). wallet: Path to the wallet or binary key. xhdr: Dict with request X-Headers. + timeout: Timeout for an operation (default 15s). Returns: Command's result. @@ -99,6 +105,7 @@ class FrostfsCliNetmap(CliCommand): generate_key: bool = False, ttl: Optional[int] = None, xhdr: Optional[dict] = None, + timeout: Optional[str] = None, ) -> CommandResult: """ Request current local snapshot of the network map. @@ -110,6 +117,7 @@ class FrostfsCliNetmap(CliCommand): ttl: TTL value in request meta header (default 2). wallet: Path to the wallet or binary key. xhdr: Dict with request X-Headers. + timeout: Timeout for an operation (default 15s). Returns: Command's result. diff --git a/src/frostfs_testlib/cli/frostfs_cli/shards.py b/src/frostfs_testlib/cli/frostfs_cli/shards.py index bfab6b6..6b47ac2 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/shards.py +++ b/src/frostfs_testlib/cli/frostfs_cli/shards.py @@ -13,6 +13,7 @@ class FrostfsCliShards(CliCommand): id: Optional[list[str]], address: Optional[str] = None, all: bool = False, + timeout: Optional[str] = None, ) -> CommandResult: """ Flush objects from the write-cache to the main storage. @@ -24,6 +25,7 @@ class FrostfsCliShards(CliCommand): endpoint: Remote node address (as 'multiaddr' or ':').
wallet: WIF (NEP-2) string or path to the wallet or binary key. wallet_password: Wallet password. + timeout: Timeout for an operation (default 15s). Returns: Command's result. @@ -44,6 +46,7 @@ class FrostfsCliShards(CliCommand): address: Optional[str] = None, all: bool = False, clear_errors: bool = False, + timeout: Optional[str] = None, ) -> CommandResult: """ Set work mode of the shard. @@ -57,12 +60,13 @@ class FrostfsCliShards(CliCommand): endpoint: Remote node address (as 'multiaddr' or ':'). wallet: WIF (NEP-2) string or path to the wallet or binary key. wallet_password: Wallet password. + timeout: Timeout for an operation (default 15s). Returns: Command's result. """ return self._execute_with_password( - f"control shards set-mode", + "control shards set-mode", wallet_password, **{ param: value @@ -80,6 +84,7 @@ class FrostfsCliShards(CliCommand): path: str, address: Optional[str] = None, no_errors: bool = False, + timeout: Optional[str] = None, ) -> CommandResult: """ Dump objects from shard to a file. @@ -92,12 +97,13 @@ class FrostfsCliShards(CliCommand): endpoint: Remote node address (as 'multiaddr' or ':'). wallet: WIF (NEP-2) string or path to the wallet or binary key. wallet_password: Wallet password. + timeout: Timeout for an operation (default 15s). Returns: Command's result. """ return self._execute_with_password( - f"control shards dump", + "control shards dump", wallet_password, **{ param: value @@ -113,6 +119,7 @@ class FrostfsCliShards(CliCommand): wallet_password: str, address: Optional[str] = None, json_mode: bool = False, + timeout: Optional[str] = None, ) -> CommandResult: """ List shards of the storage node. @@ -123,12 +130,13 @@ class FrostfsCliShards(CliCommand): endpoint: Remote node address (as 'multiaddr' or ':'). wallet: WIF (NEP-2) string or path to the wallet or binary key. wallet_password: Wallet password. + timeout: Timeout for an operation (default 15s). Returns: Command's result. """ return self._execute_with_password( - f"control shards list", + "control shards list", wallet_password, **{ param: value diff --git a/src/frostfs_testlib/cli/frostfs_cli/storagegroup.py b/src/frostfs_testlib/cli/frostfs_cli/storagegroup.py index 10f724b..8fb22ce 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/storagegroup.py +++ b/src/frostfs_testlib/cli/frostfs_cli/storagegroup.py @@ -16,6 +16,7 @@ class FrostfsCliStorageGroup(CliCommand): lifetime: Optional[int] = None, address: Optional[str] = None, xhdr: Optional[dict] = None, + timeout: Optional[str] = None, ) -> CommandResult: """ Put storage group to FrostFS. @@ -30,6 +31,7 @@ class FrostfsCliStorageGroup(CliCommand): ttl: TTL value in request meta header. wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + timeout: Timeout for an operation (default 15s). Returns: Command's result. @@ -52,6 +54,7 @@ class FrostfsCliStorageGroup(CliCommand): lifetime: Optional[int] = None, address: Optional[str] = None, xhdr: Optional[dict] = None, + timeout: Optional[str] = None, ) -> CommandResult: """ Get storage group from FrostFS. @@ -67,6 +70,7 @@ class FrostfsCliStorageGroup(CliCommand): ttl: TTL value in request meta header. wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + timeout: Timeout for an operation (default 15s). Returns: Command's result. 
@@ -87,6 +91,7 @@ class FrostfsCliStorageGroup(CliCommand): lifetime: Optional[int] = None, address: Optional[str] = None, xhdr: Optional[dict] = None, + timeout: Optional[str] = None, ) -> CommandResult: """ List storage groups in FrostFS container. @@ -101,6 +106,7 @@ class FrostfsCliStorageGroup(CliCommand): ttl: TTL value in request meta header. wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + timeout: Timeout for an operation (default 15s). Returns: Command's result. @@ -122,6 +128,7 @@ class FrostfsCliStorageGroup(CliCommand): lifetime: Optional[int] = None, address: Optional[str] = None, xhdr: Optional[dict] = None, + timeout: Optional[str] = None, ) -> CommandResult: """ Delete storage group from FrostFS. @@ -137,6 +144,7 @@ class FrostfsCliStorageGroup(CliCommand): ttl: TTL value in request meta header. wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + timeout: Timeout for an operation (default 15s). Returns: Command's result. diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index 93e0304..73f4954 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -1,6 +1,6 @@ from abc import ABC, abstractmethod from datetime import datetime -from typing import Any, Optional +from typing import Optional from frostfs_testlib.hosting.config import CLIConfig, HostConfig, ServiceConfig from frostfs_testlib.shell.interfaces import Shell From e73585cf50bdeeead3ead642047ed72a87bd6f44 Mon Sep 17 00:00:00 2001 From: anikeev-yadro Date: Mon, 13 Mar 2023 15:45:06 +0300 Subject: [PATCH 073/363] Add morph remove node command Signed-off-by: anikeev-yadro --- src/frostfs_testlib/cli/frostfs_adm/morph.py | 30 ++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index aba147b..3faa875 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -127,12 +127,12 @@ class FrostfsAdmMorph(CliCommand): ) def force_new_epoch( - self, rpc_endpoint: Optional[str] = None, alphabet: Optional[str] = None + self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None ) -> CommandResult: """Create new FrostFS epoch event in the side chain. Args: - alphabet: Path to alphabet wallets dir. + alphabet_wallets: Path to alphabet wallets dir. rpc_endpoint: N3 RPC node endpoint. Returns: @@ -354,3 +354,29 @@ class FrostfsAdmMorph(CliCommand): if param not in ["self"] }, ) + + def remove_nodes( + self, node_netmap_keys: list[str], rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None + ) -> CommandResult: + """ Move node to the Offline state in the candidates list + and tick an epoch to update the netmap using frostfs-adm + + Args: + node_netmap_keys: list of nodes netmap keys. + alphabet_wallets: Path to alphabet wallets dir. + rpc_endpoint: N3 RPC node endpoint. + + Returns: + Command's result. 
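+ + Example (illustrative; assumes a FrostfsAdm instance `adm`, and the + netmap key and paths below are hypothetical): + adm.morph.remove_nodes( + ["02c9..."], + rpc_endpoint="http://morph-chain:30333", + alphabet_wallets="/path/to/alphabet-wallets", + ) +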
+ """ + if not len(node_netmap_keys): + raise AttributeError("Got empty node_netmap_keys list") + + return self._execute( + f"morph remove-nodes {' '.join(node_netmap_keys)}", + **{ + param: param_value + for param, param_value in locals().items() + if param not in ["self", "node_netmap_keys"] + }, + ) \ No newline at end of file From d97a02d1d3b0ca49128647002fe9c915620f6868 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 20 Mar 2023 16:53:52 +0300 Subject: [PATCH 074/363] Add missing functions Signed-off-by: Andrey Berezin --- src/frostfs_testlib/hosting/docker_host.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 1f7b545..7b22438 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -11,7 +11,7 @@ import docker from requests import HTTPError from frostfs_testlib.hosting.config import ParsedAttributes -from frostfs_testlib.hosting.interfaces import Host +from frostfs_testlib.hosting.interfaces import DiskInfo, Host from frostfs_testlib.shell import LocalShell, Shell, SSHShell from frostfs_testlib.shell.command_inspectors import SudoInspector @@ -142,6 +142,15 @@ class DockerHost(Host): cmd = f"{meta_clean_cmd}{data_clean_cmd}" shell.exec(cmd) + def attach_disk(self, device: str, disk_info: DiskInfo) -> None: + raise NotImplementedError("Not supported for docker") + + def detach_disk(self, device: str) -> DiskInfo: + raise NotImplementedError("Not supported for docker") + + def is_disk_attached(self, device: str, disk_info: DiskInfo) -> bool: + raise NotImplementedError("Not supported for docker") + def dump_logs( self, directory_path: str, From 997e768e92511900be5678277854002da74fabd0 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Sun, 14 May 2023 13:43:59 +0300 Subject: [PATCH 075/363] Move shared code to testlib Signed-off-by: Andrey Berezin --- CONTRIBUTING.md | 2 +- pyproject.toml | 10 +- requirements.txt | 4 + src/frostfs_testlib/__init__.py | 2 +- .../analytics/test_exporter.py | 2 +- .../analytics/testrail_exporter.py | 1 + src/frostfs_testlib/cli/frostfs_adm/adm.py | 13 +- src/frostfs_testlib/cli/frostfs_cli/cli.py | 20 +- src/frostfs_testlib/cli/neogo/go.py | 16 +- .../controllers/background_load_controller.py | 207 +++++ .../controllers/cluster_state_controller.py | 130 +++ src/frostfs_testlib/load/k6.py | 237 ++++++ src/frostfs_testlib/load/load_config.py | 211 +++++ src/frostfs_testlib/load/load_metrics.py | 162 ++++ src/frostfs_testlib/load/load_report.py | 265 ++++++ src/frostfs_testlib/load/load_steps.py | 184 +++++ src/frostfs_testlib/load/load_verifiers.py | 36 + .../processes/remote_process.py | 197 +++++ .../reporter/allure_handler.py | 5 +- src/frostfs_testlib/reporter/interfaces.py | 13 +- src/frostfs_testlib/reporter/reporter.py | 29 +- src/frostfs_testlib/resources/cli.py | 12 + src/frostfs_testlib/resources/common.py | 71 +- .../resources/error_patterns.py | 28 + src/frostfs_testlib/resources/load_params.py | 30 + src/frostfs_testlib/resources/optionals.py | 26 + .../resources/wellknown_acl.py | 11 + src/frostfs_testlib/s3/__init__.py | 3 + src/frostfs_testlib/s3/aws_cli_client.py | 754 ++++++++++++++++++ src/frostfs_testlib/s3/boto3_client.py | 661 +++++++++++++++ src/frostfs_testlib/s3/interfaces.py | 378 +++++++++ src/frostfs_testlib/shell/interfaces.py | 17 + src/frostfs_testlib/shell/ssh_shell.py | 2 +- src/frostfs_testlib/steps/__init__.py | 0 
src/frostfs_testlib/steps/acl.py | 191 +++++ src/frostfs_testlib/steps/cli/container.py | 359 +++++++++ src/frostfs_testlib/steps/cli/object.py | 727 +++++++++++++++++ .../steps/complex_object_actions.py | 210 +++++ src/frostfs_testlib/steps/epoch.py | 131 +++ src/frostfs_testlib/steps/http/__init__.py | 0 src/frostfs_testlib/steps/http/http_gate.py | 355 +++++++++ src/frostfs_testlib/steps/node_management.py | 351 ++++++++ src/frostfs_testlib/steps/payment_neogo.py | 217 +++++ src/frostfs_testlib/steps/s3/s3_helper.py | 247 ++++++ src/frostfs_testlib/steps/session_token.py | 287 +++++++ src/frostfs_testlib/steps/storage_object.py | 63 ++ src/frostfs_testlib/steps/storage_policy.py | 173 ++++ src/frostfs_testlib/steps/tombstone.py | 41 + src/frostfs_testlib/storage/__init__.py | 33 + src/frostfs_testlib/storage/cluster.py | 237 ++++++ src/frostfs_testlib/storage/constants.py | 22 + .../controllers/background_load_controller.py | 207 +++++ .../controllers/cluster_state_controller.py | 130 +++ .../storage/controllers/disk_controller.py | 41 + .../storage/controllers/shards_watcher.py | 118 +++ .../storage/dataclasses/__init__.py | 0 .../storage/dataclasses/acl.py | 103 +++ .../storage/dataclasses/frostfs_services.py | 173 ++++ .../storage/dataclasses/node_base.py | 122 +++ .../dataclasses/storage_object_info.py | 25 + .../storage/dataclasses/wallet.py | 90 +++ .../storage/service_registry.py | 21 + .../testing/cluster_test_base.py | 32 + src/frostfs_testlib/testing/test_control.py | 164 ++++ src/frostfs_testlib/utils/cli_utils.py | 135 ++++ src/frostfs_testlib/utils/env_utils.py | 30 + src/frostfs_testlib/utils/failover_utils.py | 256 ++++++ src/frostfs_testlib/utils/file_utils.py | 168 ++++ src/frostfs_testlib/utils/version_utils.py | 79 ++ 69 files changed, 9213 insertions(+), 64 deletions(-) create mode 100644 src/frostfs_testlib/controllers/background_load_controller.py create mode 100644 src/frostfs_testlib/controllers/cluster_state_controller.py create mode 100644 src/frostfs_testlib/load/k6.py create mode 100644 src/frostfs_testlib/load/load_config.py create mode 100644 src/frostfs_testlib/load/load_metrics.py create mode 100644 src/frostfs_testlib/load/load_report.py create mode 100644 src/frostfs_testlib/load/load_steps.py create mode 100644 src/frostfs_testlib/load/load_verifiers.py create mode 100644 src/frostfs_testlib/processes/remote_process.py create mode 100644 src/frostfs_testlib/resources/cli.py create mode 100644 src/frostfs_testlib/resources/error_patterns.py create mode 100644 src/frostfs_testlib/resources/load_params.py create mode 100644 src/frostfs_testlib/resources/optionals.py create mode 100644 src/frostfs_testlib/resources/wellknown_acl.py create mode 100644 src/frostfs_testlib/s3/__init__.py create mode 100644 src/frostfs_testlib/s3/aws_cli_client.py create mode 100644 src/frostfs_testlib/s3/boto3_client.py create mode 100644 src/frostfs_testlib/s3/interfaces.py create mode 100644 src/frostfs_testlib/steps/__init__.py create mode 100644 src/frostfs_testlib/steps/acl.py create mode 100644 src/frostfs_testlib/steps/cli/container.py create mode 100644 src/frostfs_testlib/steps/cli/object.py create mode 100644 src/frostfs_testlib/steps/complex_object_actions.py create mode 100644 src/frostfs_testlib/steps/epoch.py create mode 100644 src/frostfs_testlib/steps/http/__init__.py create mode 100644 src/frostfs_testlib/steps/http/http_gate.py create mode 100644 src/frostfs_testlib/steps/node_management.py create mode 100644 src/frostfs_testlib/steps/payment_neogo.py create 
mode 100644 src/frostfs_testlib/steps/s3/s3_helper.py create mode 100644 src/frostfs_testlib/steps/session_token.py create mode 100644 src/frostfs_testlib/steps/storage_object.py create mode 100644 src/frostfs_testlib/steps/storage_policy.py create mode 100644 src/frostfs_testlib/steps/tombstone.py create mode 100644 src/frostfs_testlib/storage/__init__.py create mode 100644 src/frostfs_testlib/storage/cluster.py create mode 100644 src/frostfs_testlib/storage/constants.py create mode 100644 src/frostfs_testlib/storage/controllers/background_load_controller.py create mode 100644 src/frostfs_testlib/storage/controllers/cluster_state_controller.py create mode 100644 src/frostfs_testlib/storage/controllers/disk_controller.py create mode 100644 src/frostfs_testlib/storage/controllers/shards_watcher.py create mode 100644 src/frostfs_testlib/storage/dataclasses/__init__.py create mode 100644 src/frostfs_testlib/storage/dataclasses/acl.py create mode 100644 src/frostfs_testlib/storage/dataclasses/frostfs_services.py create mode 100644 src/frostfs_testlib/storage/dataclasses/node_base.py create mode 100644 src/frostfs_testlib/storage/dataclasses/storage_object_info.py create mode 100644 src/frostfs_testlib/storage/dataclasses/wallet.py create mode 100644 src/frostfs_testlib/storage/service_registry.py create mode 100644 src/frostfs_testlib/testing/cluster_test_base.py create mode 100644 src/frostfs_testlib/testing/test_control.py create mode 100644 src/frostfs_testlib/utils/cli_utils.py create mode 100644 src/frostfs_testlib/utils/env_utils.py create mode 100644 src/frostfs_testlib/utils/failover_utils.py create mode 100644 src/frostfs_testlib/utils/file_utils.py create mode 100644 src/frostfs_testlib/utils/version_utils.py diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b0f2b58..5996820 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -116,7 +116,7 @@ contributors". To sign your work, just add a line like this at the end of your commit message: ``` -Signed-off-by: Samii Sakisaka +Signed-off-by: Andrey Berezin ``` This can easily be done with the `--signoff` option to `git commit`. 
diff --git a/pyproject.toml b/pyproject.toml index 5354e41..fd5d8b7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,10 +4,10 @@ build-backend = "setuptools.build_meta" [project] name = "frostfs-testlib" -version = "1.3.1" +version = "2.0.0" description = "Building blocks and utilities to facilitate development of automated tests for FrostFS system" readme = "README.md" -authors = [{ name = "NSPCC", email = "info@nspcc.ru" }] +authors = [{ name = "Yadro", email = "info@yadro.com" }] license = { text = "GNU General Public License v3 (GPLv3)" } classifiers = [ "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", @@ -25,6 +25,10 @@ dependencies = [ "requests>=2.28.0", "docstring_parser>=0.15", "testrail-api>=1.12.0", + "pytest==7.1.2", + "tenacity==8.0.1", + "boto3==1.16.33", + "boto3-stubs[essential]==1.16.33", ] requires-python = ">=3.10" @@ -50,7 +54,7 @@ line-length = 100 target-version = ["py310"] [tool.bumpver] -current_version = "1.3.1" +current_version = "2.0.0" version_pattern = "MAJOR.MINOR.PATCH" commit_message = "Bump version {old_version} -> {new_version}" commit = true diff --git a/requirements.txt b/requirements.txt index eee5a85..c653f7b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,6 +7,10 @@ pexpect==4.8.0 requests==2.28.1 docstring_parser==0.15 testrail-api==1.12.0 +tenacity==8.0.1 +pytest==7.1.2 +boto3==1.16.33 +boto3-stubs[essential]==1.16.33 # Dev dependencies black==22.8.0 diff --git a/src/frostfs_testlib/__init__.py b/src/frostfs_testlib/__init__.py index 9c73af2..8c0d5d5 100644 --- a/src/frostfs_testlib/__init__.py +++ b/src/frostfs_testlib/__init__.py @@ -1 +1 @@ -__version__ = "1.3.1" +__version__ = "2.0.0" diff --git a/src/frostfs_testlib/analytics/test_exporter.py b/src/frostfs_testlib/analytics/test_exporter.py index 2af3f06..263995c 100644 --- a/src/frostfs_testlib/analytics/test_exporter.py +++ b/src/frostfs_testlib/analytics/test_exporter.py @@ -66,7 +66,7 @@ class TestExporter(ABC): test_case_in_tms = self.search_test_case_id(test_case.id) steps = [{"content": value, "expected": " "} for key, value in test_case.steps.items()] - if test_case: + if test_case_in_tms: self.update_test_case(test_case, test_case_in_tms) else: self.create_test_case(test_case) diff --git a/src/frostfs_testlib/analytics/testrail_exporter.py b/src/frostfs_testlib/analytics/testrail_exporter.py index 1a7c850..610fee5 100644 --- a/src/frostfs_testlib/analytics/testrail_exporter.py +++ b/src/frostfs_testlib/analytics/testrail_exporter.py @@ -38,6 +38,7 @@ class TestrailExporter(TestExporter): self.tr_id_field = tr_id_field self.tr_description_fields = tr_description_fields self.tr_steps_field = tr_steps_field + self.test_case_id_field_name = "" # TODO: Add me def fill_suite_cache(self) -> None: """ diff --git a/src/frostfs_testlib/cli/frostfs_adm/adm.py b/src/frostfs_testlib/cli/frostfs_adm/adm.py index 283069c..0b56fbd 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/adm.py +++ b/src/frostfs_testlib/cli/frostfs_adm/adm.py @@ -9,14 +9,17 @@ from frostfs_testlib.shell import Shell class FrostfsAdm: - morph: Optional[FrostfsAdmMorph] = None - subnet: Optional[FrostfsAdmMorphSubnet] = None - storage_config: Optional[FrostfsAdmStorageConfig] = None - version: Optional[FrostfsAdmVersion] = None + morph: FrostfsAdmMorph + subnet: FrostfsAdmMorphSubnet + storage_config: FrostfsAdmStorageConfig + version: FrostfsAdmVersion + config: FrostfsAdmConfig def __init__(self, shell: Shell, frostfs_adm_exec_path: str, config_file: Optional[str] = None): self.config = 
FrostfsAdmConfig(shell, frostfs_adm_exec_path, config=config_file) self.morph = FrostfsAdmMorph(shell, frostfs_adm_exec_path, config=config_file) self.subnet = FrostfsAdmMorphSubnet(shell, frostfs_adm_exec_path, config=config_file) - self.storage_config = FrostfsAdmStorageConfig(shell, frostfs_adm_exec_path, config=config_file) + self.storage_config = FrostfsAdmStorageConfig( + shell, frostfs_adm_exec_path, config=config_file + ) self.version = FrostfsAdmVersion(shell, frostfs_adm_exec_path, config=config_file) diff --git a/src/frostfs_testlib/cli/frostfs_cli/cli.py b/src/frostfs_testlib/cli/frostfs_cli/cli.py index 07986c2..5d55f55 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/cli.py +++ b/src/frostfs_testlib/cli/frostfs_cli/cli.py @@ -14,16 +14,16 @@ from frostfs_testlib.shell import Shell class FrostfsCli: - accounting: Optional[FrostfsCliAccounting] = None - acl: Optional[FrostfsCliACL] = None - container: Optional[FrostfsCliContainer] = None - netmap: Optional[FrostfsCliNetmap] = None - object: Optional[FrostfsCliObject] = None - session: Optional[FrostfsCliSession] = None - shards: Optional[FrostfsCliShards] = None - storagegroup: Optional[FrostfsCliStorageGroup] = None - util: Optional[FrostfsCliUtil] = None - version: Optional[FrostfsCliVersion] = None + accounting: FrostfsCliAccounting + acl: FrostfsCliACL + container: FrostfsCliContainer + netmap: FrostfsCliNetmap + object: FrostfsCliObject + session: FrostfsCliSession + shards: FrostfsCliShards + storagegroup: FrostfsCliStorageGroup + util: FrostfsCliUtil + version: FrostfsCliVersion def __init__(self, shell: Shell, frostfs_cli_exec_path: str, config_file: Optional[str] = None): self.accounting = FrostfsCliAccounting(shell, frostfs_cli_exec_path, config=config_file) diff --git a/src/frostfs_testlib/cli/neogo/go.py b/src/frostfs_testlib/cli/neogo/go.py index a0634a4..9e7286c 100644 --- a/src/frostfs_testlib/cli/neogo/go.py +++ b/src/frostfs_testlib/cli/neogo/go.py @@ -12,14 +12,14 @@ from frostfs_testlib.shell import Shell class NeoGo: - candidate: Optional[NeoGoCandidate] = None - contract: Optional[NeoGoContract] = None - db: Optional[NeoGoDb] = None - nep17: Optional[NeoGoNep17] = None - node: Optional[NeoGoNode] = None - query: Optional[NeoGoQuery] = None - version: Optional[NeoGoVersion] = None - wallet: Optional[NeoGoWallet] = None + candidate: NeoGoCandidate + contract: NeoGoContract + db: NeoGoDb + nep17: NeoGoNep17 + node: NeoGoNode + query: NeoGoQuery + version: NeoGoVersion + wallet: NeoGoWallet def __init__( self, diff --git a/src/frostfs_testlib/controllers/background_load_controller.py b/src/frostfs_testlib/controllers/background_load_controller.py new file mode 100644 index 0000000..4a97c29 --- /dev/null +++ b/src/frostfs_testlib/controllers/background_load_controller.py @@ -0,0 +1,207 @@ +import frostfs_testlib.resources.optionals as optionals +from frostfs_testlib.load.k6 import K6 +from frostfs_testlib.load.load_config import ( + EndpointSelectionStrategy, + K6ProcessAllocationStrategy, + LoadParams, + LoadScenario, + LoadType, +) +from frostfs_testlib.load.load_steps import init_s3_client, prepare_k6_instances +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.resources.load_params import ( + K6_TEARDOWN_PERIOD, + LOAD_NODE_SSH_PASSWORD, + LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE, + LOAD_NODE_SSH_PRIVATE_KEY_PATH, + LOAD_NODE_SSH_USER, + LOAD_NODES, +) +from frostfs_testlib.shell.interfaces import SshCredentials +from frostfs_testlib.storage.cluster import ClusterNode +from 
frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo +from frostfs_testlib.testing.test_control import run_optionally + +reporter = get_reporter() + + +class BackgroundLoadController: + k6_instances: list[K6] + k6_dir: str + load_params: LoadParams + load_nodes: list[str] + verification_params: LoadParams + nodes_under_load: list[ClusterNode] + ssh_credentials: SshCredentials + loaders_wallet: WalletInfo + endpoints: list[str] + + def __init__( + self, + k6_dir: str, + load_params: LoadParams, + loaders_wallet: WalletInfo, + nodes_under_load: list[ClusterNode], + ) -> None: + self.k6_dir = k6_dir + self.load_params = load_params + self.nodes_under_load = nodes_under_load + self.load_nodes = LOAD_NODES + self.loaders_wallet = loaders_wallet + + if load_params.endpoint_selection_strategy is None: + raise RuntimeError("endpoint_selection_strategy should not be None") + + self.endpoints = self._get_endpoints( + load_params.load_type, load_params.endpoint_selection_strategy + ) + self.verification_params = LoadParams( + clients=load_params.readers, + scenario=LoadScenario.VERIFY, + registry_file=load_params.registry_file, + verify_time=load_params.verify_time, + load_type=load_params.load_type, + load_id=load_params.load_id, + working_dir=load_params.working_dir, + endpoint_selection_strategy=load_params.endpoint_selection_strategy, + k6_process_allocation_strategy=load_params.k6_process_allocation_strategy, + ) + self.ssh_credentials = SshCredentials( + LOAD_NODE_SSH_USER, + LOAD_NODE_SSH_PASSWORD, + LOAD_NODE_SSH_PRIVATE_KEY_PATH, + LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE, + ) + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, []) + def _get_endpoints( + self, load_type: LoadType, endpoint_selection_strategy: EndpointSelectionStrategy + ): + all_endpoints = { + LoadType.gRPC: { + EndpointSelectionStrategy.ALL: list( + set( + endpoint + for node_under_load in self.nodes_under_load + for endpoint in node_under_load.service(StorageNode).get_all_rpc_endpoint() + ) + ), + EndpointSelectionStrategy.FIRST: list( + set( + node_under_load.service(StorageNode).get_rpc_endpoint() + for node_under_load in self.nodes_under_load + ) + ), + }, + # for some reason xk6 appends http protocol on its own + LoadType.S3: { + EndpointSelectionStrategy.ALL: list( + set( + endpoint.replace("http://", "") + for node_under_load in self.nodes_under_load + for endpoint in node_under_load.service(S3Gate).get_all_endpoints() + ) + ), + EndpointSelectionStrategy.FIRST: list( + set( + node_under_load.service(S3Gate).get_endpoint().replace("http://", "") + for node_under_load in self.nodes_under_load + ) + ), + }, + } + + return all_endpoints[load_type][endpoint_selection_strategy] + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step_deco("Prepare background load instances") + def prepare(self): + if self.load_params.load_type == LoadType.S3: + init_s3_client( + self.load_nodes, + self.load_params, + self.k6_dir, + self.ssh_credentials, + self.nodes_under_load, + self.loaders_wallet, + ) + + self._prepare(self.load_params) + + def _prepare(self, load_params: LoadParams): + self.k6_instances = prepare_k6_instances( + load_nodes=LOAD_NODES, + ssh_credentials=self.ssh_credentials, + k6_dir=self.k6_dir, + load_params=load_params, + endpoints=self.endpoints, + loaders_wallet=self.loaders_wallet, + ) + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step_deco("Start background 
load") + def start(self): + if self.load_params.preset is None: + raise RuntimeError("Preset should not be none at the moment of start") + + with reporter.step( + f"Start background load on nodes {self.nodes_under_load}: " + f"writers = {self.load_params.writers}, " + f"obj_size = {self.load_params.object_size}, " + f"load_time = {self.load_params.load_time}, " + f"prepare_json = {self.load_params.preset.pregen_json}, " + f"endpoints = {self.endpoints}" + ): + for k6_load_instance in self.k6_instances: + k6_load_instance.start() + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step_deco("Stop background load") + def stop(self): + for k6_load_instance in self.k6_instances: + k6_load_instance.stop() + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, True) + def is_running(self): + for k6_load_instance in self.k6_instances: + if not k6_load_instance.is_running: + return False + + return True + + def wait_until_finish(self): + if self.load_params.load_time is None: + raise RuntimeError("LoadTime should not be none") + + for k6_instance in self.k6_instances: + k6_instance.wait_until_finished(self.load_params.load_time + int(K6_TEARDOWN_PERIOD)) + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + def verify(self): + if self.verification_params.verify_time is None: + raise RuntimeError("verify_time should not be none") + + self._prepare(self.verification_params) + with reporter.step("Run verify background load data"): + for k6_verify_instance in self.k6_instances: + k6_verify_instance.start() + k6_verify_instance.wait_until_finished(self.verification_params.verify_time) + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step_deco("K6 run results") + def get_results(self) -> dict: + results = {} + for k6_instance in self.k6_instances: + if k6_instance.load_params.k6_process_allocation_strategy is None: + raise RuntimeError("k6_process_allocation_strategy should not be none") + + result = k6_instance.get_results() + keys_map = { + K6ProcessAllocationStrategy.PER_LOAD_NODE: k6_instance.load_node, + K6ProcessAllocationStrategy.PER_ENDPOINT: k6_instance.endpoints[0], + } + key = keys_map[k6_instance.load_params.k6_process_allocation_strategy] + results[key] = result + + return results diff --git a/src/frostfs_testlib/controllers/cluster_state_controller.py b/src/frostfs_testlib/controllers/cluster_state_controller.py new file mode 100644 index 0000000..23d1a6c --- /dev/null +++ b/src/frostfs_testlib/controllers/cluster_state_controller.py @@ -0,0 +1,130 @@ +import time + +import allure + +import frostfs_testlib.resources.optionals as optionals +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.shell import CommandOptions, Shell +from frostfs_testlib.steps import epoch +from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode +from frostfs_testlib.storage.controllers.disk_controller import DiskController +from frostfs_testlib.testing.test_control import run_optionally, wait_for_success +from frostfs_testlib.utils.failover_utils import ( + wait_all_storage_nodes_returned, + wait_for_host_offline, + wait_for_host_online, + wait_for_node_online, +) + +reporter = get_reporter() + + +class ClusterStateController: + def __init__(self, shell: Shell, cluster: Cluster) -> None: + self.stopped_nodes: list[ClusterNode] = [] + self.detached_disks: dict[str, DiskController] = {} + self.stopped_storage_nodes: list[StorageNode] = [] + self.cluster = cluster + self.shell = shell + + 
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Stop host of node {node}") + def stop_node_host(self, node: ClusterNode, mode: str): + with allure.step(f"Stop host {node.host.config.address}"): + node.host.stop_host(mode=mode) + wait_for_host_offline(self.shell, node.storage_node) + self.stopped_nodes.append(node) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Start host of node {node}") + def start_node_host(self, node: ClusterNode): + with allure.step(f"Start host {node.host.config.address}"): + node.host.start_host() + wait_for_host_online(self.shell, node.storage_node) + wait_for_node_online(node.storage_node) + self.stopped_nodes.remove(node) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Start stopped hosts") + def start_stopped_hosts(self): + for node in self.stopped_nodes: + node.host.start_host() + self.stopped_nodes = [] + wait_all_storage_nodes_returned(self.shell, self.cluster) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Detach disk {device} at {mountpoint} on node {node}") + def detach_disk(self, node: StorageNode, device: str, mountpoint: str): + disk_controller = self._get_disk_controller(node, device, mountpoint) + self.detached_disks[disk_controller.id] = disk_controller + disk_controller.detach() + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Attach disk {device} at {mountpoint} on node {node}") + def attach_disk(self, node: StorageNode, device: str, mountpoint: str): + disk_controller = self._get_disk_controller(node, device, mountpoint) + disk_controller.attach() + self.detached_disks.pop(disk_controller.id, None) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Restore detached disks") + def restore_disks(self): + for disk_controller in self.detached_disks.values(): + disk_controller.attach() + self.detached_disks = {} + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Stop storage service on {node}") + def stop_storage_service(self, node: ClusterNode): + node.storage_node.stop_service() + self.stopped_storage_nodes.append(node.storage_node) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Start storage service on {node}") + def start_storage_service(self, node: ClusterNode): + node.storage_node.start_service() + self.stopped_storage_nodes.remove(node.storage_node) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Start stopped storage services") + def start_stopped_storage_services(self): + for node in self.stopped_storage_nodes: + node.start_service() + self.stopped_storage_nodes = [] + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Hard reboot host {node} via magic SysRq option") + def panic_reboot_host(self, node: ClusterNode): + shell = node.host.get_shell() + shell.exec('sudo sh -c "echo 1 > /proc/sys/kernel/sysrq"') + + options = CommandOptions(close_stdin=True, timeout=1, check=False) + shell.exec('sudo sh -c "echo b > /proc/sysrq-trigger"', options) + + # Let the things to be settled + # A little wait here to prevent ssh stuck during panic + time.sleep(10) + wait_for_host_online(self.shell, node.storage_node) + wait_for_node_online(node.storage_node) + + @reporter.step_deco("Wait up to {timeout} seconds for nodes on cluster to align epochs") + def wait_for_epochs_align(self, timeout=60): + @wait_for_success(timeout, 5, None, True) + def 
check_epochs(): + epochs_by_node = epoch.get_epochs_from_nodes(self.shell, self.cluster) + assert ( + len(set(epochs_by_node.values())) == 1 + ), f"unaligned epochs found: {epochs_by_node}" + + check_epochs() + + def _get_disk_controller( + self, node: StorageNode, device: str, mountpoint: str + ) -> DiskController: + disk_controller_id = DiskController.get_id(node, device) + if disk_controller_id in self.detached_disks.keys(): + disk_controller = self.detached_disks[disk_controller_id] + else: + disk_controller = DiskController(node, device, mountpoint) + + return disk_controller diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py new file mode 100644 index 0000000..b3534d4 --- /dev/null +++ b/src/frostfs_testlib/load/k6.py @@ -0,0 +1,237 @@ +import json +import logging +import os +from dataclasses import dataclass, fields +from time import sleep +from typing import Any + +from frostfs_testlib.load.load_config import ( + K6ProcessAllocationStrategy, + LoadParams, + LoadScenario, + LoadType, +) +from frostfs_testlib.processes.remote_process import RemoteProcess +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.shell import Shell +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo + +EXIT_RESULT_CODE = 0 + +logger = logging.getLogger("NeoLogger") +reporter = get_reporter() + + +@dataclass +class LoadResults: + data_sent: float = 0.0 + data_received: float = 0.0 + read_ops: float = 0.0 + write_ops: float = 0.0 + total_ops: float = 0.0 + + +class K6: + _k6_process: RemoteProcess + _k6_stop_attempts: int = 5 + _k6_stop_check_interval: int = 15 + + def __init__( + self, + load_params: LoadParams, + endpoints: list[str], + k6_dir: str, + shell: Shell, + load_node: str, + wallet: WalletInfo, + ): + if load_params.scenario is None: + raise RuntimeError("Scenario should not be none") + + self.load_params: LoadParams = load_params + self.endpoints = endpoints + self.load_node: str = load_node + self.shell: Shell = shell + self.wallet = wallet + self.scenario: LoadScenario = load_params.scenario + self.summary_json: str = os.path.join( + self.load_params.working_dir, + f"{self.load_params.load_id}_{self.scenario.value}_summary.json", + ) + + self._k6_dir: str = k6_dir + + @property + def process_dir(self) -> str: + return self._k6_process.process_dir + + @reporter.step_deco("Preset containers and objects") + def preset(self) -> str: + preset_grpc = f"{self._k6_dir}/scenarios/preset/preset_grpc.py" + preset_s3 = f"{self._k6_dir}/scenarios/preset/preset_s3.py" + preset_map = { + LoadType.gRPC: preset_grpc, + LoadType.S3: preset_s3, + LoadType.HTTP: preset_grpc, + } + + base_args = { + preset_grpc: [ + preset_grpc, + f"--endpoint {self.endpoints[0]}", + f"--wallet {self.wallet.path} ", + f"--config {self.wallet.config_path} ", + ], + preset_s3: [ + preset_s3, + f"--endpoint {self.endpoints[0]}", + ], + } + + preset_scenario = preset_map[self.load_params.load_type] + command_args = base_args[preset_scenario].copy() + + command_args += [ + f"--{field.metadata['preset_argument']} '{getattr(self.load_params, field.name)}'" + for field in fields(self.load_params) + if field.metadata + and self.scenario in field.metadata["applicable_scenarios"] + and field.metadata["preset_argument"] + and getattr(self.load_params, field.name) is not None + ] + + if self.load_params.preset: + command_args += [ + f"--{field.metadata['preset_argument']} '{getattr(self.load_params.preset, field.name)}'" + for field in fields(self.load_params.preset) + if 
field.metadata + and self.scenario in field.metadata["applicable_scenarios"] + and field.metadata["preset_argument"] + and getattr(self.load_params.preset, field.name) is not None + ] + + command = " ".join(command_args) + result = self.shell.exec(command) + + assert ( + result.return_code == EXIT_RESULT_CODE + ), f"Return code of preset is not zero: {result.stdout}" + return result.stdout.strip("\n") + + @reporter.step_deco("Generate K6 command") + def _generate_env_variables(self) -> str: + env_vars = { + field.metadata["env_variable"]: getattr(self.load_params, field.name) + for field in fields(self.load_params) + if field.metadata + and self.scenario in field.metadata["applicable_scenarios"] + and field.metadata["env_variable"] + and getattr(self.load_params, field.name) is not None + } + + if self.load_params.preset: + env_vars.update( + { + field.metadata["env_variable"]: getattr(self.load_params.preset, field.name) + for field in fields(self.load_params.preset) + if field.metadata + and self.scenario in field.metadata["applicable_scenarios"] + and field.metadata["env_variable"] + and getattr(self.load_params.preset, field.name) is not None + } + ) + + env_vars[f"{self.load_params.load_type.value.upper()}_ENDPOINTS"] = ",".join(self.endpoints) + env_vars["SUMMARY_JSON"] = self.summary_json + + reporter.attach( + "\n".join(f"{param}: {value}" for param, value in env_vars.items()), "K6 ENV variables" + ) + return " ".join( + [f"-e {param}='{value}'" for param, value in env_vars.items() if value is not None] + ) + + @reporter.step_deco("Start K6 on initiator") + def start(self) -> None: + command = ( + f"{self._k6_dir}/k6 run {self._generate_env_variables()} " + f"{self._k6_dir}/scenarios/{self.scenario.value}.js" + ) + self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir) + + @reporter.step_deco("Wait until K6 is finished") + def wait_until_finished(self, timeout: int = 0, k6_should_be_running: bool = False) -> None: + wait_interval = 10 + assert self._k6_process is not None, "No k6 instances were executed" + if k6_should_be_running: + assert self._k6_process.running(), "k6 should be running." + while timeout >= 0: + if not self._k6_process.running(): + return + logger.info(f"K6 is running. 
Waiting {wait_interval} seconds...") + if timeout > 0: + sleep(wait_interval) + timeout -= wait_interval + self._stop() + raise TimeoutError(f"Expected K6 finished in {timeout} sec.") + + def get_results(self) -> Any: + with reporter.step(f"K6 results from {self.load_node}"): + self.__log_output() + + if not self.summary_json: + return None + + summary_text = self.shell.exec(f"cat {self.summary_json}").stdout + summary_json = json.loads(summary_text) + + allure_filenames = { + K6ProcessAllocationStrategy.PER_LOAD_NODE: f"{self.load_node}_{self.scenario.value}_summary.json", + K6ProcessAllocationStrategy.PER_ENDPOINT: f"{self.load_node}_{self.scenario.value}_{self.endpoints[0]}_summary.json", + } + allure_filename = allure_filenames[self.load_params.k6_process_allocation_strategy] + + reporter.attach(summary_text, allure_filename) + return summary_json + + @reporter.step_deco("Assert K6 should be finished") + def _k6_should_be_finished(self) -> None: + k6_rc = self._k6_process.rc() + assert k6_rc == 0, f"K6 unexpectedly finished with RC {k6_rc}" + + @reporter.step_deco("Terminate K6 on initiator") + def stop(self) -> None: + if not self.is_running: + self.get_results() + raise AssertionError("K6 unexpectedly finished") + + self._stop() + + k6_rc = self._k6_process.rc() + assert k6_rc == EXIT_RESULT_CODE, f"Return code of K6 job should be 0, but {k6_rc}" + + @property + def is_running(self) -> bool: + if self._k6_process: + return self._k6_process.running() + return False + + @reporter.step_deco("Try to stop K6 with SIGTERM") + def _stop(self) -> None: + self._k6_process.stop() + with reporter.step("Wait until process end"): + for _ in range(self._k6_stop_attempts): + if not self._k6_process.running(): + break + + sleep(self._k6_stop_check_interval) + else: + raise AssertionError("Can not stop K6 process within timeout") + + def _kill(self) -> None: + self._k6_process.kill() + + def __log_output(self) -> None: + reporter.attach(self._k6_process.stdout(full=True), "K6 stdout") + reporter.attach(self._k6_process.stderr(full=True), "K6 stderr") diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py new file mode 100644 index 0000000..fd2fdef --- /dev/null +++ b/src/frostfs_testlib/load/load_config.py @@ -0,0 +1,211 @@ +import os +from dataclasses import dataclass, field +from enum import Enum +from typing import Optional + + +class LoadType(Enum): + gRPC = "grpc" + S3 = "s3" + HTTP = "http" + + +class LoadScenario(Enum): + gRPC = "grpc" + gRPC_CAR = "grpc_car" + S3 = "s3" + S3_CAR = "s3_car" + HTTP = "http" + VERIFY = "verify" + + +all_load_scenarios = [ + LoadScenario.gRPC, + LoadScenario.S3, + LoadScenario.HTTP, + LoadScenario.S3_CAR, + LoadScenario.gRPC_CAR, +] +all_scenarios = all_load_scenarios.copy() + [LoadScenario.VERIFY] + +constant_vus_scenarios = [LoadScenario.gRPC, LoadScenario.S3, LoadScenario.HTTP] +constant_arrival_rate_scenarios = [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR] + +grpc_preset_scenarios = [LoadScenario.gRPC, LoadScenario.HTTP, LoadScenario.gRPC_CAR] +s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR] + + +def metadata_field( + applicable_scenarios: list[LoadScenario], + preset_param: Optional[str] = None, + scenario_variable: Optional[str] = None, + distributed: Optional[bool] = False, +): + return field( + default=None, + metadata={ + "applicable_scenarios": applicable_scenarios, + "preset_argument": preset_param, + "env_variable": scenario_variable, + "distributed": distributed, + }, + ) + + +class 
NodesSelectionStrategy(Enum): + # Select ONE random node from cluster nodes. + RANDOM_SINGLE = "RANDOM_SINGLE" + # Select All nodes. + ALL = "ALL" + # Select All nodes except node under test (useful for failover). This is the default. + ALL_EXCEPT_UNDER_TEST = "ALL_EXCEPT_UNDER_TEST" + # Select ONE random node except under test (useful for failover). + RANDOM_SINGLE_EXCEPT_UNDER_TEST = "RANDOM_SINGLE_EXCEPT_UNDER_TEST" + + +class EndpointSelectionStrategy(Enum): + """Enum which defines which endpoint to select from each storage node""" + + # Select All endpoints. + ALL = "ALL" + # Select first endpoint from node + FIRST = "FIRST" + + +class K6ProcessAllocationStrategy(Enum): + """Enum which defines how K6 processes should be allocated""" + + # Each load node will get one k6 process with all endpoints (Default) + PER_LOAD_NODE = "PER_LOAD_NODE" + # Each endpoint will get its own k6 process regardless of number of load nodes. + # If there are not enough load nodes, some nodes may have multiple k6 processes + PER_ENDPOINT = "PER_ENDPOINT" + + +@dataclass +class Preset: + # ------ COMMON ------ + # Amount of objects which should be created + objects_count: Optional[int] = metadata_field(all_load_scenarios, "preload_obj", None) + # Preset json. Filled automatically. + pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON") + # Workers count for preset + workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None) + + # ------ GRPC ------ + # Amount of containers which should be created + containers_count: Optional[int] = metadata_field(grpc_preset_scenarios, "containers", None) + # Container placement policy for containers for gRPC + container_placement_policy: Optional[str] = metadata_field( + grpc_preset_scenarios, "policy", None + ) + + # ------ S3 ------ + # Amount of buckets which should be created + buckets_count: Optional[int] = metadata_field(s3_preset_scenarios, "buckets", None) + # S3 region (AKA placement policy for S3 buckets) + s3_location: Optional[str] = metadata_field(s3_preset_scenarios, "location", None) + + +@dataclass +class LoadParams: + # ------- CONTROL PARAMS ------- + # Load type can be gRPC, HTTP, S3. + load_type: LoadType + # Load scenario from k6 scenarios + scenario: Optional[LoadScenario] = None + # Strategy to select nodes under load. See NodesSelectionStrategy class for more details. + # default is ALL_EXCEPT_UNDER_TEST + nodes_selection_strategy: Optional[NodesSelectionStrategy] = None + # Strategy which defines which endpoint to select from each storage node + endpoint_selection_strategy: Optional[EndpointSelectionStrategy] = None + # Strategy which defines how K6 processes should be allocated + k6_process_allocation_strategy: Optional[K6ProcessAllocationStrategy] = None + # Set to true in order to verify uploaded objects after K6 load finish. Default is True. + verify: Optional[bool] = None + # An id for the load run, used to distinguish it between runs. Filled automatically. + load_id: Optional[str] = None + # Working directory + working_dir: Optional[str] = None + # Preset for the k6 run + preset: Optional[Preset] = None + + # ------- COMMON SCENARIO PARAMS ------- + # Load time is the maximum duration for k6 to apply load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value. + load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION") + # Object size in KB for load and preset. + object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE") + # Output registry K6 file. Filled automatically. + registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE") + # Specifies the minimum duration of every single execution (i.e. iteration). + # Any iterations that are shorter than this value will cause that VU to + # sleep for the remainder of the time until the specified minimum duration is reached. + min_iteration_duration: Optional[str] = metadata_field( + all_load_scenarios, None, "K6_MIN_ITERATION_DURATION" + ) + + # ------- CONSTANT VUS SCENARIO PARAMS ------- + # Amount of Writers VU. + writers: Optional[int] = metadata_field(constant_vus_scenarios, None, "WRITERS", True) + # Amount of Readers VU. + readers: Optional[int] = metadata_field(constant_vus_scenarios, None, "READERS", True) + # Amount of Deleters VU. + deleters: Optional[int] = metadata_field(constant_vus_scenarios, None, "DELETERS", True) + + # ------- CONSTANT ARRIVAL RATE SCENARIO PARAMS ------- + # Number of iterations to start during each timeUnit period for write. + write_rate: Optional[int] = metadata_field( + constant_arrival_rate_scenarios, None, "WRITE_RATE", True + ) + + # Number of iterations to start during each timeUnit period for read. + read_rate: Optional[int] = metadata_field( + constant_arrival_rate_scenarios, None, "READ_RATE", True + ) + + # Number of iterations to start during each timeUnit period for delete. + delete_rate: Optional[int] = metadata_field( + constant_arrival_rate_scenarios, None, "DELETE_RATE", True + ) + + # Amount of preAllocatedVUs for write operations. + preallocated_writers: Optional[int] = metadata_field( + constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True + ) + # Amount of maxVUs for write operations. + max_writers: Optional[int] = metadata_field( + constant_arrival_rate_scenarios, None, "MAX_WRITERS", True + ) + + # Amount of preAllocatedVUs for read operations. + preallocated_readers: Optional[int] = metadata_field( + constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True + ) + # Amount of maxVUs for read operations. + max_readers: Optional[int] = metadata_field( + constant_arrival_rate_scenarios, None, "MAX_READERS", True + ) + + # Amount of preAllocatedVUs for delete operations. + preallocated_deleters: Optional[int] = metadata_field( + constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True + ) + # Amount of maxVUs for delete operations. + max_deleters: Optional[int] = metadata_field( + constant_arrival_rate_scenarios, None, "MAX_DELETERS", True + ) + + # Period of time to apply the rate value. + time_unit: Optional[str] = metadata_field(constant_arrival_rate_scenarios, None, "TIME_UNIT") + + # ------- VERIFY SCENARIO PARAMS ------- + # Maximum verification time for k6 to verify objects. Default is BACKGROUND_LOAD_MAX_VERIFY_TIME (3600). + verify_time: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "TIME_LIMIT") + # Amount of Verification VU. 
+ clients: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "CLIENTS") + + def set_id(self, load_id): + self.load_id = load_id + self.registry_file = os.path.join(self.working_dir, f"{load_id}_registry.bolt") + if self.preset: + self.preset.pregen_json = os.path.join(self.working_dir, f"{load_id}_prepare.json") diff --git a/src/frostfs_testlib/load/load_metrics.py b/src/frostfs_testlib/load/load_metrics.py new file mode 100644 index 0000000..50d7b38 --- /dev/null +++ b/src/frostfs_testlib/load/load_metrics.py @@ -0,0 +1,162 @@ +from abc import ABC +from typing import Any + +from frostfs_testlib.load.load_config import LoadScenario + + +class MetricsBase(ABC): + _WRITE_SUCCESS = "" + _WRITE_ERRORS = "" + _WRITE_THROUGHPUT = "data_sent" + + _READ_SUCCESS = "" + _READ_ERRORS = "" + _READ_THROUGHPUT = "data_received" + + _DELETE_SUCCESS = "" + _DELETE_ERRORS = "" + + def __init__(self, summary) -> None: + self.summary = summary + self.metrics = summary["metrics"] + + @property + def write_total_iterations(self) -> int: + return self._get_metric(self._WRITE_SUCCESS) + self._get_metric(self._WRITE_ERRORS) + + @property + def write_success_iterations(self) -> int: + return self._get_metric(self._WRITE_SUCCESS) + + @property + def write_rate(self) -> float: + return self._get_metric_rate(self._WRITE_SUCCESS) + + @property + def write_failed_iterations(self) -> int: + return self._get_metric(self._WRITE_ERRORS) + + @property + def write_throughput(self) -> float: + return self._get_metric_rate(self._WRITE_THROUGHPUT) + + @property + def read_total_iterations(self) -> int: + return self._get_metric(self._READ_SUCCESS) + self._get_metric(self._READ_ERRORS) + + @property + def read_success_iterations(self) -> int: + return self._get_metric(self._READ_SUCCESS) + + @property + def read_rate(self) -> int: + return self._get_metric_rate(self._READ_SUCCESS) + + @property + def read_failed_iterations(self) -> int: + return self._get_metric(self._READ_ERRORS) + + @property + def read_throughput(self) -> float: + return self._get_metric_rate(self._READ_THROUGHPUT) + + @property + def delete_total_iterations(self) -> int: + return self._get_metric(self._DELETE_SUCCESS) + self._get_metric(self._DELETE_ERRORS) + + @property + def delete_success_iterations(self) -> int: + return self._get_metric(self._DELETE_SUCCESS) + + @property + def delete_failed_iterations(self) -> int: + return self._get_metric(self._DELETE_ERRORS) + + @property + def delete_rate(self) -> int: + return self._get_metric_rate(self._DELETE_SUCCESS) + + def _get_metric(self, metric: str) -> int: + metrics_method_map = {"counter": self._get_counter_metric, "gauge": self._get_gauge_metric} + + if metric not in self.metrics: + return 0 + + metric = self.metrics[metric] + metric_type = metric["type"] + if metric_type not in metrics_method_map: + raise Exception( + f"Unsupported metric type: {metric_type}, supported: {metrics_method_map.keys()}" + ) + + return metrics_method_map[metric_type](metric) + + def _get_metric_rate(self, metric: str) -> int: + metrics_method_map = {"counter": self._get_counter_metric_rate} + + if metric not in self.metrics: + return 0 + + metric = self.metrics[metric] + metric_type = metric["type"] + if metric_type not in metrics_method_map: + raise Exception( + f"Unsupported rate metric type: {metric_type}, supported: {metrics_method_map.keys()}" + ) + + return metrics_method_map[metric_type](metric) + + def _get_counter_metric_rate(self, metric: str) -> int: + return metric["values"]["rate"] + + def 
_get_counter_metric(self, metric: str) -> int: + return metric["values"]["count"] + + def _get_gauge_metric(self, metric: str) -> int: + return metric["values"]["value"] + + +class GrpcMetrics(MetricsBase): + _WRITE_SUCCESS = "frostfs_obj_put_total" + _WRITE_ERRORS = "frostfs_obj_put_fails" + + _READ_SUCCESS = "frostfs_obj_get_total" + _READ_ERRORS = "frostfs_obj_get_fails" + + _DELETE_SUCCESS = "frostfs_obj_delete_total" + _DELETE_ERRORS = "frostfs_obj_delete_fails" + + +class S3Metrics(MetricsBase): + _WRITE_SUCCESS = "aws_obj_put_total" + _WRITE_ERRORS = "aws_obj_put_fails" + + _READ_SUCCESS = "aws_obj_get_total" + _READ_ERRORS = "aws_obj_get_fails" + + _DELETE_SUCCESS = "aws_obj_delete_total" + _DELETE_ERRORS = "aws_obj_delete_fails" + + +class VerifyMetrics(MetricsBase): + _WRITE_SUCCESS = "N/A" + _WRITE_ERRORS = "N/A" + + _READ_SUCCESS = "verified_obj" + _READ_ERRORS = "invalid_obj" + + _DELETE_SUCCESS = "N/A" + _DELETE_ERRORS = "N/A" + + +def get_metrics_object(load_type: LoadScenario, summary: dict[str, Any]) -> MetricsBase: + class_map = { + LoadScenario.gRPC: GrpcMetrics, + LoadScenario.gRPC_CAR: GrpcMetrics, + LoadScenario.HTTP: GrpcMetrics, + LoadScenario.S3: S3Metrics, + LoadScenario.S3_CAR: S3Metrics, + LoadScenario.VERIFY: VerifyMetrics, + } + + return class_map[load_type](summary) diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py new file mode 100644 index 0000000..500a6e6 --- /dev/null +++ b/src/frostfs_testlib/load/load_report.py @@ -0,0 +1,265 @@ +from datetime import datetime +from typing import Optional, Tuple + +import yaml + +from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario +from frostfs_testlib.load.load_metrics import get_metrics_object + + +class LoadReport: + def __init__(self, load_test) -> None: + self.load_test = load_test + self.load_summaries: Optional[dict] = None + self.load_params: Optional[LoadParams] = None + self.start_time: Optional[datetime] = None + self.end_time: Optional[datetime] = None + + def set_start_time(self): + self.start_time = datetime.utcnow() + + def set_end_time(self): + self.end_time = datetime.utcnow() + + def set_summaries(self, load_summaries: dict): + self.load_summaries = load_summaries + + def set_load_params(self, load_params: LoadParams): + self.load_params = load_params + + def get_report_html(self): + report_sections = [ + [self.load_test, self._get_load_params_section_html], + [self.load_summaries, self._get_totals_section_html], + [self.end_time, self._get_test_time_html], + ] + + html = "" + for section in report_sections: + if section[0] is not None: + html += section[1]() + + return html + + def _get_load_params_section_html(self) -> str: + params: str = yaml.safe_dump(self.load_test, sort_keys=False) + params = params.replace("\n", "
") + section_html = f"""

Scenario params

+ +
{params}
+
""" + + return section_html + + def _get_test_time_html(self) -> str: + html = f"""

Scenario duration in UTC time (from agent)

+ {self.start_time} - {self.end_time}
+
+ """ + + return html + + def _calc_unit(self, value: float, skip_units: int = 0) -> Tuple[float, str]: + units = ["B", "KB", "MB", "GB", "TB"] + + for unit in units[skip_units:]: + if value < 1024: + return value, unit + + value = value / 1024.0 + + return value, unit + + def _seconds_to_formatted_duration(self, seconds: int) -> str: + """Converts N number of seconds to formatted output ignoring zeroes. + Examples: + 186399 -> "2d3h46m39s" + 86399 -> "23h59m59s" + 86399 -> "23h59m59s" + 3605 -> "1h5s" + 123 -> "2m3s" + """ + units = {"d": 86400, "h": 3600, "m": 60, "s": 1} + parts = [] + remaining = seconds + for divisor in units.values(): + part = remaining // divisor + remaining -= divisor * part + parts.append(part) + + return "".join([f"{val}{unit}" for unit, val in zip(units, parts) if val > 0]) + + def _row(self, caption: str, value: str) -> str: + return f"{caption}{value}" + + def _get_model_string(self): + if self.load_params.min_iteration_duration is not None: + return f"min_iteration_duration={self.load_params.min_iteration_duration}" + + model_map = { + LoadScenario.gRPC: "closed model", + LoadScenario.S3: "closed model", + LoadScenario.HTTP: "closed model", + LoadScenario.gRPC_CAR: "open model", + LoadScenario.S3_CAR: "open model", + } + + return model_map[self.load_params.scenario] + + def _get_oprations_sub_section_html( + self, + operation_type: str, + total_operations: int, + requested_rate_str: str, + vus_str: str, + total_rate: float, + throughput: float, + errors: dict[str, int], + ): + throughput_html = "" + if throughput > 0: + throughput, unit = self._calc_unit(throughput) + throughput_html = self._row("Throughput", f"{throughput:.2f} {unit}/sec") + + per_node_errors_html = "" + total_errors = 0 + if errors: + total_errors: int = 0 + for node_key, errors in errors.items(): + total_errors += errors + if ( + self.load_params.k6_process_allocation_strategy + == K6ProcessAllocationStrategy.PER_ENDPOINT + ): + per_node_errors_html += self._row(f"At {node_key}", errors) + + object_size, object_size_unit = self._calc_unit(self.load_params.object_size, 1) + duration = self._seconds_to_formatted_duration(self.load_params.load_time) + model = self._get_model_string() + # write 8KB 15h49m 50op/sec 50th open model/closed model/min_iteration duration=1s - 1.636MB/s 199.57451/s + short_summary = f"{operation_type} {object_size}{object_size_unit} {duration} {requested_rate_str} {vus_str} {model} - {throughput:.2f}{unit} {total_rate:.2f}/s" + + html = f""" + + + + {self._row("Total operations", total_operations)} + {self._row("OP/sec", f"{total_rate:.2f}")} + {throughput_html} + + + {per_node_errors_html} + {self._row("Total", f"{total_errors} ({total_errors/total_operations*100.0:.2f}%)")} +
{short_summary}MetricsErrors


+ """ + + return html + + def _get_totals_section_html(self): + + html = "

Load Results

" + + write_operations = 0 + write_op_sec = 0 + write_throughput = 0 + write_errors = {} + requested_write_rate = self.load_params.write_rate + requested_write_rate_str = f"{requested_write_rate}op/sec" if requested_write_rate else "" + + read_operations = 0 + read_op_sec = 0 + read_throughput = 0 + read_errors = {} + requested_read_rate = self.load_params.read_rate + requested_read_rate_str = f"{requested_read_rate}op/sec" if requested_read_rate else "" + + delete_operations = 0 + delete_op_sec = 0 + delete_errors = {} + requested_delete_rate = self.load_params.delete_rate + requested_delete_rate_str = ( + f"{requested_delete_rate}op/sec" if requested_delete_rate else "" + ) + + if self.load_params.scenario in [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]: + delete_vus = max( + self.load_params.preallocated_deleters or 0, self.load_params.max_deleters or 0 + ) + write_vus = max( + self.load_params.preallocated_writers or 0, self.load_params.max_writers or 0 + ) + read_vus = max( + self.load_params.preallocated_readers or 0, self.load_params.max_readers or 0 + ) + else: + write_vus = self.load_params.writers + read_vus = self.load_params.readers + delete_vus = self.load_params.deleters + + write_vus_str = f"{write_vus}th" + read_vus_str = f"{read_vus}th" + delete_vus_str = f"{delete_vus}th" + + write_section_required = False + read_section_required = False + delete_section_required = False + + for node_key, load_summary in self.load_summaries.items(): + metrics = get_metrics_object(self.load_params.scenario, load_summary) + write_operations += metrics.write_total_iterations + if write_operations: + write_section_required = True + write_op_sec += metrics.write_rate + write_throughput += metrics.write_throughput + if metrics.write_failed_iterations: + write_errors[node_key] = metrics.write_failed_iterations + + read_operations += metrics.read_total_iterations + if read_operations: + read_section_required = True + read_op_sec += metrics.read_rate + read_throughput += metrics.read_throughput + if metrics.read_failed_iterations: + read_errors[node_key] = metrics.read_failed_iterations + + delete_operations += metrics.delete_total_iterations + if delete_operations: + delete_section_required = True + delete_op_sec += metrics.delete_rate + if metrics.delete_failed_iterations: + delete_errors[node_key] = metrics.delete_failed_iterations + + if write_section_required: + html += self._get_oprations_sub_section_html( + "Write", + write_operations, + requested_write_rate_str, + write_vus_str, + write_op_sec, + write_throughput, + write_errors, + ) + + if read_section_required: + html += self._get_oprations_sub_section_html( + "Read", + read_operations, + requested_read_rate_str, + read_vus_str, + read_op_sec, + read_throughput, + read_errors, + ) + + if delete_section_required: + html += self._get_oprations_sub_section_html( + "Delete", + delete_operations, + requested_delete_rate_str, + delete_vus_str, + delete_op_sec, + 0, + delete_errors, + ) + + return html diff --git a/src/frostfs_testlib/load/load_steps.py b/src/frostfs_testlib/load/load_steps.py new file mode 100644 index 0000000..5d935aa --- /dev/null +++ b/src/frostfs_testlib/load/load_steps.py @@ -0,0 +1,184 @@ +import copy +import itertools +import math +import re +from dataclasses import fields + +from frostfs_testlib.cli import FrostfsAuthmate +from frostfs_testlib.load.k6 import K6 +from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams +from frostfs_testlib.reporter import get_reporter +from 
frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC +from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_VUS_COUNT_DIVISOR +from frostfs_testlib.shell import CommandOptions, SSHShell +from frostfs_testlib.shell.interfaces import InteractiveInput, SshCredentials +from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo + +reporter = get_reporter() + +STOPPED_HOSTS = [] + + +@reporter.step_deco("Init s3 client on load nodes") +def init_s3_client( + load_nodes: list[str], + load_params: LoadParams, + k6_directory: str, + ssh_credentials: SshCredentials, + nodes_under_load: list[ClusterNode], + wallet: WalletInfo, +): + storage_node = nodes_under_load[0].service(StorageNode) + s3_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in nodes_under_load] + grpc_peer = storage_node.get_rpc_endpoint() + + for load_node in load_nodes: + ssh_client = _get_ssh_client(ssh_credentials, load_node) + frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(ssh_client, FROSTFS_AUTHMATE_EXEC) + issue_secret_output = frostfs_authmate_exec.secret.issue( + wallet=wallet.path, + peer=grpc_peer, + bearer_rules=f"{k6_directory}/scenarios/files/rules.json", + gate_public_key=s3_public_keys, + container_placement_policy=load_params.preset.container_placement_policy, + container_policy=f"{k6_directory}/scenarios/files/policy.json", + wallet_password=wallet.password, + ).stdout + aws_access_key_id = str( + re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group( + "aws_access_key_id" + ) + ) + aws_secret_access_key = str( + re.search( + r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output + ).group("aws_secret_access_key") + ) + # prompt_pattern doesn't work at the moment + configure_input = [ + InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id), + InteractiveInput( + prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key + ), + InteractiveInput(prompt_pattern=r".*", input=""), + InteractiveInput(prompt_pattern=r".*", input=""), + ] + ssh_client.exec("aws configure", CommandOptions(interactive_inputs=configure_input)) + + +@reporter.step_deco("Prepare K6 instances and objects") +def prepare_k6_instances( + load_nodes: list[str], + ssh_credentials: SshCredentials, + k6_dir: str, + load_params: LoadParams, + endpoints: list[str], + loaders_wallet: WalletInfo, +) -> list[K6]: + k6_load_objects: list[K6] = [] + nodes = itertools.cycle(load_nodes) + + k6_distribution_count = { + K6ProcessAllocationStrategy.PER_LOAD_NODE: len(load_nodes), + K6ProcessAllocationStrategy.PER_ENDPOINT: len(endpoints), + } + endpoints_generators = { + K6ProcessAllocationStrategy.PER_LOAD_NODE: itertools.cycle([endpoints]), + K6ProcessAllocationStrategy.PER_ENDPOINT: itertools.cycle( + [[endpoint] for endpoint in endpoints] + ), + } + k6_processes_count = k6_distribution_count[load_params.k6_process_allocation_strategy] + endpoints_gen = endpoints_generators[load_params.k6_process_allocation_strategy] + + distributed_load_params_list = _get_distributed_load_params_list( + load_params, k6_processes_count + ) + + for distributed_load_params in distributed_load_params_list: + load_node = next(nodes) + ssh_client = _get_ssh_client(ssh_credentials, load_node) + k6_load_object = K6( + distributed_load_params, + next(endpoints_gen), + k6_dir, + ssh_client, + load_node, + loaders_wallet, + ) + 
k6_load_objects.append(k6_load_object) + if load_params.preset: + k6_load_object.preset() + + return k6_load_objects + + +def _get_ssh_client(ssh_credentials: SshCredentials, load_node: str): + ssh_client = SSHShell( + host=load_node, + login=ssh_credentials.ssh_login, + password=ssh_credentials.ssh_password, + private_key_path=ssh_credentials.ssh_key_path, + private_key_passphrase=ssh_credentials.ssh_key_passphrase, + ) + + return ssh_client + + +def _get_distributed_load_params_list( + original_load_params: LoadParams, workers_count: int +) -> list[LoadParams]: + divisor = int(BACKGROUND_LOAD_VUS_COUNT_DIVISOR) + distributed_load_params: list[LoadParams] = [] + + for i in range(workers_count): + load_params = copy.deepcopy(original_load_params) + # Append #i here in case multiple k6 processes land on the same load node + load_params.set_id(f"{load_params.load_id}_{i}") + distributed_load_params.append(load_params) + + load_fields = fields(original_load_params) + + for field in load_fields: + if ( + field.metadata + and original_load_params.scenario in field.metadata["applicable_scenarios"] + and field.metadata["distributed"] + and getattr(original_load_params, field.name) is not None + ): + original_value = getattr(original_load_params, field.name) + distribution = _get_distribution(math.ceil(original_value / divisor), workers_count) + for i in range(workers_count): + setattr(distributed_load_params[i], field.name, distribution[i]) + + return distributed_load_params + + +def _get_distribution(clients_count: int, workers_count: int) -> list[int]: + """ + Distribute X clients over Y workers as evenly as possible. + For example, if we have 150 readers (clients) and want to spread them over 4 load nodes (workers), + this will return [38, 38, 37, 37]. + + Args: + clients_count: amount of clients that need to be distributed. + workers_count: amount of workers. + + Returns: + list with the resulting distribution. 
+ """ + if workers_count < 1: + raise Exception("Workers cannot be less then 1") + + # Amount of guaranteed payload on one worker + clients_per_worker = clients_count // workers_count + # Remainder of clients left to be distributed + remainder = clients_count - clients_per_worker * workers_count + + distribution = [ + clients_per_worker + 1 if i < remainder else clients_per_worker + for i in range(workers_count) + ] + return distribution diff --git a/src/frostfs_testlib/load/load_verifiers.py b/src/frostfs_testlib/load/load_verifiers.py new file mode 100644 index 0000000..becfcf7 --- /dev/null +++ b/src/frostfs_testlib/load/load_verifiers.py @@ -0,0 +1,36 @@ +import logging + +from frostfs_testlib.load.load_config import LoadParams, LoadScenario +from frostfs_testlib.load.load_metrics import get_metrics_object + +logger = logging.getLogger("NeoLogger") + + +class LoadVerifier: + def __init__(self, load_params: LoadParams) -> None: + self.load_params = load_params + + def verify_summaries(self, load_summary, verification_summary) -> None: + if not verification_summary or not load_summary: + logger.info("Can't check load results due to missing summary") + + load_metrics = get_metrics_object(self.load_params.scenario, load_summary) + writers = self.load_params.writers or 0 + + objects_count = load_metrics.write_success_iterations + fails_count = load_metrics.write_failed_iterations + + if writers > 0: + assert objects_count > 0, "Total put objects should be greater than 0" + assert fails_count == 0, f"There were {fails_count} failed put objects operations" + + if verification_summary: + verify_metrics = get_metrics_object(LoadScenario.VERIFY, verification_summary) + verified_objects = verify_metrics.read_success_iterations + invalid_objects = verify_metrics.read_failed_iterations + + assert invalid_objects == 0, f"There were {invalid_objects} verification fails" + # Due to interruptions we may see total verified objects to be less than written on writers count + assert ( + abs(objects_count - verified_objects) <= writers + ), f"Verified objects is less than total objects. Total: {objects_count}, Verified: {verified_objects}. Writers: {writers}." diff --git a/src/frostfs_testlib/processes/remote_process.py b/src/frostfs_testlib/processes/remote_process.py new file mode 100644 index 0000000..c5b40bc --- /dev/null +++ b/src/frostfs_testlib/processes/remote_process.py @@ -0,0 +1,197 @@ +from __future__ import annotations + +import os +import uuid +from typing import Optional + +from tenacity import retry +from tenacity.stop import stop_after_attempt +from tenacity.wait import wait_fixed + +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.shell import Shell +from frostfs_testlib.shell.interfaces import CommandOptions + +reporter = get_reporter() + + +class RemoteProcess: + def __init__(self, cmd: str, process_dir: str, shell: Shell): + self.process_dir = process_dir + self.cmd = cmd + self.stdout_last_line_number = 0 + self.stderr_last_line_number = 0 + self.pid: Optional[str] = None + self.proc_rc: Optional[int] = None + self.saved_stdout: Optional[str] = None + self.saved_stderr: Optional[str] = None + self.shell = shell + + @classmethod + @reporter.step_deco("Create remote process") + def create(cls, command: str, shell: Shell, working_dir: str = "/tmp") -> RemoteProcess: + """ + Create a process on a remote host. 
+ + Created dir for process with following files: + command.sh: script to execute + pid: contains process id + rc: contains script return code + stderr: contains script errors + stdout: contains script output + + Args: + shell: Shell instance + command: command to be run on a remote host + working_dir: working directory for the process + + Returns: + RemoteProcess instance for further examination + """ + remote_process = cls( + cmd=command, process_dir=os.path.join(working_dir, f"proc_{uuid.uuid4()}"), shell=shell + ) + remote_process._create_process_dir() + remote_process._generate_command_script(command) + remote_process._start_process() + remote_process.pid = remote_process._get_pid() + return remote_process + + @reporter.step_deco("Get process stdout") + def stdout(self, full: bool = False) -> str: + """ + Method to get process stdout, either fresh info or full. + + Args: + full: returns full stdout that we have to this moment + + Returns: + Fresh stdout. By means of stdout_last_line_number only new stdout lines are returned. + If process is finished (proc_rc is not None) saved stdout is returned + """ + if self.saved_stdout is not None: + cur_stdout = self.saved_stdout + else: + terminal = self.shell.exec(f"cat {self.process_dir}/stdout") + if self.proc_rc is not None: + self.saved_stdout = terminal.stdout + cur_stdout = terminal.stdout + + if full: + return cur_stdout + whole_stdout = cur_stdout.split("\n") + if len(whole_stdout) > self.stdout_last_line_number: + resulted_stdout = "\n".join(whole_stdout[self.stdout_last_line_number :]) + self.stdout_last_line_number = len(whole_stdout) + return resulted_stdout + return "" + + @reporter.step_deco("Get process stderr") + def stderr(self, full: bool = False) -> str: + """ + Method to get process stderr, either fresh info or full. + + Args: + full: returns full stderr that we have to this moment + + Returns: + Fresh stderr. By means of stderr_last_line_number only new stderr lines are returned. + If process is finished (proc_rc is not None) saved stderr is returned + """ + if self.saved_stderr is not None: + cur_stderr = self.saved_stderr + else: + terminal = self.shell.exec(f"cat {self.process_dir}/stderr") + if self.proc_rc is not None: + self.saved_stderr = terminal.stdout + cur_stderr = terminal.stdout + if full: + return cur_stderr + whole_stderr = cur_stderr.split("\n") + if len(whole_stderr) > self.stderr_last_line_number: + resulted_stderr = "\n".join(whole_stderr[self.stderr_last_line_number :]) + self.stderr_last_line_number = len(whole_stderr) + return resulted_stderr + return "" + + @reporter.step_deco("Get process rc") + def rc(self) -> Optional[int]: + if self.proc_rc is not None: + return self.proc_rc + + terminal = self.shell.exec(f"cat {self.process_dir}/rc", CommandOptions(check=False)) + if "No such file or directory" in terminal.stderr: + return None + elif terminal.stderr or terminal.return_code != 0: + raise AssertionError(f"cat process rc was not successful: {terminal.stderr}") + + self.proc_rc = int(terminal.stdout) + return self.proc_rc + + @reporter.step_deco("Check if process is running") + def running(self) -> bool: + return self.rc() is None + + @reporter.step_deco("Send signal to process") + def send_signal(self, signal: int) -> None: + kill_res = self.shell.exec(f"kill -{signal} {self.pid}", CommandOptions(check=False)) + if "No such process" in kill_res.stderr: + return + if kill_res.return_code: + raise AssertionError( + f"Signal {signal} not sent. 
Return code of kill: {kill_res.return_code}" + ) + + @reporter.step_deco("Stop process") + def stop(self) -> None: + self.send_signal(15) + + @reporter.step_deco("Kill process") + def kill(self) -> None: + self.send_signal(9) + + @reporter.step_deco("Clear process directory") + def clear(self) -> None: + if self.process_dir == "/": + raise AssertionError(f"Invalid path to delete: {self.process_dir}") + self.shell.exec(f"rm -rf {self.process_dir}") + + @reporter.step_deco("Start remote process") + def _start_process(self) -> None: + self.shell.exec( + f"nohup {self.process_dir}/command.sh </dev/null >{self.process_dir}/stdout " + f"2>{self.process_dir}/stderr &" + ) + + @reporter.step_deco("Create process directory") + def _create_process_dir(self) -> None: + self.shell.exec(f"mkdir {self.process_dir}") + self.shell.exec(f"chmod 777 {self.process_dir}") + terminal = self.shell.exec(f"realpath {self.process_dir}") + self.process_dir = terminal.stdout.strip() + + @reporter.step_deco("Get pid") + @retry(wait=wait_fixed(10), stop=stop_after_attempt(5), reraise=True) + def _get_pid(self) -> str: + terminal = self.shell.exec(f"cat {self.process_dir}/pid") + assert terminal.stdout, f"invalid pid: {terminal.stdout}" + return terminal.stdout.strip() + + @reporter.step_deco("Generate command script") + def _generate_command_script(self, command: str) -> None: + command = command.replace("\\", "\\\\").replace('"', '\\"') + script = ( + f"#!/bin/bash\n" + f"cd {self.process_dir}\n" + f"{command} &\n" + f"pid=\$!\n" + f"cd {self.process_dir}\n" + f"echo \$pid > {self.process_dir}/pid\n" + f"wait \$pid\n" + f"echo \$? > {self.process_dir}/rc" + ) + + self.shell.exec(f'echo "{script}" > {self.process_dir}/command.sh') + self.shell.exec(f"cat {self.process_dir}/command.sh") + self.shell.exec(f"chmod +x {self.process_dir}/command.sh") diff --git a/src/frostfs_testlib/reporter/allure_handler.py b/src/frostfs_testlib/reporter/allure_handler.py index 92a295a..8e00b26 100644 --- a/src/frostfs_testlib/reporter/allure_handler.py +++ b/src/frostfs_testlib/reporter/allure_handler.py @@ -1,7 +1,7 @@ import os from contextlib import AbstractContextManager from textwrap import shorten -from typing import Any +from typing import Any, Callable import allure from allure import attachment_type @@ -16,6 +16,9 @@ class AllureHandler(ReporterHandler): name = shorten(name, width=70, placeholder="...") return allure.step(name) + def step_decorator(self, name: str) -> Callable: + return allure.step(name) + def attach(self, body: Any, file_name: str) -> None: attachment_name, extension = os.path.splitext(file_name) attachment_type = self._resolve_attachment_type(extension) diff --git a/src/frostfs_testlib/reporter/interfaces.py b/src/frostfs_testlib/reporter/interfaces.py index f2f6ce4..b47a3fb 100644 --- a/src/frostfs_testlib/reporter/interfaces.py +++ b/src/frostfs_testlib/reporter/interfaces.py @@ -1,6 +1,6 @@ from abc import ABC, abstractmethod from contextlib import AbstractContextManager -from typing import Any +from typing import Any, Callable class ReporterHandler(ABC): @@ -17,6 +17,17 @@ class ReporterHandler(ABC): Step context. """ + @abstractmethod + def step_decorator(self, name: str) -> Callable: + """A step decorator from reporter. + + Args: + name: Name of the step. + + Returns: + decorator for the step + """ + @abstractmethod def attach(self, content: Any, file_name: str) -> None: """Attach specified content with given file name to the test report. 
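`RemoteProcess` above wraps a detached command whose pid, return code, stdout and stderr are materialized as files under `process_dir`. A minimal usage sketch, assuming an already-connected `Shell` implementation bound to `shell` (the command itself is illustrative):

```python
import time

from frostfs_testlib.processes.remote_process import RemoteProcess

proc = RemoteProcess.create("sleep 5 && echo done", shell, working_dir="/tmp")

while proc.running():  # running() stays True until the rc file appears
    time.sleep(1)

assert proc.rc() == 0              # return code captured by the wrapper script
print(proc.stdout(full=True))      # -> "done"
proc.clear()                       # removes the pid/rc/stdout/stderr artifacts
```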
diff --git a/src/frostfs_testlib/reporter/reporter.py b/src/frostfs_testlib/reporter/reporter.py index ea8330b..d1c75f5 100644 --- a/src/frostfs_testlib/reporter/reporter.py +++ b/src/frostfs_testlib/reporter/reporter.py @@ -1,6 +1,7 @@ from contextlib import AbstractContextManager, contextmanager +from functools import wraps from types import TracebackType -from typing import Any, Optional +from typing import Any, Callable, Optional from frostfs_testlib.plugins import load_plugin from frostfs_testlib.reporter.interfaces import ReporterHandler @@ -45,6 +46,32 @@ class Reporter: handler_class = load_plugin("frostfs.testlib.reporter", handler_config["plugin_name"]) self.register_handler(handler_class()) + def step_deco(self, name: str) -> Callable: + """Register a new step in test execution in a decorator fashion. + + To note: the actual decoration with handlers is happening during target function call time. + + Args: + name: Name of the step. + + Returns: + decorated function + """ + + def deco(func): + @wraps(func) + def wrapper(*a, **kw): + resulting_func = func + for handler in self.handlers: + decorator = handler.step_decorator(name) + resulting_func = decorator(resulting_func) + + return resulting_func(*a, **kw) + + return wrapper + + return deco + def step(self, name: str) -> AbstractContextManager: """Register a new step in test execution. diff --git a/src/frostfs_testlib/resources/cli.py b/src/frostfs_testlib/resources/cli.py new file mode 100644 index 0000000..5f7d468 --- /dev/null +++ b/src/frostfs_testlib/resources/cli.py @@ -0,0 +1,12 @@ +# Paths to CLI executables on machine that runs tests +import os + +NEOGO_EXECUTABLE = os.getenv("FROSTFS_EXECUTABLE", "neo-go") +FROSTFS_CLI_EXEC = os.getenv("FROSTFS_CLI_EXEC", "frostfs-cli") +FROSTFS_AUTHMATE_EXEC = os.getenv("FROSTFS_AUTHMATE_EXEC", "frostfs-s3-authmate") +FROSTFS_ADM_EXEC = os.getenv("FROSTFS_ADM_EXEC", "frostfs-adm") + +# Config for frostfs-adm utility. 
Optional if tests are running against devenv +FROSTFS_ADM_CONFIG_PATH = os.getenv("FROSTFS_ADM_CONFIG_PATH") + +CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", None) diff --git a/src/frostfs_testlib/resources/common.py b/src/frostfs_testlib/resources/common.py index 7744c0c..47aa976 100644 --- a/src/frostfs_testlib/resources/common.py +++ b/src/frostfs_testlib/resources/common.py @@ -1,37 +1,46 @@ -# ACLs with final flag -PUBLIC_ACL_F = "1FBFBFFF" -PRIVATE_ACL_F = "1C8C8CCC" -READONLY_ACL_F = "1FBF8CFF" +import os -# ACLs without final flag set -PUBLIC_ACL = "0FBFBFFF" -INACCESSIBLE_ACL = "40000000" -STICKY_BIT_PUB_ACL = "3FFFFFFF" +import yaml -EACL_PUBLIC_READ_WRITE = "eacl-public-read-write" +CONTAINER_WAIT_INTERVAL = "1m" -# Regex patterns of status codes of Container service -CONTAINER_NOT_FOUND = "code = 3072.*message = container not found" +SIMPLE_OBJECT_SIZE = os.getenv("SIMPLE_OBJECT_SIZE", "1000") +COMPLEX_OBJECT_CHUNKS_COUNT = os.getenv("COMPLEX_OBJECT_CHUNKS_COUNT", "3") +COMPLEX_OBJECT_TAIL_SIZE = os.getenv("COMPLEX_OBJECT_TAIL_SIZE", "1000") +SERVICE_MAX_STARTUP_TIME = os.getenv("SERVICE_MAX_STARTUP_TIME", "5m") -# Regex patterns of status codes of Object service -MALFORMED_REQUEST = "code = 1024.*message = malformed request" -OBJECT_ACCESS_DENIED = "code = 2048.*message = access to object operation denied" -OBJECT_NOT_FOUND = "code = 2049.*message = object not found" -OBJECT_ALREADY_REMOVED = "code = 2052.*message = object already removed" -SESSION_NOT_FOUND = "code = 4096.*message = session token not found" -OUT_OF_RANGE = "code = 2053.*message = out of range" -EXPIRED_SESSION_TOKEN = "code = 4097.*message = expired session token" -# TODO: Due to https://github.com/nspcc-dev/neofs-node/issues/2092 we have to check only codes until fixed -# OBJECT_IS_LOCKED = "code = 2050.*message = object is locked" -# LOCK_NON_REGULAR_OBJECT = "code = 2051.*message = ..." 
will be available once 2092 is fixed -OBJECT_IS_LOCKED = "code = 2050" -LOCK_NON_REGULAR_OBJECT = "code = 2051" +MORPH_TIMEOUT = os.getenv("MORPH_BLOCK_TIME", "8s") +MORPH_BLOCK_TIME = os.getenv("MORPH_BLOCK_TIME", "1s") +FROSTFS_CONTRACT_CACHE_TIMEOUT = os.getenv("FROSTFS_CONTRACT_CACHE_TIMEOUT", "30s") -LIFETIME_REQUIRED = "either expiration epoch of a lifetime is required" -LOCK_OBJECT_REMOVAL = "lock object removal" -LOCK_OBJECT_EXPIRATION = "lock object expiration: {expiration_epoch}; current: {current_epoch}" -INVALID_RANGE_ZERO_LENGTH = "invalid '{range}' range: zero length" -INVALID_RANGE_OVERFLOW = "invalid '{range}' range: uint64 overflow" -INVALID_OFFSET_SPECIFIER = "invalid '{range}' range offset specifier" -INVALID_LENGTH_SPECIFIER = "invalid '{range}' range length specifier" +# Time interval that allows a GC pass on storage node (this includes GC sleep interval +# of 1min plus 15 seconds for GC pass itself) +STORAGE_GC_TIME = os.getenv("STORAGE_GC_TIME", "75s") + +GAS_HASH = os.getenv("GAS_HASH", "0xd2a4cff31913016155e38e474a2c06d08be276cf") +FROSTFS_CONTRACT = os.getenv("FROSTFS_IR_CONTRACTS_FROSTFS") + +ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir") + +# Password of wallet owned by user on behalf of whom we are running tests +# Default wallet password is empty +DEFAULT_WALLET_PASS = os.getenv("WALLET_PASS", "") + +# Artificial delay that we add after object deletion and container creation +# Delay is added because sometimes immediately after deletion object still appears +# to be existing (probably because tombstone object takes some time to replicate) +# TODO: remove this wait +S3_SYNC_WAIT_TIME = 5 + +# Generate wallet config +# TODO: we should move all info about wallet configs to fixtures +DEFAULT_WALLET_CONFIG = os.path.join(os.getcwd(), "wallet_config.yml") +with open(DEFAULT_WALLET_CONFIG, "w") as file: + yaml.dump({"password": DEFAULT_WALLET_PASS}, file) + +# Number of attempts that S3 clients will attempt per each request (1 means single attempt +# without any retries) +MAX_REQUEST_ATTEMPTS = 1 +RETRY_MODE = "standard" +CREDENTIALS_CREATE_TIMEOUT = "1m" diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py new file mode 100644 index 0000000..e2e4c48 --- /dev/null +++ b/src/frostfs_testlib/resources/error_patterns.py @@ -0,0 +1,28 @@ +# Regex patterns of status codes of Container service +CONTAINER_NOT_FOUND = "code = 3072.*message = container not found" + +# Regex patterns of status codes of Object service +MALFORMED_REQUEST = "code = 1024.*message = malformed request" +OBJECT_ACCESS_DENIED = "code = 2048.*message = access to object operation denied" +OBJECT_NOT_FOUND = "code = 2049.*message = object not found" +OBJECT_ALREADY_REMOVED = "code = 2052.*message = object already removed" +SESSION_NOT_FOUND = "code = 4096.*message = session token not found" +OUT_OF_RANGE = "code = 2053.*message = out of range" +EXPIRED_SESSION_TOKEN = "code = 4097.*message = expired session token" +# TODO: Change to codes with message +# OBJECT_IS_LOCKED = "code = 2050.*message = object is locked" +# LOCK_NON_REGULAR_OBJECT = "code = 2051.*message = ..." 
will be available once 2092 is fixed
+OBJECT_IS_LOCKED = "code = 2050"
+LOCK_NON_REGULAR_OBJECT = "code = 2051"
+
+LIFETIME_REQUIRED = "either expiration epoch of a lifetime is required"
+LOCK_OBJECT_REMOVAL = "lock object removal"
+LOCK_OBJECT_EXPIRATION = "lock object expiration: {expiration_epoch}; current: {current_epoch}"
+INVALID_RANGE_ZERO_LENGTH = "invalid '{range}' range: zero length"
+INVALID_RANGE_OVERFLOW = "invalid '{range}' range: uint64 overflow"
+INVALID_OFFSET_SPECIFIER = "invalid '{range}' range offset specifier"
+INVALID_LENGTH_SPECIFIER = "invalid '{range}' range length specifier"
+
+S3_MALFORMED_XML_REQUEST = (
+    "The XML you provided was not well-formed or did not validate against our published schema."
+)
diff --git a/src/frostfs_testlib/resources/load_params.py b/src/frostfs_testlib/resources/load_params.py
new file mode 100644
index 0000000..a43d20b
--- /dev/null
+++ b/src/frostfs_testlib/resources/load_params.py
@@ -0,0 +1,30 @@
+import os
+
+# Background load node parameters
+LOAD_NODES = os.getenv("LOAD_NODES", "").split()
+# Must hardcode for now
+LOAD_NODE_SSH_USER = os.getenv("LOAD_NODE_SSH_USER", "service")
+LOAD_NODE_SSH_PASSWORD = os.getenv("LOAD_NODE_SSH_PASSWORD")
+LOAD_NODE_SSH_PRIVATE_KEY_PATH = os.getenv("LOAD_NODE_SSH_PRIVATE_KEY_PATH")
+LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE = os.getenv("LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE")
+BACKGROUND_WRITERS_COUNT = os.getenv("BACKGROUND_WRITERS_COUNT", 4)
+BACKGROUND_READERS_COUNT = os.getenv("BACKGROUND_READERS_COUNT", 4)
+BACKGROUND_DELETERS_COUNT = os.getenv("BACKGROUND_DELETERS_COUNT", 0)
+BACKGROUND_LOAD_DEFAULT_TIME = os.getenv("BACKGROUND_LOAD_DEFAULT_TIME", 600)
+BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE = os.getenv("BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE", 32)
+
+# This will decrease load params for some weak environments
+BACKGROUND_LOAD_VUS_COUNT_DIVISOR = os.getenv("BACKGROUND_LOAD_VUS_COUNT_DIVISOR", 1)
+
+# Wait for 1 hour for the xk6 verify scenario by default (in practice this means "unlimited" time)
+BACKGROUND_LOAD_MAX_VERIFY_TIME = os.getenv("BACKGROUND_LOAD_VERIFY_MAX_TIME", 3600)
+BACKGROUND_LOAD_CONTAINER_PLACEMENT_POLICY = os.getenv(
+    "BACKGROUND_LOAD_CONTAINER_PLACEMENT_POLICY", "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
+)
+BACKGROUND_LOAD_S3_LOCATION = os.getenv("BACKGROUND_LOAD_S3_LOCATION", "node-off")
+PRESET_CONTAINERS_COUNT = os.getenv("CONTAINERS_COUNT", "40")
+# TODO: At least one object is required due to a bug in xk6 (buckets with no objects produce millions of exceptions in read)
+PRESET_OBJECTS_COUNT = os.getenv("OBJ_COUNT", "10")
+K6_DIRECTORY = os.getenv("K6_DIRECTORY", "/etc/k6")
+K6_TEARDOWN_PERIOD = os.getenv("K6_TEARDOWN_PERIOD", "30")
+LOAD_CONFIG_YAML_PATH = os.getenv("LOAD_CONFIG_YAML_PATH", "load_config_yaml_file.yml")
diff --git a/src/frostfs_testlib/resources/optionals.py b/src/frostfs_testlib/resources/optionals.py
new file mode 100644
index 0000000..2a7ff22
--- /dev/null
+++ b/src/frostfs_testlib/resources/optionals.py
@@ -0,0 +1,26 @@
+import os
+
+
+def str_to_bool(input: str) -> bool:
+    return input in ["true", "True", "1"]
+
+
+# Override these optional params during local development instead of commenting out or modifying code. Use with caution.
+# Node under test. Set this to occupy an exact node.
+OPTIONAL_NODE_UNDER_TEST = os.getenv("OPTIONAL_NODE_UNDER_TEST")
+
+# Node under load. Set this to target load on an exact node.
+OPTIONAL_NODE_UNDER_LOAD = os.getenv("OPTIONAL_NODE_UNDER_LOAD")
+
+# Set this to False to disable failover commands, i.e. a node that is supposed to be stopped will not actually be stopped.
+OPTIONAL_FAILOVER_ENABLED = str_to_bool(os.getenv("OPTIONAL_FAILOVER_ENABLED", "true"))
+
+# Set this to False to disable background load, i.e. no background load will actually be started.
+OPTIONAL_BACKGROUND_LOAD_ENABLED = str_to_bool(
+    os.getenv("OPTIONAL_BACKGROUND_LOAD_ENABLED", "true")
+)
+
+# Set this to False to disable autouse fixtures (e.g. node healthcheck) during development.
+OPTIONAL_AUTOUSE_FIXTURES_ENABLED = str_to_bool(
+    os.getenv("OPTIONAL_AUTOUSE_FIXTURES_ENABLED", "true")
+)
diff --git a/src/frostfs_testlib/resources/wellknown_acl.py b/src/frostfs_testlib/resources/wellknown_acl.py
new file mode 100644
index 0000000..fe561b3
--- /dev/null
+++ b/src/frostfs_testlib/resources/wellknown_acl.py
@@ -0,0 +1,11 @@
+# ACLs with final flag
+PUBLIC_ACL_F = "1FBFBFFF"
+PRIVATE_ACL_F = "1C8C8CCC"
+READONLY_ACL_F = "1FBF8CFF"
+
+# ACLs without final flag set
+PUBLIC_ACL = "0FBFBFFF"
+INACCESSIBLE_ACL = "40000000"
+STICKY_BIT_PUB_ACL = "3FFFFFFF"
+
+EACL_PUBLIC_READ_WRITE = "eacl-public-read-write"
diff --git a/src/frostfs_testlib/s3/__init__.py b/src/frostfs_testlib/s3/__init__.py
new file mode 100644
index 0000000..32426c2
--- /dev/null
+++ b/src/frostfs_testlib/s3/__init__.py
@@ -0,0 +1,3 @@
+from frostfs_testlib.s3.aws_cli_client import AwsCliClient
+from frostfs_testlib.s3.boto3_client import Boto3ClientWrapper
+from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus
diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py
new file mode 100644
index 0000000..054a1e8
--- /dev/null
+++ b/src/frostfs_testlib/s3/aws_cli_client.py
@@ -0,0 +1,754 @@
+import json
+import logging
+import os
+import uuid
+from datetime import datetime
+from time import sleep
+from typing import Literal, Optional, Union
+
+from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib.resources.common import (
+    ASSETS_DIR,
+    MAX_REQUEST_ATTEMPTS,
+    RETRY_MODE,
+    S3_SYNC_WAIT_TIME,
+)
+from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict
+
+# TODO: Refactor this code to use shell instead of _cmd_run
+from frostfs_testlib.utils.cli_utils import _cmd_run, _configure_aws_cli
+
+reporter = get_reporter()
+logger = logging.getLogger("NeoLogger")
+LONG_TIMEOUT = 240
+
+
+class AwsCliClient(S3ClientWrapper):
+    # Flags that we use for all S3 commands: disable SSL verification (as we use a self-signed
+    # certificate in devenv) and disable automatic pagination in CLI output
+    common_flags = "--no-verify-ssl --no-paginate"
+    s3gate_endpoint: str
+
+    @reporter.step_deco("Configure S3 client (aws cli)")
+    def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None:
+        self.s3gate_endpoint = s3gate_endpoint
+        try:
+            _configure_aws_cli("aws configure", access_key_id, secret_access_key)
+            _cmd_run(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS}")
+            _cmd_run(f"aws configure set retry_mode {RETRY_MODE}")
+        except Exception as err:
+            raise RuntimeError("Error while configuring AwsCliClient") from err
+
+    @reporter.step_deco("Create bucket S3")
+    def create_bucket(
+        self,
+        bucket: Optional[str] = None,
+        object_lock_enabled_for_bucket: Optional[bool] = None,
+        acl: Optional[str] = None,
+        grant_write: Optional[str] = None,
+        grant_read: Optional[str] = None,
+        grant_full_control: Optional[str] = None,
+        location_constraint: Optional[str] = None,
+    ) -> str:
+        if bucket is None:
+            bucket = 
str(uuid.uuid4()) + + if object_lock_enabled_for_bucket is None: + object_lock = "" + elif object_lock_enabled_for_bucket: + object_lock = " --object-lock-enabled-for-bucket" + else: + object_lock = " --no-object-lock-enabled-for-bucket" + cmd = ( + f"aws {self.common_flags} s3api create-bucket --bucket {bucket} " + f"{object_lock} --endpoint {self.s3gate_endpoint}" + ) + if acl: + cmd += f" --acl {acl}" + if grant_full_control: + cmd += f" --grant-full-control {grant_full_control}" + if grant_write: + cmd += f" --grant-write {grant_write}" + if grant_read: + cmd += f" --grant-read {grant_read}" + if location_constraint: + cmd += f" --create-bucket-configuration LocationConstraint={location_constraint}" + _cmd_run(cmd) + sleep(S3_SYNC_WAIT_TIME) + + return bucket + + @reporter.step_deco("List buckets S3") + def list_buckets(self) -> list[str]: + cmd = f"aws {self.common_flags} s3api list-buckets --endpoint {self.s3gate_endpoint}" + output = _cmd_run(cmd) + buckets_json = self._to_json(output) + return [bucket["Name"] for bucket in buckets_json["Buckets"]] + + @reporter.step_deco("Delete bucket S3") + def delete_bucket(self, bucket: str) -> None: + cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint}" + _cmd_run(cmd, LONG_TIMEOUT) + sleep(S3_SYNC_WAIT_TIME) + + @reporter.step_deco("Head bucket S3") + def head_bucket(self, bucket: str) -> None: + cmd = f"aws {self.common_flags} s3api head-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint}" + _cmd_run(cmd) + + @reporter.step_deco("Put bucket versioning status") + def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: + cmd = ( + f"aws {self.common_flags} s3api put-bucket-versioning --bucket {bucket} " + f"--versioning-configuration Status={status.value} " + f"--endpoint {self.s3gate_endpoint}" + ) + _cmd_run(cmd) + + @reporter.step_deco("Get bucket versioning status") + def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: + cmd = ( + f"aws {self.common_flags} s3api get-bucket-versioning --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + return response.get("Status") + + @reporter.step_deco("Put bucket tagging") + def put_bucket_tagging(self, bucket: str, tags: list) -> None: + tags_json = { + "TagSet": [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] + } + cmd = ( + f"aws {self.common_flags} s3api put-bucket-tagging --bucket {bucket} " + f"--tagging '{json.dumps(tags_json)}' --endpoint {self.s3gate_endpoint}" + ) + _cmd_run(cmd) + + @reporter.step_deco("Get bucket tagging") + def get_bucket_tagging(self, bucket: str) -> list: + cmd = ( + f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + return response.get("TagSet") + + @reporter.step_deco("Get bucket acl") + def get_bucket_acl(self, bucket: str) -> list: + cmd = ( + f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + return response.get("Grants") + + @reporter.step_deco("Get bucket location") + def get_bucket_location(self, bucket: str) -> dict: + cmd = ( + f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + return 
response.get("LocationConstraint") + + @reporter.step_deco("List objects S3") + def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + cmd = ( + f"aws {self.common_flags} s3api list-objects --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + + obj_list = [obj["Key"] for obj in response.get("Contents", [])] + logger.info(f"Found s3 objects: {obj_list}") + + return response if full_output else obj_list + + @reporter.step_deco("List objects S3 v2") + def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + cmd = ( + f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + + obj_list = [obj["Key"] for obj in response.get("Contents", [])] + logger.info(f"Found s3 objects: {obj_list}") + + return response if full_output else obj_list + + @reporter.step_deco("List objects versions S3") + def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: + cmd = ( + f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + return response if full_output else response.get("Versions", []) + + @reporter.step_deco("List objects delete markers S3") + def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: + cmd = ( + f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + return response if full_output else response.get("DeleteMarkers", []) + + @reporter.step_deco("Copy object S3") + def copy_object( + self, + source_bucket: str, + source_key: str, + bucket: Optional[str] = None, + key: Optional[str] = None, + acl: Optional[str] = None, + metadata_directive: Optional[Literal["COPY", "REPLACE"]] = None, + metadata: Optional[dict] = None, + tagging_directive: Optional[Literal["COPY", "REPLACE"]] = None, + tagging: Optional[str] = None, + ) -> str: + if bucket is None: + bucket = source_bucket + if key is None: + key = os.path.join(os.getcwd(), str(uuid.uuid4())) + copy_source = f"{source_bucket}/{source_key}" + + cmd = ( + f"aws {self.common_flags} s3api copy-object --copy-source {copy_source} " + f"--bucket {bucket} --key {key} --endpoint {self.s3gate_endpoint}" + ) + if acl: + cmd += f" --acl {acl}" + if metadata_directive: + cmd += f" --metadata-directive {metadata_directive}" + if metadata: + cmd += " --metadata " + for meta_key, value in metadata.items(): + cmd += f" {meta_key}={value}" + if tagging_directive: + cmd += f" --tagging-directive {tagging_directive}" + if tagging: + cmd += f" --tagging {tagging}" + _cmd_run(cmd, LONG_TIMEOUT) + return key + + @reporter.step_deco("Put object S3") + def put_object( + self, + bucket: str, + filepath: str, + key: Optional[str] = None, + metadata: Optional[dict] = None, + tagging: Optional[str] = None, + acl: Optional[str] = None, + object_lock_mode: Optional[str] = None, + object_lock_retain_until_date: Optional[datetime] = None, + object_lock_legal_hold_status: Optional[str] = None, + grant_full_control: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> str: + if key is None: + key = os.path.basename(filepath) + + cmd = ( + f"aws {self.common_flags} s3api put-object --bucket {bucket} --key {key} " + f"--body 
{filepath} --endpoint {self.s3gate_endpoint}" + ) + if metadata: + cmd += " --metadata" + for key, value in metadata.items(): + cmd += f" {key}={value}" + if tagging: + cmd += f" --tagging '{tagging}'" + if acl: + cmd += f" --acl {acl}" + if object_lock_mode: + cmd += f" --object-lock-mode {object_lock_mode}" + if object_lock_retain_until_date: + cmd += f' --object-lock-retain-until-date "{object_lock_retain_until_date}"' + if object_lock_legal_hold_status: + cmd += f" --object-lock-legal-hold-status {object_lock_legal_hold_status}" + if grant_full_control: + cmd += f" --grant-full-control '{grant_full_control}'" + if grant_read: + cmd += f" --grant-read {grant_read}" + output = _cmd_run(cmd, LONG_TIMEOUT) + response = self._to_json(output) + return response.get("VersionId") + + @reporter.step_deco("Head object S3") + def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api head-object --bucket {bucket} --key {key} " + f"{version} --endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + return response + + @reporter.step_deco("Get object S3") + def get_object( + self, + bucket: str, + key: str, + version_id: Optional[str] = None, + object_range: Optional[tuple[int, int]] = None, + full_output: bool = False, + ) -> Union[dict, str]: + file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api get-object --bucket {bucket} --key {key} " + f"{version} {file_path} --endpoint {self.s3gate_endpoint}" + ) + if object_range: + cmd += f" --range bytes={object_range[0]}-{object_range[1]}" + output = _cmd_run(cmd) + response = self._to_json(output) + return response if full_output else file_path + + @reporter.step_deco("Get object ACL") + def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api get-object-acl --bucket {bucket} --key {key} " + f"{version} --endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + return response.get("Grants") + + @reporter.step_deco("Put object ACL") + def put_object_acl( + self, + bucket: str, + key: str, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> list: + cmd = ( + f"aws {self.common_flags} s3api put-object-acl --bucket {bucket} --key {key} " + f" --endpoint {self.s3gate_endpoint}" + ) + if acl: + cmd += f" --acl {acl}" + if grant_write: + cmd += f" --grant-write {grant_write}" + if grant_read: + cmd += f" --grant-read {grant_read}" + output = _cmd_run(cmd) + response = self._to_json(output) + return response.get("Grants") + + @reporter.step_deco("Put bucket ACL") + def put_bucket_acl( + self, + bucket: str, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> None: + cmd = ( + f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} " + f" --endpoint {self.s3gate_endpoint}" + ) + if acl: + cmd += f" --acl {acl}" + if grant_write: + cmd += f" --grant-write {grant_write}" + if grant_read: + cmd += f" --grant-read {grant_read}" + _cmd_run(cmd) + + @reporter.step_deco("Delete objects S3") + def delete_objects(self, bucket: str, keys: list[str]) -> dict: + file_path = 
os.path.join(os.getcwd(), ASSETS_DIR, "delete.json") + delete_structure = json.dumps(_make_objs_dict(keys)) + with open(file_path, "w") as out_file: + out_file.write(delete_structure) + logger.info(f"Input file for delete-objects: {delete_structure}") + + cmd = ( + f"aws {self.common_flags} s3api delete-objects --bucket {bucket} " + f"--delete file://{file_path} --endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd, LONG_TIMEOUT) + response = self._to_json(output) + sleep(S3_SYNC_WAIT_TIME) + return response + + @reporter.step_deco("Delete object S3") + def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api delete-object --bucket {bucket} " + f"--key {key} {version} --endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd, LONG_TIMEOUT) + sleep(S3_SYNC_WAIT_TIME) + return self._to_json(output) + + @reporter.step_deco("Delete object versions S3") + def delete_object_versions(self, bucket: str, object_versions: list) -> dict: + # Build deletion list in S3 format + delete_list = { + "Objects": [ + { + "Key": object_version["Key"], + "VersionId": object_version["VersionId"], + } + for object_version in object_versions + ] + } + + file_path = os.path.join(os.getcwd(), ASSETS_DIR, "delete.json") + delete_structure = json.dumps(delete_list) + with open(file_path, "w") as out_file: + out_file.write(delete_structure) + logger.info(f"Input file for delete-objects: {delete_structure}") + + cmd = ( + f"aws {self.common_flags} s3api delete-objects --bucket {bucket} " + f"--delete file://{file_path} --endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd, LONG_TIMEOUT) + sleep(S3_SYNC_WAIT_TIME) + return self._to_json(output) + + @reporter.step_deco("Delete object versions S3 without delete markers") + def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: + # Delete objects without creating delete markers + for object_version in object_versions: + self.delete_object( + bucket=bucket, key=object_version["Key"], version_id=object_version["VersionId"] + ) + + @reporter.step_deco("Get object attributes") + def get_object_attributes( + self, + bucket: str, + key: str, + attributes: list[str], + version_id: str = "", + max_parts: int = 0, + part_number: int = 0, + full_output: bool = True, + ) -> dict: + + attrs = ",".join(attributes) + version = f" --version-id {version_id}" if version_id else "" + parts = f"--max-parts {max_parts}" if max_parts else "" + part_number_str = f"--part-number-marker {part_number}" if part_number else "" + cmd = ( + f"aws {self.common_flags} s3api get-object-attributes --bucket {bucket} " + f"--key {key} {version} {parts} {part_number_str} --object-attributes {attrs} " + f"--endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + + for attr in attributes: + assert attr in response, f"Expected attribute {attr} in {response}" + + if full_output: + return response + else: + return response.get(attributes[0]) + + @reporter.step_deco("Get bucket policy") + def get_bucket_policy(self, bucket: str) -> dict: + cmd = ( + f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + return response.get("Policy") + + @reporter.step_deco("Put bucket policy") + def put_bucket_policy(self, bucket: str, policy: dict) -> None: + # Leaving it as 
is was in test repo. Double dumps to escape resulting string + # Example: + # policy = {"a": 1} + # json.dumps(policy) => {"a": 1} + # json.dumps(json.dumps(policy)) => "{\"a\": 1}" + # TODO: update this + dumped_policy = json.dumps(json.dumps(policy)) + cmd = ( + f"aws {self.common_flags} s3api put-bucket-policy --bucket {bucket} " + f"--policy {dumped_policy} --endpoint {self.s3gate_endpoint}" + ) + _cmd_run(cmd) + + @reporter.step_deco("Get bucket cors") + def get_bucket_cors(self, bucket: str) -> dict: + cmd = ( + f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + return response.get("CORSRules") + + @reporter.step_deco("Put bucket cors") + def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: + cmd = ( + f"aws {self.common_flags} s3api put-bucket-cors --bucket {bucket} " + f"--cors-configuration '{json.dumps(cors_configuration)}' --endpoint {self.s3gate_endpoint}" + ) + _cmd_run(cmd) + + @reporter.step_deco("Delete bucket cors") + def delete_bucket_cors(self, bucket: str) -> None: + cmd = ( + f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint}" + ) + _cmd_run(cmd) + + @reporter.step_deco("Delete bucket tagging") + def delete_bucket_tagging(self, bucket: str) -> None: + cmd = ( + f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint}" + ) + _cmd_run(cmd) + + @reporter.step_deco("Put object retention") + def put_object_retention( + self, + bucket: str, + key: str, + retention: dict, + version_id: Optional[str] = None, + bypass_governance_retention: Optional[bool] = None, + ) -> None: + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api put-object-retention --bucket {bucket} --key {key} " + f"{version} --retention '{json.dumps(retention, indent=4, sort_keys=True, default=str)}' --endpoint {self.s3gate_endpoint}" + ) + if bypass_governance_retention is not None: + cmd += " --bypass-governance-retention" + _cmd_run(cmd) + + @reporter.step_deco("Put object legal hold") + def put_object_legal_hold( + self, + bucket: str, + key: str, + legal_hold_status: Literal["ON", "OFF"], + version_id: Optional[str] = None, + ) -> None: + version = f" --version-id {version_id}" if version_id else "" + legal_hold = json.dumps({"Status": legal_hold_status}) + cmd = ( + f"aws {self.common_flags} s3api put-object-legal-hold --bucket {bucket} --key {key} " + f"{version} --legal-hold '{legal_hold}' --endpoint {self.s3gate_endpoint}" + ) + _cmd_run(cmd) + + @reporter.step_deco("Put object tagging") + def put_object_tagging(self, bucket: str, key: str, tags: list) -> None: + tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] + tagging = {"TagSet": tags} + cmd = ( + f"aws {self.common_flags} s3api put-object-tagging --bucket {bucket} --key {key} " + f"--tagging '{json.dumps(tagging)}' --endpoint {self.s3gate_endpoint}" + ) + _cmd_run(cmd) + + @reporter.step_deco("Get object tagging") + def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: + version = f" --version-id {version_id}" if version_id else "" + cmd = ( + f"aws {self.common_flags} s3api get-object-tagging --bucket {bucket} --key {key} " + f"{version} --endpoint {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + return response.get("TagSet") + + 
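+    # Example: put_object_tagging(bucket, key, [("env", "dev")]) converts the
+    # (key, value) pairs above into the S3 TagSet shape and passes
+    # --tagging '{"TagSet": [{"Key": "env", "Value": "dev"}]}' to the CLI.
+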
@reporter.step_deco("Delete object tagging") + def delete_object_tagging(self, bucket: str, key: str) -> None: + cmd = ( + f"aws {self.common_flags} s3api delete-object-tagging --bucket {bucket} " + f"--key {key} --endpoint {self.s3gate_endpoint}" + ) + _cmd_run(cmd) + + @reporter.step_deco("Sync directory S3") + def sync( + self, + bucket: str, + dir_path: str, + acl: Optional[str] = None, + metadata: Optional[dict] = None, + ) -> dict: + cmd = ( + f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " + f"--endpoint-url {self.s3gate_endpoint}" + ) + if metadata: + cmd += " --metadata" + for key, value in metadata.items(): + cmd += f" {key}={value}" + if acl: + cmd += f" --acl {acl}" + output = _cmd_run(cmd, LONG_TIMEOUT) + return self._to_json(output) + + @reporter.step_deco("CP directory S3") + def cp( + self, + bucket: str, + dir_path: str, + acl: Optional[str] = None, + metadata: Optional[dict] = None, + ) -> dict: + cmd = ( + f"aws {self.common_flags} s3 cp {dir_path} s3://{bucket} " + f"--endpoint-url {self.s3gate_endpoint} --recursive" + ) + if metadata: + cmd += " --metadata" + for key, value in metadata.items(): + cmd += f" {key}={value}" + if acl: + cmd += f" --acl {acl}" + output = _cmd_run(cmd, LONG_TIMEOUT) + return self._to_json(output) + + @reporter.step_deco("Create multipart upload S3") + def create_multipart_upload(self, bucket: str, key: str) -> str: + cmd = ( + f"aws {self.common_flags} s3api create-multipart-upload --bucket {bucket} " + f"--key {key} --endpoint-url {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + + assert response.get("UploadId"), f"Expected UploadId in response:\n{response}" + + return response["UploadId"] + + @reporter.step_deco("List multipart uploads S3") + def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: + cmd = ( + f"aws {self.common_flags} s3api list-multipart-uploads --bucket {bucket} " + f"--endpoint-url {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + return response.get("Uploads") + + @reporter.step_deco("Abort multipart upload S3") + def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: + cmd = ( + f"aws {self.common_flags} s3api abort-multipart-upload --bucket {bucket} " + f"--key {key} --upload-id {upload_id} --endpoint-url {self.s3gate_endpoint}" + ) + _cmd_run(cmd) + + @reporter.step_deco("Upload part S3") + def upload_part( + self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str + ) -> str: + cmd = ( + f"aws {self.common_flags} s3api upload-part --bucket {bucket} --key {key} " + f"--upload-id {upload_id} --part-number {part_num} --body {filepath} " + f"--endpoint-url {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd, LONG_TIMEOUT) + response = self._to_json(output) + assert response.get("ETag"), f"Expected ETag in response:\n{response}" + return response["ETag"] + + @reporter.step_deco("Upload copy part S3") + def upload_part_copy( + self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str + ) -> str: + cmd = ( + f"aws {self.common_flags} s3api upload-part-copy --bucket {bucket} --key {key} " + f"--upload-id {upload_id} --part-number {part_num} --copy-source {copy_source} " + f"--endpoint-url {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd, LONG_TIMEOUT) + response = self._to_json(output) + assert response.get("CopyPartResult", []).get( + "ETag" + ), f"Expected ETag in response:\n{response}" + + return response["CopyPartResult"]["ETag"] + + 
@reporter.step_deco("List parts S3") + def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: + cmd = ( + f"aws {self.common_flags} s3api list-parts --bucket {bucket} --key {key} " + f"--upload-id {upload_id} --endpoint-url {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + + assert response.get("Parts"), f"Expected Parts in response:\n{response}" + + return response["Parts"] + + @reporter.step_deco("Complete multipart upload S3") + def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: + file_path = os.path.join(os.getcwd(), ASSETS_DIR, "parts.json") + parts_dict = {"Parts": [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]} + + with open(file_path, "w") as out_file: + out_file.write(json.dumps(parts_dict)) + + logger.info(f"Input file for complete-multipart-upload: {json.dumps(parts_dict)}") + + cmd = ( + f"aws {self.common_flags} s3api complete-multipart-upload --bucket {bucket} " + f"--key {key} --upload-id {upload_id} --multipart-upload file://{file_path} " + f"--endpoint-url {self.s3gate_endpoint}" + ) + _cmd_run(cmd) + + @reporter.step_deco("Put object lock configuration") + def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: + cmd = ( + f"aws {self.common_flags} s3api put-object-lock-configuration --bucket {bucket} " + f"--object-lock-configuration '{json.dumps(configuration)}' --endpoint-url {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + return self._to_json(output) + + @reporter.step_deco("Get object lock configuration") + def get_object_lock_configuration(self, bucket: str): + cmd = ( + f"aws {self.common_flags} s3api get-object-lock-configuration --bucket {bucket} " + f"--endpoint-url {self.s3gate_endpoint}" + ) + output = _cmd_run(cmd) + response = self._to_json(output) + return response.get("ObjectLockConfiguration") + + @staticmethod + def _to_json(output: str) -> dict: + json_output = {} + if "{" not in output and "}" not in output: + logger.warning(f"Could not parse json from output {output}") + return json_output + + json_output = json.loads(output[output.index("{") :]) + + return json_output diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py new file mode 100644 index 0000000..07c693f --- /dev/null +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -0,0 +1,661 @@ +import json +import logging +import os +import uuid +from datetime import datetime +from functools import wraps +from time import sleep +from typing import Literal, Optional, Union + +import boto3 +import urllib3 +from botocore.config import Config +from botocore.exceptions import ClientError +from mypy_boto3_s3 import S3Client + +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.resources.common import ( + ASSETS_DIR, + MAX_REQUEST_ATTEMPTS, + RETRY_MODE, + S3_SYNC_WAIT_TIME, +) +from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict +from frostfs_testlib.utils.cli_utils import log_command_execution + +reporter = get_reporter() +logger = logging.getLogger("NeoLogger") + +# Disable warnings on self-signed certificate which the +# boto library produces on requests to S3-gate in dev-env +urllib3.disable_warnings() + + +def report_error(func): + @wraps(func) + def deco(*a, **kw): + try: + return func(*a, **kw) + except ClientError as err: + log_command_execution("Result", str(err)) + raise + + return deco + + +class Boto3ClientWrapper(S3ClientWrapper): + 
@reporter.step_deco("Configure S3 client (boto3)") + @report_error + def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None: + session = boto3.Session() + config = Config( + retries={ + "max_attempts": MAX_REQUEST_ATTEMPTS, + "mode": RETRY_MODE, + } + ) + + self.boto3_client: S3Client = session.client( + service_name="s3", + aws_access_key_id=access_key_id, + aws_secret_access_key=secret_access_key, + config=config, + endpoint_url=s3gate_endpoint, + verify=False, + ) + + def _to_s3_param(self, param: str): + replacement_map = { + "Acl": "ACL", + "Cors": "CORS", + "_": "", + } + result = param.title() + for find, replace in replacement_map.items(): + result = result.replace(find, replace) + return result + + # BUCKET METHODS # + @reporter.step_deco("Create bucket S3") + @report_error + def create_bucket( + self, + bucket: Optional[str] = None, + object_lock_enabled_for_bucket: Optional[bool] = None, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + grant_full_control: Optional[str] = None, + location_constraint: Optional[str] = None, + ) -> str: + if bucket is None: + bucket = str(uuid.uuid4()) + + params = {"Bucket": bucket} + if object_lock_enabled_for_bucket is not None: + params.update({"ObjectLockEnabledForBucket": object_lock_enabled_for_bucket}) + if acl is not None: + params.update({"ACL": acl}) + elif grant_write or grant_read or grant_full_control: + if grant_write: + params.update({"GrantWrite": grant_write}) + elif grant_read: + params.update({"GrantRead": grant_read}) + elif grant_full_control: + params.update({"GrantFullControl": grant_full_control}) + if location_constraint: + params.update( + {"CreateBucketConfiguration": {"LocationConstraint": location_constraint}} + ) + + s3_bucket = self.boto3_client.create_bucket(**params) + log_command_execution(f"Created S3 bucket {bucket}", s3_bucket) + sleep(S3_SYNC_WAIT_TIME) + return bucket + + @reporter.step_deco("List buckets S3") + @report_error + def list_buckets(self) -> list[str]: + found_buckets = [] + + response = self.boto3_client.list_buckets() + log_command_execution("S3 List buckets result", response) + + for bucket in response["Buckets"]: + found_buckets.append(bucket["Name"]) + + return found_buckets + + @reporter.step_deco("Delete bucket S3") + @report_error + def delete_bucket(self, bucket: str) -> None: + response = self.boto3_client.delete_bucket(Bucket=bucket) + log_command_execution("S3 Delete bucket result", response) + sleep(S3_SYNC_WAIT_TIME) + + @reporter.step_deco("Head bucket S3") + @report_error + def head_bucket(self, bucket: str) -> None: + response = self.boto3_client.head_bucket(Bucket=bucket) + log_command_execution("S3 Head bucket result", response) + + @reporter.step_deco("Put bucket versioning status") + @report_error + def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: + response = self.boto3_client.put_bucket_versioning( + Bucket=bucket, VersioningConfiguration={"Status": status.value} + ) + log_command_execution("S3 Set bucket versioning to", response) + + @reporter.step_deco("Get bucket versioning status") + @report_error + def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: + response = self.boto3_client.get_bucket_versioning(Bucket=bucket) + status = response.get("Status") + log_command_execution("S3 Got bucket versioning status", response) + return status + + @reporter.step_deco("Put bucket tagging") + @report_error + def 
put_bucket_tagging(self, bucket: str, tags: list) -> None: + tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] + tagging = {"TagSet": tags} + response = self.boto3_client.put_bucket_tagging(Bucket=bucket, Tagging=tagging) + log_command_execution("S3 Put bucket tagging", response) + + @reporter.step_deco("Get bucket tagging") + @report_error + def get_bucket_tagging(self, bucket: str) -> list: + response = self.boto3_client.get_bucket_tagging(Bucket=bucket) + log_command_execution("S3 Get bucket tagging", response) + return response.get("TagSet") + + @reporter.step_deco("Get bucket acl") + @report_error + def get_bucket_acl(self, bucket: str) -> list: + response = self.boto3_client.get_bucket_acl(Bucket=bucket) + log_command_execution("S3 Get bucket acl", response) + return response.get("Grants") + + @reporter.step_deco("Delete bucket tagging") + @report_error + def delete_bucket_tagging(self, bucket: str) -> None: + response = self.boto3_client.delete_bucket_tagging(Bucket=bucket) + log_command_execution("S3 Delete bucket tagging", response) + + @reporter.step_deco("Put bucket ACL") + @report_error + def put_bucket_acl( + self, + bucket: str, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> None: + params = { + self._to_s3_param(param): value + for param, value in locals().items() + if param not in ["self"] and value is not None + } + response = self.boto3_client.put_bucket_acl(**params) + log_command_execution("S3 ACL bucket result", response) + + @reporter.step_deco("Put object lock configuration") + @report_error + def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: + response = self.boto3_client.put_object_lock_configuration( + Bucket=bucket, ObjectLockConfiguration=configuration + ) + log_command_execution("S3 put_object_lock_configuration result", response) + return response + + @reporter.step_deco("Get object lock configuration") + @report_error + def get_object_lock_configuration(self, bucket: str) -> dict: + response = self.boto3_client.get_object_lock_configuration(Bucket=bucket) + log_command_execution("S3 get_object_lock_configuration result", response) + return response.get("ObjectLockConfiguration") + + @reporter.step_deco("Get bucket policy") + @report_error + def get_bucket_policy(self, bucket: str) -> str: + response = self.boto3_client.get_bucket_policy(Bucket=bucket) + log_command_execution("S3 get_bucket_policy result", response) + return response.get("Policy") + + @reporter.step_deco("Put bucket policy") + @report_error + def put_bucket_policy(self, bucket: str, policy: dict) -> None: + response = self.boto3_client.put_bucket_policy(Bucket=bucket, Policy=json.dumps(policy)) + log_command_execution("S3 put_bucket_policy result", response) + return response + + @reporter.step_deco("Get bucket cors") + @report_error + def get_bucket_cors(self, bucket: str) -> dict: + response = self.boto3_client.get_bucket_cors(Bucket=bucket) + log_command_execution("S3 get_bucket_cors result", response) + return response.get("CORSRules") + + @reporter.step_deco("Get bucket location") + @report_error + def get_bucket_location(self, bucket: str) -> str: + response = self.boto3_client.get_bucket_location(Bucket=bucket) + log_command_execution("S3 get_bucket_location result", response) + return response.get("LocationConstraint") + + @reporter.step_deco("Put bucket cors") + @report_error + def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: + response = 
self.boto3_client.put_bucket_cors( + Bucket=bucket, CORSConfiguration=cors_configuration + ) + log_command_execution("S3 put_bucket_cors result", response) + return response + + @reporter.step_deco("Delete bucket cors") + @report_error + def delete_bucket_cors(self, bucket: str) -> None: + response = self.boto3_client.delete_bucket_cors(Bucket=bucket) + log_command_execution("S3 delete_bucket_cors result", response) + + # END OF BUCKET METHODS # + # OBJECT METHODS # + + @reporter.step_deco("List objects S3 v2") + @report_error + def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + response = self.boto3_client.list_objects_v2(Bucket=bucket) + log_command_execution("S3 v2 List objects result", response) + + obj_list = [obj["Key"] for obj in response.get("Contents", [])] + logger.info(f"Found s3 objects: {obj_list}") + + return response if full_output else obj_list + + @reporter.step_deco("List objects S3") + @report_error + def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + response = self.boto3_client.list_objects(Bucket=bucket) + log_command_execution("S3 List objects result", response) + + obj_list = [obj["Key"] for obj in response.get("Contents", [])] + logger.info(f"Found s3 objects: {obj_list}") + + return response if full_output else obj_list + + @reporter.step_deco("List objects versions S3") + @report_error + def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: + response = self.boto3_client.list_object_versions(Bucket=bucket) + log_command_execution("S3 List objects versions result", response) + return response if full_output else response.get("Versions", []) + + @reporter.step_deco("List objects delete markers S3") + @report_error + def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: + response = self.boto3_client.list_object_versions(Bucket=bucket) + log_command_execution("S3 List objects delete markers result", response) + return response if full_output else response.get("DeleteMarkers", []) + + @reporter.step_deco("Put object S3") + @report_error + def put_object( + self, + bucket: str, + filepath: str, + key: Optional[str] = None, + metadata: Optional[dict] = None, + tagging: Optional[str] = None, + acl: Optional[str] = None, + object_lock_mode: Optional[str] = None, + object_lock_retain_until_date: Optional[datetime] = None, + object_lock_legal_hold_status: Optional[str] = None, + grant_full_control: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> str: + if key is None: + key = os.path.basename(filepath) + + with open(filepath, "rb") as put_file: + body = put_file.read() + + params = { + self._to_s3_param(param): value + for param, value in locals().items() + if param not in ["self", "filepath", "put_file"] and value is not None + } + response = self.boto3_client.put_object(**params) + log_command_execution("S3 Put object result", response) + return response.get("VersionId") + + @reporter.step_deco("Head object S3") + @report_error + def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: + params = { + self._to_s3_param(param): value + for param, value in locals().items() + if param not in ["self"] and value is not None + } + response = self.boto3_client.head_object(**params) + log_command_execution("S3 Head object result", response) + return response + + @reporter.step_deco("Delete object S3") + @report_error + def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) 
-> dict: + params = { + self._to_s3_param(param): value + for param, value in locals().items() + if param not in ["self"] and value is not None + } + response = self.boto3_client.delete_object(**params) + log_command_execution("S3 Delete object result", response) + sleep(S3_SYNC_WAIT_TIME) + return response + + @reporter.step_deco("Delete objects S3") + @report_error + def delete_objects(self, bucket: str, keys: list[str]) -> dict: + response = self.boto3_client.delete_objects(Bucket=bucket, Delete=_make_objs_dict(keys)) + log_command_execution("S3 Delete objects result", response) + assert ( + "Errors" not in response + ), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Message: {response["Errors"]["Message"]}' + sleep(S3_SYNC_WAIT_TIME) + return response + + @reporter.step_deco("Delete object versions S3") + @report_error + def delete_object_versions(self, bucket: str, object_versions: list) -> dict: + # Build deletion list in S3 format + delete_list = { + "Objects": [ + { + "Key": object_version["Key"], + "VersionId": object_version["VersionId"], + } + for object_version in object_versions + ] + } + response = self.boto3_client.delete_objects(Bucket=bucket, Delete=delete_list) + log_command_execution("S3 Delete objects result", response) + return response + + @reporter.step_deco("Delete object versions S3 without delete markers") + @report_error + def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: + # Delete objects without creating delete markers + for object_version in object_versions: + response = self.boto3_client.delete_object( + Bucket=bucket, Key=object_version["Key"], VersionId=object_version["VersionId"] + ) + log_command_execution("S3 Delete object result", response) + + @reporter.step_deco("Put object ACL") + @report_error + def put_object_acl( + self, + bucket: str, + key: str, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> list: + # pytest.skip("Method put_object_acl is not supported by boto3 client") + raise NotImplementedError("Unsupported for boto3 client") + + @reporter.step_deco("Get object ACL") + @report_error + def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: + params = { + self._to_s3_param(param): value + for param, value in locals().items() + if param not in ["self"] and value is not None + } + response = self.boto3_client.get_object_acl(**params) + log_command_execution("S3 ACL objects result", response) + return response.get("Grants") + + @reporter.step_deco("Copy object S3") + @report_error + def copy_object( + self, + source_bucket: str, + source_key: str, + bucket: Optional[str] = None, + key: Optional[str] = None, + acl: Optional[str] = None, + metadata_directive: Optional[Literal["COPY", "REPLACE"]] = None, + metadata: Optional[dict] = None, + tagging_directive: Optional[Literal["COPY", "REPLACE"]] = None, + tagging: Optional[str] = None, + ) -> str: + if bucket is None: + bucket = source_bucket + if key is None: + key = os.path.join(os.getcwd(), str(uuid.uuid4())) + copy_source = f"{source_bucket}/{source_key}" + + params = { + self._to_s3_param(param): value + for param, value in locals().items() + if param not in ["self", "source_bucket", "source_key"] and value is not None + } + response = self.boto3_client.copy_object(**params) + log_command_execution("S3 Copy objects result", response) + return key + + @reporter.step_deco("Get object S3") + 
@report_error + def get_object( + self, + bucket: str, + key: str, + version_id: Optional[str] = None, + object_range: Optional[tuple[int, int]] = None, + full_output: bool = False, + ) -> Union[dict, str]: + filename = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) + range_str = None + if object_range: + range_str = f"bytes={object_range[0]}-{object_range[1]}" + + params = { + self._to_s3_param(param): value + for param, value in {**locals(), **{"Range": range_str}}.items() + if param not in ["self", "object_range", "full_output", "range_str", "filename"] + and value is not None + } + response = self.boto3_client.get_object(**params) + log_command_execution("S3 Get objects result", response) + + with open(f"{filename}", "wb") as get_file: + chunk = response["Body"].read(1024) + while chunk: + get_file.write(chunk) + chunk = response["Body"].read(1024) + return response if full_output else filename + + @reporter.step_deco("Create multipart upload S3") + @report_error + def create_multipart_upload(self, bucket: str, key: str) -> str: + response = self.boto3_client.create_multipart_upload(Bucket=bucket, Key=key) + log_command_execution("S3 Created multipart upload", response) + assert response.get("UploadId"), f"Expected UploadId in response:\n{response}" + + return response["UploadId"] + + @reporter.step_deco("List multipart uploads S3") + @report_error + def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: + response = self.boto3_client.list_multipart_uploads(Bucket=bucket) + log_command_execution("S3 List multipart upload", response) + + return response.get("Uploads") + + @reporter.step_deco("Abort multipart upload S3") + @report_error + def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: + response = self.boto3_client.abort_multipart_upload( + Bucket=bucket, Key=key, UploadId=upload_id + ) + log_command_execution("S3 Abort multipart upload", response) + + @reporter.step_deco("Upload part S3") + @report_error + def upload_part( + self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str + ) -> str: + with open(filepath, "rb") as put_file: + body = put_file.read() + + response = self.boto3_client.upload_part( + UploadId=upload_id, + Bucket=bucket, + Key=key, + PartNumber=part_num, + Body=body, + ) + log_command_execution("S3 Upload part", response) + assert response.get("ETag"), f"Expected ETag in response:\n{response}" + + return response["ETag"] + + @reporter.step_deco("Upload copy part S3") + @report_error + def upload_part_copy( + self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str + ) -> str: + response = self.boto3_client.upload_part_copy( + UploadId=upload_id, + Bucket=bucket, + Key=key, + PartNumber=part_num, + CopySource=copy_source, + ) + log_command_execution("S3 Upload copy part", response) + assert response.get("CopyPartResult", []).get( + "ETag" + ), f"Expected ETag in response:\n{response}" + + return response["CopyPartResult"]["ETag"] + + @reporter.step_deco("List parts S3") + @report_error + def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: + response = self.boto3_client.list_parts(UploadId=upload_id, Bucket=bucket, Key=key) + log_command_execution("S3 List part", response) + assert response.get("Parts"), f"Expected Parts in response:\n{response}" + + return response["Parts"] + + @reporter.step_deco("Complete multipart upload S3") + @report_error + def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: + parts 
= [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts] + response = self.boto3_client.complete_multipart_upload( + Bucket=bucket, Key=key, UploadId=upload_id, MultipartUpload={"Parts": parts} + ) + log_command_execution("S3 Complete multipart upload", response) + + @reporter.step_deco("Put object retention") + @report_error + def put_object_retention( + self, + bucket: str, + key: str, + retention: dict, + version_id: Optional[str] = None, + bypass_governance_retention: Optional[bool] = None, + ) -> None: + params = { + self._to_s3_param(param): value + for param, value in locals().items() + if param not in ["self"] and value is not None + } + response = self.boto3_client.put_object_retention(**params) + log_command_execution("S3 Put object retention ", response) + + @reporter.step_deco("Put object legal hold") + @report_error + def put_object_legal_hold( + self, + bucket: str, + key: str, + legal_hold_status: Literal["ON", "OFF"], + version_id: Optional[str] = None, + ) -> None: + legal_hold = {"Status": legal_hold_status} + params = { + self._to_s3_param(param): value + for param, value in locals().items() + if param not in ["self", "legal_hold_status"] and value is not None + } + response = self.boto3_client.put_object_legal_hold(**params) + log_command_execution("S3 Put object legal hold ", response) + + @reporter.step_deco("Put object tagging") + @report_error + def put_object_tagging(self, bucket: str, key: str, tags: list) -> None: + tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] + tagging = {"TagSet": tags} + response = self.boto3_client.put_object_tagging(Bucket=bucket, Key=key, Tagging=tagging) + log_command_execution("S3 Put object tagging", response) + + @reporter.step_deco("Get object tagging") + @report_error + def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: + params = { + self._to_s3_param(param): value + for param, value in locals().items() + if param not in ["self"] and value is not None + } + response = self.boto3_client.get_object_tagging(**params) + log_command_execution("S3 Get object tagging", response) + return response.get("TagSet") + + @reporter.step_deco("Delete object tagging") + @report_error + def delete_object_tagging(self, bucket: str, key: str) -> None: + response = self.boto3_client.delete_object_tagging(Bucket=bucket, Key=key) + log_command_execution("S3 Delete object tagging", response) + + @reporter.step_deco("Get object attributes") + @report_error + def get_object_attributes( + self, + bucket: str, + key: str, + attributes: list[str], + version_id: Optional[str] = None, + max_parts: Optional[int] = None, + part_number: Optional[int] = None, + full_output: bool = True, + ) -> dict: + logger.warning("Method get_object_attributes is not supported by boto3 client") + return {} + + @reporter.step_deco("Sync directory S3") + @report_error + def sync( + self, + bucket: str, + dir_path: str, + acl: Optional[str] = None, + metadata: Optional[dict] = None, + ) -> dict: + raise NotImplementedError("Sync is not supported for boto3 client") + + @reporter.step_deco("CP directory S3") + @report_error + def cp( + self, + bucket: str, + dir_path: str, + acl: Optional[str] = None, + metadata: Optional[dict] = None, + ) -> dict: + raise NotImplementedError("Cp is not supported for boto3 client") + + # END OBJECT METHODS # diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py new file mode 100644 index 0000000..bd1379c --- /dev/null +++ 
b/src/frostfs_testlib/s3/interfaces.py @@ -0,0 +1,378 @@ +from abc import ABC, abstractmethod +from datetime import datetime +from enum import Enum +from typing import Literal, Optional, Union + + +def _make_objs_dict(key_names): + objs_list = [] + for key in key_names: + obj_dict = {"Key": key} + objs_list.append(obj_dict) + objs_dict = {"Objects": objs_list} + return objs_dict + + +class VersioningStatus(Enum): + ENABLED = "Enabled" + SUSPENDED = "Suspended" + + +ACL_COPY = [ + "private", + "public-read", + "public-read-write", + "authenticated-read", + "aws-exec-read", + "bucket-owner-read", + "bucket-owner-full-control", +] + + +class S3ClientWrapper(ABC): + @abstractmethod + def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None: + pass + + @abstractmethod + def create_bucket( + self, + bucket: Optional[str] = None, + object_lock_enabled_for_bucket: Optional[bool] = None, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + grant_full_control: Optional[str] = None, + location_constraint: Optional[str] = None, + ) -> str: + """Create a bucket.""" + + # BUCKET METHODS # + + @abstractmethod + def list_buckets(self) -> list[str]: + """List buckets.""" + + @abstractmethod + def delete_bucket(self, bucket: str) -> None: + """Delete bucket""" + + @abstractmethod + def head_bucket(self, bucket: str) -> None: + """This action is useful to determine if a bucket exists and you have permission to access it. + The action returns a 200 OK if the bucket exists and you have permission to access it. + + If the bucket does not exist or you do not have permission to access it, the HEAD request + returns a generic 400 Bad Request, 403 Forbidden or 404 Not Found code. + A message body is not included, so you cannot determine the exception beyond these error codes. + """ + + @abstractmethod + def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: + """Sets the versioning state of an existing bucket. + + You can set the versioning state with one of the following values: + + Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique version ID. + + Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null. + + If the versioning state has never been set on a bucket, it has no versioning state + """ + + @abstractmethod + def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: + """Returns the versioning state of a bucket. + + To retrieve the versioning state of a bucket, you must be the bucket owner. 
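+
+        Example (illustrative sketch; `client` is any concrete S3ClientWrapper
+        implementation and the bucket name is an assumption):
+            client.put_bucket_versioning("my-bucket", VersioningStatus.ENABLED)
+            assert client.get_bucket_versioning_status("my-bucket") == "Enabled"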
+ """ + + @abstractmethod + def put_bucket_tagging(self, bucket: str, tags: list) -> None: + """Sets the tags for a bucket.""" + + @abstractmethod + def get_bucket_tagging(self, bucket: str) -> list: + """Returns the tag set associated with the Outposts bucket.""" + + @abstractmethod + def delete_bucket_tagging(self, bucket: str) -> None: + """Deletes the tags from the bucket.""" + + @abstractmethod + def get_bucket_acl(self, bucket: str) -> list: + """This implementation of the GET action uses the acl subresource to return the access control list (ACL) of a bucket.""" + + @abstractmethod + def put_bucket_acl( + self, + bucket: str, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> list: + """Sets the permissions on an existing bucket using access control lists (ACL).""" + + @abstractmethod + def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: + """Places an Object Lock configuration on the specified bucket. + The rule specified in the Object Lock configuration will be applied by + default to every new object placed in the specified bucket.""" + + @abstractmethod + def get_object_lock_configuration(self, bucket: str) -> dict: + """Gets the Object Lock configuration for a bucket. + The rule specified in the Object Lock configuration will be applied by + default to every new object placed in the specified bucket.""" + + @abstractmethod + def get_bucket_policy(self, bucket: str) -> str: + """Returns the policy of a specified bucket.""" + + @abstractmethod + def put_bucket_policy(self, bucket: str, policy: dict) -> None: + """Applies S3 bucket policy to an S3 bucket.""" + + @abstractmethod + def get_bucket_cors(self, bucket: str) -> dict: + """Returns the Cross-Origin Resource Sharing (CORS) configuration information set for the bucket.""" + + @abstractmethod + def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: + """Sets the cors configuration for your bucket. If the configuration exists, S3 replaces it.""" + + @abstractmethod + def delete_bucket_cors(self, bucket: str) -> None: + """Deletes the cors configuration information set for the bucket.""" + + @abstractmethod + def get_bucket_location(self, bucket: str) -> str: + """Returns the LocationConstraint the bucket resides in. You can set the it + using the LocationConstraint request parameter in a CreateBucket request.""" + + # END OF BUCKET METHODS # + + # OBJECT METHODS # + + @abstractmethod + def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + """Returns some or all (up to 1,000) of the objects in a bucket with each request. + You can use the request parameters as selection criteria to return a subset of the objects in a bucket. + A 200 OK response can contain valid or invalid XML. Make sure to design your application + to parse the contents of the response and handle it appropriately. + """ + + @abstractmethod + def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + """Returns some or all (up to 1,000) of the objects in a bucket with each request. + You can use the request parameters as selection criteria to return a subset of the objects in a bucket. + A 200 OK response can contain valid or invalid XML. Make sure to design your application + to parse the contents of the response and handle it appropriately. 
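+
+        Example (illustrative sketch; bucket name is an assumption):
+            keys = client.list_objects("my-bucket")                    # list[str] of keys
+            full = client.list_objects("my-bucket", full_output=True)  # raw response dict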
+ """ + + @abstractmethod + def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: + """Returns metadata about all versions of the objects in a bucket.""" + + @abstractmethod + def list_delete_markers(self, bucket: str, full_output: bool = False) -> dict: + """Returns metadata about all delete markers of the objects in a bucket.""" + + @abstractmethod + def put_object( + self, + bucket: str, + filepath: str, + key: Optional[str] = None, + metadata: Optional[dict] = None, + tagging: Optional[str] = None, + acl: Optional[str] = None, + object_lock_mode: Optional[str] = None, + object_lock_retain_until_date: Optional[datetime] = None, + object_lock_legal_hold_status: Optional[str] = None, + grant_full_control: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> str: + """Adds an object to a bucket.""" + + @abstractmethod + def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: + """The HEAD action retrieves metadata from an object without returning the object itself. + This action is useful if you're only interested in an object's metadata.""" + + @abstractmethod + def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: + """Removes the null version (if there is one) of an object and inserts a delete marker, + which becomes the latest version of the object. If there isn't a null version, + S3 does not remove any objects but will still respond that the command was successful.""" + + @abstractmethod + def delete_objects(self, bucket: str, keys: list[str]) -> dict: + """This action enables you to delete multiple objects from a bucket + using a single HTTP request. If you know the object keys that + you want to delete, then this action provides a suitable alternative + to sending individual delete requests, reducing per-request overhead. 
+ + The request contains a list of up to 1000 keys that you want to delete.""" + + @abstractmethod + def delete_object_versions(self, bucket: str, object_versions: list) -> dict: + """Delete object versions""" + + @abstractmethod + def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: + """Delete object versions without delete markers""" + + @abstractmethod + def put_object_acl( + self, + bucket: str, + key: str, + acl: Optional[str] = None, + grant_write: Optional[str] = None, + grant_read: Optional[str] = None, + ) -> list: + """Uses the acl subresource to set the access control + list (ACL) permissions for a new or existing object in an S3 bucket.""" + + @abstractmethod + def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: + """Returns the access control list (ACL) of an object.""" + + @abstractmethod + def copy_object( + self, + source_bucket: str, + source_key: str, + bucket: Optional[str] = None, + key: Optional[str] = None, + acl: Optional[str] = None, + metadata_directive: Optional[Literal["COPY", "REPLACE"]] = None, + metadata: Optional[dict] = None, + tagging_directive: Optional[Literal["COPY", "REPLACE"]] = None, + tagging: Optional[str] = None, + ) -> str: + """Creates a copy of an object""" + + @abstractmethod + def get_object( + self, + bucket: str, + key: str, + version_id: Optional[str] = None, + object_range: Optional[tuple[int, int]] = None, + full_output: bool = False, + ) -> Union[dict, str]: + """Retrieves objects from S3.""" + + @abstractmethod + def create_multipart_upload(self, bucket: str, key: str) -> str: + """This action initiates a multipart upload and returns an upload ID. + This upload ID is used to associate all of the parts in the specific multipart upload. + You specify this upload ID in each of your subsequent upload part requests (see UploadPart). + You also include this upload ID in the final request to either complete or abort the multipart upload request.""" + + @abstractmethod + def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: + """This action lists in-progress multipart uploads. + An in-progress multipart upload is a multipart upload that has been initiated + using the Initiate Multipart Upload request, but has not yet been completed or aborted. + + This action returns at most 1,000 multipart uploads in the response.""" + + @abstractmethod + def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: + """This action aborts a multipart upload. After a multipart upload is aborted, + no additional parts can be uploaded using that upload ID. + The storage consumed by any previously uploaded parts will be freed. + However, if any part uploads are currently in progress, those part + uploads might or might not succeed. 
As a result, it might be necessary to + abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.""" + + @abstractmethod + def upload_part( + self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str + ) -> str: + """Uploads a part in a multipart upload.""" + + @abstractmethod + def upload_part_copy( + self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str + ) -> str: + """Uploads a part by copying data from an existing object as data source.""" + + @abstractmethod + def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: + """Lists the parts that have been uploaded for a specific multipart upload.""" + + @abstractmethod + def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: + """Completes a multipart upload by assembling previously uploaded parts.""" + + @abstractmethod + def put_object_retention( + self, + bucket: str, + key: str, + retention: dict, + version_id: Optional[str] = None, + bypass_governance_retention: Optional[bool] = None, + ) -> None: + """Places an Object Retention configuration on an object.""" + + @abstractmethod + def put_object_legal_hold( + self, + bucket: str, + key: str, + legal_hold_status: Literal["ON", "OFF"], + version_id: Optional[str] = None, + ) -> None: + """Applies a legal hold configuration to the specified object.""" + + @abstractmethod + def put_object_tagging(self, bucket: str, key: str, tags: list) -> None: + """Sets the tag-set for an object.""" + + @abstractmethod + def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: + """Returns the tag-set of an object.""" + + @abstractmethod + def delete_object_tagging(self, bucket: str, key: str) -> None: + """Removes the entire tag set from the specified object.""" + + @abstractmethod + def get_object_attributes( + self, + bucket: str, + key: str, + attributes: list[str], + version_id: str = "", + max_parts: int = 0, + part_number: int = 0, + full_output: bool = True, + ) -> dict: + """Retrieves all the metadata from an object without returning the object itself.""" + + @abstractmethod + def sync( + self, + bucket: str, + dir_path: str, + acl: Optional[str] = None, + metadata: Optional[dict] = None, + ) -> dict: + """sync directory TODO: Add proper description""" + + @abstractmethod + def cp( + self, + bucket: str, + dir_path: str, + acl: Optional[str] = None, + metadata: Optional[dict] = None, + ) -> dict: + """cp directory TODO: Add proper description""" + + # END OF OBJECT METHODS # diff --git a/src/frostfs_testlib/shell/interfaces.py b/src/frostfs_testlib/shell/interfaces.py index 4c87a78..219bc7c 100644 --- a/src/frostfs_testlib/shell/interfaces.py +++ b/src/frostfs_testlib/shell/interfaces.py @@ -60,6 +60,23 @@ class CommandOptions: self.timeout = Options.get_default_shell_timeout() +@dataclass +class SshCredentials: + """Represents ssh credentials. + + Attributes: + ssh_login: ssh login. + ssh_password: ssh password as plain text (unsecure, for local setup only). + ssh_key_path: path to a ssh key file. + ssh_key_passphrase: passphrase to ssh key file. + """ + + ssh_login: str + ssh_password: Optional[str] = None + ssh_key_path: Optional[str] = None + ssh_key_passphrase: Optional[str] = None + + @dataclass class CommandResult: """Represents a result of a command executed via shell. 
diff --git a/src/frostfs_testlib/shell/ssh_shell.py b/src/frostfs_testlib/shell/ssh_shell.py
index 04d42ee..6ef3dfb 100644
--- a/src/frostfs_testlib/shell/ssh_shell.py
+++ b/src/frostfs_testlib/shell/ssh_shell.py
@@ -29,7 +29,7 @@ reporter = get_reporter()
 class HostIsNotAvailable(Exception):
     """Raised when host is not reachable via SSH connection."""
 
-    def __init__(self, host: str = None):
+    def __init__(self, host: Optional[str] = None):
         msg = f"Host {host} is not available"
         super().__init__(msg)
 
diff --git a/src/frostfs_testlib/steps/__init__.py b/src/frostfs_testlib/steps/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/frostfs_testlib/steps/acl.py b/src/frostfs_testlib/steps/acl.py
new file mode 100644
index 0000000..0ef101b
--- /dev/null
+++ b/src/frostfs_testlib/steps/acl.py
@@ -0,0 +1,191 @@
+import base64
+import json
+import logging
+import os
+import uuid
+from time import sleep
+from typing import List, Optional, Union
+
+import base58
+
+from frostfs_testlib.cli import FrostfsCli
+from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
+from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG
+from frostfs_testlib.shell import Shell
+from frostfs_testlib.storage.dataclasses.acl import (
+    EACL_LIFETIME,
+    FROSTFS_CONTRACT_CACHE_TIMEOUT,
+    EACLPubKey,
+    EACLRole,
+    EACLRule,
+)
+from frostfs_testlib.utils import wallet_utils
+
+reporter = get_reporter()
+logger = logging.getLogger("NeoLogger")
+
+
+@reporter.step_deco("Get extended ACL")
+def get_eacl(wallet_path: str, cid: str, shell: Shell, endpoint: str) -> Optional[str]:
+    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
+    try:
+        result = cli.container.get_eacl(wallet=wallet_path, rpc_endpoint=endpoint, cid=cid)
+    except RuntimeError as exc:
+        logger.info("Extended ACL table is not set for this container")
+        logger.info(f"Got exception while getting eacl: {exc}")
+        return None
+    if "extended ACL table is not set for this container" in result.stdout:
+        return None
+    return result.stdout
+
+
+@reporter.step_deco("Set extended ACL")
+def set_eacl(
+    wallet_path: str,
+    cid: str,
+    eacl_table_path: str,
+    shell: Shell,
+    endpoint: str,
+    session_token: Optional[str] = None,
+) -> None:
+    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
+    cli.container.set_eacl(
+        wallet=wallet_path,
+        rpc_endpoint=endpoint,
+        cid=cid,
+        table=eacl_table_path,
+        await_mode=True,
+        session=session_token,
+    )
+
+
+def _encode_cid_for_eacl(cid: str) -> str:
+    cid_base58 = base58.b58decode(cid)
+    return base64.b64encode(cid_base58).decode("utf-8")
+
+
+def create_eacl(cid: str, rules_list: List[EACLRule], shell: Shell) -> str:
+    table_file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"eacl_table_{str(uuid.uuid4())}.json")
+    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
+    cli.acl.extended_create(cid=cid, out=table_file_path, rule=rules_list)
+
+    with open(table_file_path, "r") as file:
+        table_data = file.read()
+        logger.info(f"Generated eACL:\n{table_data}")
+
+    return table_file_path
+
+
+def form_bearertoken_file(
+    wif: str,
+    cid: str,
+    eacl_rule_list: List[Union[EACLRule, EACLPubKey]],
+    shell: Shell,
+    endpoint: str,
+    sign: Optional[bool] = True,
+) -> str:
+    """
+    This function fetches the eACL for the given `cid` on behalf of `wif`,
+    then extends it with filters taken from `eacl_rule_list`, signs the
+    resulting bearer token with `wif` and writes it to a file.
+    """
+    enc_cid = _encode_cid_for_eacl(cid) if cid else None
+    file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
+
+    eacl = get_eacl(wif, cid, shell, endpoint)
+    json_eacl = dict()
+    if eacl:
+        eacl = eacl.replace("eACL: ", "").split("Signature")[0]
+        json_eacl = json.loads(eacl)
+    logger.info(json_eacl)
+    eacl_result = {
+        "body": {
+            "eaclTable": {"containerID": {"value": enc_cid} if cid else enc_cid, "records": []},
+            "lifetime": {"exp": EACL_LIFETIME, "nbf": "1", "iat": "0"},
+        }
+    }
+
+    # the parameter (not the module-level eacl_rules function) must be non-empty
+    assert eacl_rule_list, "Got empty eacl_rule_list"
+    for rule in eacl_rule_list:
+        op_data = {
+            "operation": rule.operation.value.upper(),
+            "action": rule.access.value.upper(),
+            "filters": rule.filters or [],
+            "targets": [],
+        }
+
+        if isinstance(rule.role, EACLRole):
+            op_data["targets"] = [{"role": rule.role.value.upper()}]
+        elif isinstance(rule.role, EACLPubKey):
+            op_data["targets"] = [{"keys": rule.role.keys}]
+
+        eacl_result["body"]["eaclTable"]["records"].append(op_data)
+
+    # Add records from current eACL
+    if "records" in json_eacl.keys():
+        for record in json_eacl["records"]:
+            eacl_result["body"]["eaclTable"]["records"].append(record)
+
+    with open(file_path, "w", encoding="utf-8") as eacl_file:
+        json.dump(eacl_result, eacl_file, ensure_ascii=False, indent=4)
+
+    logger.info(f"Got these extended ACL records: {eacl_result}")
+    if sign:
+        sign_bearer(
+            shell=shell,
+            wallet_path=wif,
+            eacl_rules_file_from=file_path,
+            eacl_rules_file_to=file_path,
+            json=True,
+        )
+    return file_path
+
+
+def eacl_rules(access: str, verbs: list, user: str) -> list[str]:
+    """
+    This function creates a list of eACL rules.
+    Args:
+        access (str): identifies whether the following operation(s)
+                    are allowed or denied
+        verbs (list): a list of operations to set rules for
+        user (str): a group of users (user/others) or a wallet of
+                    a certain user for whom rules are set
+    Returns:
+        (list): a list of eACL rules
+    """
+    if user not in ("others", "user"):
+        pubkey = wallet_utils.get_wallet_public_key(user, wallet_password="")
+        user = f"pubkey:{pubkey}"
+
+    rules = []
+    for verb in verbs:
+        rule = f"{access} {verb} {user}"
+        rules.append(rule)
+    return rules
+
+
+def sign_bearer(
+    shell: Shell, wallet_path: str, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool
+) -> None:
+    frostfscli = FrostfsCli(
+        shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG
+    )
+    frostfscli.util.sign_bearer_token(
+        wallet=wallet_path, from_file=eacl_rules_file_from, to_file=eacl_rules_file_to, json=json
+    )
+
+
+@reporter.step_deco("Wait for eACL cache expired")
+def wait_for_cache_expired():
+    sleep(FROSTFS_CONTRACT_CACHE_TIMEOUT)
+    return
+
+
+@reporter.step_deco("Return bearer token in base64 to caller")
+def bearer_token_base64_from_file(
+    bearer_path: str,
+) -> str:
+    with open(bearer_path, "rb") as file:
+        signed = file.read()
+    return base64.b64encode(signed).decode("utf-8")
diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py
new file mode 100644
index 0000000..89070c4
--- /dev/null
+++ b/src/frostfs_testlib/steps/cli/container.py
@@ -0,0 +1,359 @@
+import json
+import logging
+from dataclasses import dataclass
+from time import sleep
+from typing import Optional, Union
+
+from frostfs_testlib.cli import FrostfsCli
+from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
+from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
+from frostfs_testlib.shell import Shell
+from frostfs_testlib.steps.cli.object
import put_object, put_object_to_random_node +from frostfs_testlib.storage.cluster import Cluster +from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo +from frostfs_testlib.utils import json_utils +from frostfs_testlib.utils.file_utils import generate_file, get_file_hash + +reporter = get_reporter() +logger = logging.getLogger("NeoLogger") + + +@dataclass +class StorageContainerInfo: + id: str + wallet_file: WalletInfo + + +class StorageContainer: + def __init__( + self, + storage_container_info: StorageContainerInfo, + shell: Shell, + cluster: Cluster, + ) -> None: + self.shell = shell + self.storage_container_info = storage_container_info + self.cluster = cluster + + def get_id(self) -> str: + return self.storage_container_info.id + + def get_wallet_path(self) -> str: + return self.storage_container_info.wallet_file.path + + def get_wallet_config_path(self) -> str: + return self.storage_container_info.wallet_file.config_path + + @reporter.step_deco("Generate new object and put in container") + def generate_object( + self, + size: int, + expire_at: Optional[int] = None, + bearer_token: Optional[str] = None, + endpoint: Optional[str] = None, + ) -> StorageObjectInfo: + with reporter.step(f"Generate object with size {size}"): + file_path = generate_file(size) + file_hash = get_file_hash(file_path) + + container_id = self.get_id() + wallet_path = self.get_wallet_path() + wallet_config = self.get_wallet_config_path() + with reporter.step(f"Put object with size {size} to container {container_id}"): + if endpoint: + object_id = put_object( + wallet=wallet_path, + path=file_path, + cid=container_id, + expire_at=expire_at, + shell=self.shell, + endpoint=endpoint, + bearer=bearer_token, + wallet_config=wallet_config, + ) + else: + object_id = put_object_to_random_node( + wallet=wallet_path, + path=file_path, + cid=container_id, + expire_at=expire_at, + shell=self.shell, + cluster=self.cluster, + bearer=bearer_token, + wallet_config=wallet_config, + ) + + storage_object = StorageObjectInfo( + container_id, + object_id, + size=size, + wallet_file_path=wallet_path, + file_path=file_path, + file_hash=file_hash, + ) + + return storage_object + + +DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X" +SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X" +REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X" + + +@reporter.step_deco("Create Container") +def create_container( + wallet: str, + shell: Shell, + endpoint: str, + rule: str = DEFAULT_PLACEMENT_RULE, + basic_acl: str = "", + attributes: Optional[dict] = None, + session_token: str = "", + session_wallet: str = "", + name: Optional[str] = None, + options: Optional[dict] = None, + await_mode: bool = True, + wait_for_creation: bool = True, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +) -> str: + """ + A wrapper for `frostfs-cli container create` call. 
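+
+    Example (illustrative sketch; the endpoint value is an assumption):
+        cid = create_container(wallet, shell, "s01.frostfs.devenv:8080", rule=SINGLE_PLACEMENT_RULE)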
+ + Args: + wallet (str): a wallet on whose behalf a container is created + rule (optional, str): placement rule for container + basic_acl (optional, str): an ACL for container, will be + appended to `--basic-acl` key + attributes (optional, dict): container attributes , will be + appended to `--attributes` key + session_token (optional, str): a path to session token file + session_wallet(optional, str): a path to the wallet which signed + the session token; this parameter makes sense + when paired with `session_token` + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + options (optional, dict): any other options to pass to the call + name (optional, str): container name attribute + await_mode (bool): block execution until container is persisted + wait_for_creation (): Wait for container shows in container list + timeout: Timeout for the operation. + + Returns: + (str): CID of the created container + """ + + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) + result = cli.container.create( + rpc_endpoint=endpoint, + wallet=session_wallet if session_wallet else wallet, + policy=rule, + basic_acl=basic_acl, + attributes=attributes, + name=name, + session=session_token, + await_mode=await_mode, + timeout=timeout, + **options or {}, + ) + + cid = _parse_cid(result.stdout) + + logger.info("Container created; waiting until it is persisted in the sidechain") + + if wait_for_creation: + wait_for_container_creation(wallet, cid, shell, endpoint) + + return cid + + +def wait_for_container_creation( + wallet: str, cid: str, shell: Shell, endpoint: str, attempts: int = 15, sleep_interval: int = 1 +): + for _ in range(attempts): + containers = list_containers(wallet, shell, endpoint) + if cid in containers: + return + logger.info(f"There is no {cid} in {containers} yet; sleep {sleep_interval} and continue") + sleep(sleep_interval) + raise RuntimeError( + f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting" + ) + + +def wait_for_container_deletion( + wallet: str, cid: str, shell: Shell, endpoint: str, attempts: int = 30, sleep_interval: int = 1 +): + for _ in range(attempts): + try: + get_container(wallet, cid, shell=shell, endpoint=endpoint) + sleep(sleep_interval) + continue + except Exception as err: + if "container not found" not in str(err): + raise AssertionError(f'Expected "container not found" in error, got\n{err}') + return + raise AssertionError(f"Expected container deleted during {attempts * sleep_interval} sec.") + + +@reporter.step_deco("List Containers") +def list_containers( + wallet: str, shell: Shell, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT +) -> list[str]: + """ + A wrapper for `frostfs-cli container list` call. It returns all the + available containers for the given wallet. + Args: + wallet (str): a wallet on whose behalf we list the containers + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + timeout: Timeout for the operation. 
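+
+    Example (illustrative sketch; the endpoint value is an assumption):
+        cids = list_containers(wallet, shell, "s01.frostfs.devenv:8080")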
+ Returns: + (list): list of containers + """ + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) + result = cli.container.list(rpc_endpoint=endpoint, wallet=wallet, timeout=timeout) + logger.info(f"Containers: \n{result}") + return result.stdout.split() + + +@reporter.step_deco("List Objects in container") +def list_objects( + wallet: str, + shell: Shell, + container_id: str, + endpoint: str, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +) -> list[str]: + """ + A wrapper for `frostfs-cli container list-objects` call. It returns all the + available objects in container. + Args: + wallet (str): a wallet on whose behalf we list the containers objects + shell: executor for cli command + container_id: cid of container + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + timeout: Timeout for the operation. + Returns: + (list): list of containers + """ + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) + result = cli.container.list_objects( + rpc_endpoint=endpoint, wallet=wallet, cid=container_id, timeout=timeout + ) + logger.info(f"Container objects: \n{result}") + return result.stdout.split() + + +@reporter.step_deco("Get Container") +def get_container( + wallet: str, + cid: str, + shell: Shell, + endpoint: str, + json_mode: bool = True, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +) -> Union[dict, str]: + """ + A wrapper for `frostfs-cli container get` call. It extracts container's + attributes and rearranges them into a more compact view. + Args: + wallet (str): path to a wallet on whose behalf we get the container + cid (str): ID of the container to get + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + json_mode (bool): return container in JSON format + timeout: Timeout for the operation. + Returns: + (dict, str): dict of container attributes + """ + + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) + result = cli.container.get( + rpc_endpoint=endpoint, wallet=wallet, cid=cid, json_mode=json_mode, timeout=timeout + ) + + if not json_mode: + return result.stdout + + container_info = json.loads(result.stdout) + attributes = dict() + for attr in container_info["attributes"]: + attributes[attr["key"]] = attr["value"] + container_info["attributes"] = attributes + container_info["ownerID"] = json_utils.json_reencode(container_info["ownerID"]["value"]) + return container_info + + +@reporter.step_deco("Delete Container") +# TODO: make the error message about a non-found container more user-friendly +def delete_container( + wallet: str, + cid: str, + shell: Shell, + endpoint: str, + force: bool = False, + session_token: Optional[str] = None, + await_mode: bool = False, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +) -> None: + """ + A wrapper for `frostfs-cli container delete` call. + Args: + wallet (str): path to a wallet on whose behalf we delete the container + cid (str): ID of the container to delete + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + force (bool): do not check whether container contains locks and remove immediately + session_token: a path to session token file + timeout: Timeout for the operation. + This function doesn't return anything. 
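+
+    Example (illustrative sketch):
+        delete_container(wallet, cid, shell, endpoint, await_mode=True)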
+ """ + + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) + cli.container.delete( + wallet=wallet, + cid=cid, + rpc_endpoint=endpoint, + force=force, + session=session_token, + await_mode=await_mode, + timeout=timeout, + ) + + +def _parse_cid(output: str) -> str: + """ + Parses container ID from a given CLI output. The input string we expect: + container ID: 2tz86kVTDpJxWHrhw3h6PbKMwkLtBEwoqhHQCKTre1FN + awaiting... + container has been persisted on sidechain + We want to take 'container ID' value from the string. + + Args: + output (str): CLI output to parse + + Returns: + (str): extracted CID + """ + try: + # taking first line from command's output + first_line = output.split("\n")[0] + except Exception: + first_line = "" + logger.error(f"Got empty output: {output}") + splitted = first_line.split(": ") + if len(splitted) != 2: + raise ValueError(f"no CID was parsed from command output: \t{first_line}") + return splitted[1] + + +@reporter.step_deco("Search container by name") +def search_container_by_name(wallet: str, name: str, shell: Shell, endpoint: str): + list_cids = list_containers(wallet, shell, endpoint) + for cid in list_cids: + cont_info = get_container(wallet, cid, shell, endpoint, True) + if cont_info.get("attributes", {}).get("Name", None) == name: + return cid + return None diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py new file mode 100644 index 0000000..8be7982 --- /dev/null +++ b/src/frostfs_testlib/steps/cli/object.py @@ -0,0 +1,727 @@ +import json +import logging +import os +import re +import uuid +from typing import Any, Optional + +from frostfs_testlib.cli import FrostfsCli +from frostfs_testlib.cli.neogo import NeoGo +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE +from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG +from frostfs_testlib.shell import Shell +from frostfs_testlib.storage.cluster import Cluster +from frostfs_testlib.utils import json_utils + +logger = logging.getLogger("NeoLogger") +reporter = get_reporter() + + +@reporter.step_deco("Get object from random node") +def get_object_from_random_node( + wallet: str, + cid: str, + oid: str, + shell: Shell, + cluster: Cluster, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + wallet_config: Optional[str] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +) -> str: + """ + GET from FrostFS random storage node + + Args: + wallet: wallet on whose behalf GET is done + cid: ID of Container where we get the Object from + oid: Object ID + shell: executor for cli command + cluster: cluster object + bearer (optional, str): path to Bearer Token file, appends to `--bearer` key + write_object (optional, str): path to downloaded file, appends to `--file` key + wallet_config(optional, str): path to the wallet config + no_progress(optional, bool): do not show progress bar + xhdr (optional, dict): Request X-Headers in form of Key=Value + session (optional, dict): path to a JSON-encoded container session token + timeout: Timeout for the operation. 
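+
+    Example (illustrative sketch; `cluster` comes from the test fixtures):
+        file_path = get_object_from_random_node(wallet, cid, oid, shell, cluster)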
+ Returns: + (str): path to downloaded file + """ + endpoint = cluster.get_random_storage_rpc_endpoint() + return get_object( + wallet, + cid, + oid, + shell, + endpoint, + bearer, + write_object, + xhdr, + wallet_config, + no_progress, + session, + timeout, + ) + + +@reporter.step_deco("Get object from {endpoint}") +def get_object( + wallet: str, + cid: str, + oid: str, + shell: Shell, + endpoint: str, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + wallet_config: Optional[str] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +) -> str: + """ + GET from FrostFS. + + Args: + wallet (str): wallet on whose behalf GET is done + cid (str): ID of Container where we get the Object from + oid (str): Object ID + shell: executor for cli command + bearer: path to Bearer Token file, appends to `--bearer` key + write_object: path to downloaded file, appends to `--file` key + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + wallet_config(optional, str): path to the wallet config + no_progress(optional, bool): do not show progress bar + xhdr (optional, dict): Request X-Headers in form of Key=Value + session (optional, dict): path to a JSON-encoded container session token + timeout: Timeout for the operation. + Returns: + (str): path to downloaded file + """ + + if not write_object: + write_object = str(uuid.uuid4()) + file_path = os.path.join(ASSETS_DIR, write_object) + + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) + cli.object.get( + rpc_endpoint=endpoint, + wallet=wallet, + cid=cid, + oid=oid, + file=file_path, + bearer=bearer, + no_progress=no_progress, + xhdr=xhdr, + session=session, + timeout=timeout, + ) + + return file_path + + +@reporter.step_deco("Get Range Hash from {endpoint}") +def get_range_hash( + wallet: str, + cid: str, + oid: str, + range_cut: str, + shell: Shell, + endpoint: str, + bearer: Optional[str] = None, + wallet_config: Optional[str] = None, + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +): + """ + GETRANGEHASH of given Object. + + Args: + wallet: wallet on whose behalf GETRANGEHASH is done + cid: ID of Container where we get the Object from + oid: Object ID + shell: executor for cli command + bearer: path to Bearer Token file, appends to `--bearer` key + range_cut: Range to take hash from in the form offset1:length1,..., + value to pass to the `--range` parameter + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + wallet_config: path to the wallet config + xhdr: Request X-Headers in form of Key=Values + session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session. + timeout: Timeout for the operation. 
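+
+    Example (illustrative sketch; hashes 64 bytes starting at offset 0):
+        range_hash = get_range_hash(wallet, cid, oid, "0:64", shell, endpoint)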
+
+    Returns:
+        (str): hash of the requested range (the CLI output with the
+        range offset/length prefix cut off)
+    """
+    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
+    result = cli.object.hash(
+        rpc_endpoint=endpoint,
+        wallet=wallet,
+        cid=cid,
+        oid=oid,
+        range=range_cut,
+        bearer=bearer,
+        xhdr=xhdr,
+        session=session,
+        timeout=timeout,
+    )
+
+    # cutting off output about range offset and length
+    return result.stdout.split(":")[1].strip()
+
+
+@reporter.step_deco("Put object to random node")
+def put_object_to_random_node(
+    wallet: str,
+    path: str,
+    cid: str,
+    shell: Shell,
+    cluster: Cluster,
+    bearer: Optional[str] = None,
+    attributes: Optional[dict] = None,
+    xhdr: Optional[dict] = None,
+    wallet_config: Optional[str] = None,
+    expire_at: Optional[int] = None,
+    no_progress: bool = True,
+    session: Optional[str] = None,
+    timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+):
+    """
+    PUT of given file to a random storage node.
+
+    Args:
+        wallet: wallet on whose behalf PUT is done
+        path: path to file to be PUT
+        cid: ID of Container where we get the Object from
+        shell: executor for cli command
+        cluster: cluster under test
+        bearer: path to Bearer Token file, appends to `--bearer` key
+        attributes: User attributes in form of Key1=Value1,Key2=Value2
+        wallet_config: path to the wallet config
+        no_progress: do not show progress bar
+        expire_at: Last epoch in the life of the object
+        xhdr: Request X-Headers in form of Key=Value
+        session: path to a JSON-encoded container session token
+        timeout: Timeout for the operation.
+    Returns:
+        ID of uploaded Object
+    """
+
+    endpoint = cluster.get_random_storage_rpc_endpoint()
+    return put_object(
+        wallet,
+        path,
+        cid,
+        shell,
+        endpoint,
+        bearer,
+        attributes,
+        xhdr,
+        wallet_config,
+        expire_at,
+        no_progress,
+        session,
+        timeout=timeout,
+    )
+
+
+@reporter.step_deco("Put object at {endpoint} in container {cid}")
+def put_object(
+    wallet: str,
+    path: str,
+    cid: str,
+    shell: Shell,
+    endpoint: str,
+    bearer: Optional[str] = None,
+    attributes: Optional[dict] = None,
+    xhdr: Optional[dict] = None,
+    wallet_config: Optional[str] = None,
+    expire_at: Optional[int] = None,
+    no_progress: bool = True,
+    session: Optional[str] = None,
+    timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+):
+    """
+    PUT of given file.
+
+    Args:
+        wallet: wallet on whose behalf PUT is done
+        path: path to file to be PUT
+        cid: ID of Container where we get the Object from
+        shell: executor for cli command
+        bearer: path to Bearer Token file, appends to `--bearer` key
+        attributes: User attributes in form of Key1=Value1,Key2=Value2
+        endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
+        wallet_config: path to the wallet config
+        no_progress: do not show progress bar
+        expire_at: Last epoch in the life of the object
+        xhdr: Request X-Headers in form of Key=Value
+        session: path to a JSON-encoded container session token
+        timeout: Timeout for the operation.
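+
+    Example (illustrative sketch; `file_path` is a locally generated file):
+        oid = put_object(wallet, file_path, cid, shell, endpoint)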
+ Returns: + (str): ID of uploaded Object + """ + + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) + result = cli.object.put( + rpc_endpoint=endpoint, + wallet=wallet, + file=path, + cid=cid, + attributes=attributes, + bearer=bearer, + expire_at=expire_at, + no_progress=no_progress, + xhdr=xhdr, + session=session, + timeout=timeout, + ) + + # Splitting CLI output to separate lines and taking the penultimate line + id_str = result.stdout.strip().split("\n")[-2] + oid = id_str.split(":")[1] + return oid.strip() + + +@reporter.step_deco("Delete object {cid}/{oid} from {endpoint}") +def delete_object( + wallet: str, + cid: str, + oid: str, + shell: Shell, + endpoint: str, + bearer: str = "", + wallet_config: Optional[str] = None, + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +): + """ + DELETE an Object. + + Args: + wallet: wallet on whose behalf DELETE is done + cid: ID of Container where we get the Object from + oid: ID of Object we are going to delete + shell: executor for cli command + bearer: path to Bearer Token file, appends to `--bearer` key + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + wallet_config: path to the wallet config + xhdr: Request X-Headers in form of Key=Value + session: path to a JSON-encoded container session token + timeout: Timeout for the operation. + Returns: + (str): Tombstone ID + """ + + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) + result = cli.object.delete( + rpc_endpoint=endpoint, + wallet=wallet, + cid=cid, + oid=oid, + bearer=bearer, + xhdr=xhdr, + session=session, + timeout=timeout, + ) + + id_str = result.stdout.split("\n")[1] + tombstone = id_str.split(":")[1] + return tombstone.strip() + + +@reporter.step_deco("Get Range") +def get_range( + wallet: str, + cid: str, + oid: str, + range_cut: str, + shell: Shell, + endpoint: str, + wallet_config: Optional[str] = None, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +): + """ + GETRANGE an Object. + + Args: + wallet: wallet on whose behalf GETRANGE is done + cid: ID of Container where we get the Object from + oid: ID of Object we are going to request + range_cut: range to take data from in the form offset:length + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + bearer: path to Bearer Token file, appends to `--bearer` key + wallet_config: path to the wallet config + xhdr: Request X-Headers in form of Key=Value + session: path to a JSON-encoded container session token + timeout: Timeout for the operation. 
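+
+    Example (illustrative sketch; reads 16 bytes starting at offset 0):
+        range_file, content = get_range(wallet, cid, oid, "0:16", shell, endpoint)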
+    Returns:
+        (str, bytes) - path to the file with range content and content of this file as bytes
+    """
+    range_file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4()))
+
+    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
+    cli.object.range(
+        rpc_endpoint=endpoint,
+        wallet=wallet,
+        cid=cid,
+        oid=oid,
+        range=range_cut,
+        file=range_file_path,
+        bearer=bearer,
+        xhdr=xhdr,
+        session=session,
+        timeout=timeout,
+    )
+
+    with open(range_file_path, "rb") as file:
+        content = file.read()
+    return range_file_path, content
+
+
+@reporter.step_deco("Lock Object")
+def lock_object(
+    wallet: str,
+    cid: str,
+    oid: str,
+    shell: Shell,
+    endpoint: str,
+    lifetime: Optional[int] = None,
+    expire_at: Optional[int] = None,
+    address: Optional[str] = None,
+    bearer: Optional[str] = None,
+    session: Optional[str] = None,
+    wallet_config: Optional[str] = None,
+    ttl: Optional[int] = None,
+    xhdr: Optional[dict] = None,
+    timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+) -> str:
+    """
+    Locks object in container.
+
+    Args:
+        address: Address of wallet account.
+        bearer: File with signed JSON or binary encoded bearer token.
+        cid: Container ID.
+        oid: Object ID.
+        lifetime: Lock lifetime.
+        expire_at: Lock expiration epoch.
+        shell: executor for cli command
+        endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
+        session: Path to a JSON-encoded container session token.
+        ttl: TTL value in request meta header (default 2).
+        wallet: WIF (NEP-2) string or path to the wallet or binary key.
+        xhdr: Dict with request X-Headers.
+        timeout: Timeout for the operation.
+
+    Returns:
+        Lock object ID
+    """
+
+    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
+    result = cli.object.lock(
+        rpc_endpoint=endpoint,
+        lifetime=lifetime,
+        expire_at=expire_at,
+        address=address,
+        wallet=wallet,
+        cid=cid,
+        oid=oid,
+        bearer=bearer,
+        xhdr=xhdr,
+        session=session,
+        ttl=ttl,
+        timeout=timeout,
+    )
+
+    # Splitting CLI output to separate lines and taking the first line
+    id_str = result.stdout.strip().split("\n")[0]
+    oid = id_str.split(":")[1]
+    return oid.strip()
+
+
+@reporter.step_deco("Search object")
+def search_object(
+    wallet: str,
+    cid: str,
+    shell: Shell,
+    endpoint: str,
+    bearer: str = "",
+    filters: Optional[dict] = None,
+    expected_objects_list: Optional[list] = None,
+    wallet_config: Optional[str] = None,
+    xhdr: Optional[dict] = None,
+    session: Optional[str] = None,
+    phy: bool = False,
+    root: bool = False,
+    timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+) -> list:
+    """
+    SEARCH an Object.
+
+    Args:
+        wallet: wallet on whose behalf SEARCH is done
+        cid: ID of Container where we get the Object from
+        shell: executor for cli command
+        bearer: path to Bearer Token file, appends to `--bearer` key
+        endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
+        filters: key=value pairs to filter Objects
+        expected_objects_list: a list of ObjectIDs to compare found Objects with
+        wallet_config: path to the wallet config
+        xhdr: Request X-Headers in form of Key=Value
+        session: path to a JSON-encoded container session token
+        phy: Search physically stored objects.
+        root: Search for user objects.
+        timeout: Timeout for the operation.
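+
+    Example (illustrative sketch; the attribute filter is an assumption):
+        oids = search_object(wallet, cid, shell, endpoint, filters={"FileName": "cat.jpg"}, root=True)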
+
+    Returns:
+        list of found ObjectIDs
+    """
+
+    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
+    result = cli.object.search(
+        rpc_endpoint=endpoint,
+        wallet=wallet,
+        cid=cid,
+        bearer=bearer,
+        xhdr=xhdr,
+        filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()]
+        if filters
+        else None,
+        session=session,
+        phy=phy,
+        root=root,
+        timeout=timeout,
+    )
+
+    found_objects = re.findall(r"(\w{43,44})", result.stdout)
+
+    if expected_objects_list:
+        if sorted(found_objects) == sorted(expected_objects_list):
+            logger.info(
+                f"Found objects list '{found_objects}' "
+                f"is equal to expected list '{expected_objects_list}'"
+            )
+        else:
+            logger.warning(
+                f"Found object list {found_objects} "
+                f"is not equal to expected list '{expected_objects_list}'"
+            )
+
+    return found_objects
+
+
+@reporter.step_deco("Get netmap netinfo")
+def get_netmap_netinfo(
+    wallet: str,
+    shell: Shell,
+    endpoint: str,
+    wallet_config: Optional[str] = None,
+    address: Optional[str] = None,
+    ttl: Optional[int] = None,
+    xhdr: Optional[dict] = None,
+    timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+) -> dict[str, Any]:
+    """
+    Get netmap netinfo output from node
+
+    Args:
+        wallet (str): wallet on whose behalf request is done
+        shell: executor for cli command
+        endpoint (optional, str): FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
+        address: Address of wallet account
+        ttl: TTL value in request meta header (default 2)
+        wallet_config: path to the wallet config
+        xhdr: Request X-Headers in form of Key=Value
+        timeout: Timeout for the operation.
+
+    Returns:
+        (dict): dict of parsed command output
+    """
+
+    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
+    output = cli.netmap.netinfo(
+        wallet=wallet,
+        rpc_endpoint=endpoint,
+        address=address,
+        ttl=ttl,
+        xhdr=xhdr,
+        timeout=timeout,
+    )
+
+    settings = dict()
+
+    # raw strings avoid invalid escape sequence warnings for \d;
+    # bool("false") would be True, so booleans are compared explicitly
+    patterns = [
+        (re.compile(r"(.*): (\d+)"), int),
+        (re.compile(r"(.*): (false|true)"), lambda value: value == "true"),
+        (re.compile(r"(.*): (\d+\.\d+)"), float),
+    ]
+    for pattern, func in patterns:
+        for setting, value in re.findall(pattern, output.stdout):
+            settings[setting.lower().strip().replace(" ", "_")] = func(value)
+
+    return settings
+
+
+@reporter.step_deco("Head object")
+def head_object(
+    wallet: str,
+    cid: str,
+    oid: str,
+    shell: Shell,
+    endpoint: str,
+    bearer: str = "",
+    xhdr: Optional[dict] = None,
+    json_output: bool = True,
+    is_raw: bool = False,
+    is_direct: bool = False,
+    wallet_config: Optional[str] = None,
+    session: Optional[str] = None,
+    timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+):
+    """
+    HEAD an Object.
+
+    Args:
+        wallet (str): wallet on whose behalf HEAD is done
+        cid (str): ID of Container where we get the Object from
+        oid (str): ObjectID to HEAD
+        shell: executor for cli command
+        bearer (optional, str): path to Bearer Token file, appends to `--bearer` key
+        endpoint(optional, str): FrostFS endpoint to send request to
+        json_output(optional, bool): return response in JSON format or not; this flag
+            turns into `--json` key
+        is_raw(optional, bool): send "raw" request or not; this flag
+            turns into `--raw` key
+        is_direct(optional, bool): send request directly to the node or not; this flag
+            turns into `--ttl 1` key
+        wallet_config(optional, str): path to the wallet config
+        xhdr (optional, dict): Request X-Headers in form of Key=Value
+        session (optional, dict): path to a JSON-encoded container session token
+        timeout: Timeout for the operation.
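+
+    Example (illustrative sketch):
+        header = head_object(wallet, cid, oid, shell, endpoint)  # decoded JSON header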
+ Returns: + depending on the `json_output` parameter value, the function returns + (dict): HEAD response in JSON format + or + (str): HEAD response as a plain text + """ + + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) + result = cli.object.head( + rpc_endpoint=endpoint, + wallet=wallet, + cid=cid, + oid=oid, + bearer=bearer, + json_mode=json_output, + raw=is_raw, + ttl=1 if is_direct else None, + xhdr=xhdr, + session=session, + timeout=timeout, + ) + + if not json_output: + return result + + try: + decoded = json.loads(result.stdout) + except Exception as exc: + # If we failed to parse output as JSON, the cause might be + # the plain text string in the beginning of the output. + # Here we cut off first string and try to parse again. + logger.info(f"failed to parse output: {exc}") + logger.info("parsing output in another way") + fst_line_idx = result.stdout.find("\n") + decoded = json.loads(result.stdout[fst_line_idx:]) + + # If response is Complex Object header, it has `splitId` key + if "splitId" in decoded.keys(): + logger.info("decoding split header") + return json_utils.decode_split_header(decoded) + + # If response is Last or Linking Object header, + # it has `header` dictionary and non-null `split` dictionary + if "split" in decoded["header"].keys(): + if decoded["header"]["split"]: + logger.info("decoding linking object") + return json_utils.decode_linking_object(decoded) + + if decoded["header"]["objectType"] == "STORAGE_GROUP": + logger.info("decoding storage group") + return json_utils.decode_storage_group(decoded) + + if decoded["header"]["objectType"] == "TOMBSTONE": + logger.info("decoding tombstone") + return json_utils.decode_tombstone(decoded) + + logger.info("decoding simple header") + return json_utils.decode_simple_header(decoded) + + +@reporter.step_deco("Run neo-go dump-keys") +def neo_go_dump_keys(shell: Shell, wallet: str) -> dict: + """ + Run neo-go dump keys command + + Args: + shell: executor for cli command + wallet: wallet path to dump from + Returns: + dict Address:Wallet Key + """ + neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE) + output = neogo.wallet.dump_keys(wallet=wallet).stdout + first_line = "" + try: + # taking first line from command's output contain wallet address + first_line = output.split("\n")[0] + except Exception: + logger.error(f"Got empty output (neo-go dump keys): {output}") + address_id = first_line.split()[0] + # taking second line from command's output contain wallet key + wallet_key = output.split("\n")[1] + return {address_id: wallet_key} + + +@reporter.step_deco("Run neo-go query height") +def neo_go_query_height(shell: Shell, endpoint: str) -> dict: + """ + Run neo-go query height command + + Args: + shell: executor for cli command + endpoint: endpoint to execute + Returns: + dict-> + Latest block: {value} + Validated state: {value} + + """ + neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE) + output = neogo.query.height(rpc_endpoint=endpoint).stdout + first_line = "" + try: + # taking first line from command's output contain the latest block in blockchain + first_line = output.split("\n")[0] + except Exception: + logger.error(f"Got empty output (neo-go query height): {output}") + latest_block = first_line.split(":") + # taking second line from command's output contain wallet key + second_line = output.split("\n")[1] + validated_state = second_line.split(":") + return { + latest_block[0].replace(":", ""): int(latest_block[1]), + validated_state[0].replace(":", ""): 
int(validated_state[1]),
+    }
diff --git a/src/frostfs_testlib/steps/complex_object_actions.py b/src/frostfs_testlib/steps/complex_object_actions.py
new file mode 100644
index 0000000..54e5fc2
--- /dev/null
+++ b/src/frostfs_testlib/steps/complex_object_actions.py
@@ -0,0 +1,210 @@
+#!/usr/bin/python3
+
+"""
+    This module contains functions which are used for Large Object assembling:
+    getting the Last Object of a split chain and getting the Link Object. It is
+    not enough to simply perform a "raw" HEAD request. Therefore, the reliable
+    retrieval of the aforementioned objects must be done this way: send a direct
+    "raw" HEAD request to every Storage Node and return the desired OID on the
+    first non-null response.
+"""
+
+import logging
+from typing import Optional, Tuple
+
+from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
+from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
+from frostfs_testlib.shell import Shell
+from frostfs_testlib.steps.cli.object import head_object
+from frostfs_testlib.storage.cluster import Cluster, StorageNode
+from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
+
+reporter = get_reporter()
+logger = logging.getLogger("NeoLogger")
+
+
+def get_storage_object_chunks(
+    storage_object: StorageObjectInfo,
+    shell: Shell,
+    cluster: Cluster,
+    timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+) -> list[str]:
+    """
+    Get complex object split objects ids (no linker object)
+
+    Args:
+        storage_object: storage_object to get its chunks
+        shell: client shell to do cmd requests
+        cluster: cluster object under test
+        timeout: Timeout for an operation.
+
+    Returns:
+        list of object ids of complex object chunks
+    """
+
+    with reporter.step(f"Get complex object chunks ({storage_object.oid})"):
+        split_object_id = get_link_object(
+            storage_object.wallet_file_path,
+            storage_object.cid,
+            storage_object.oid,
+            shell,
+            cluster.services(StorageNode),
+            is_direct=False,
+            timeout=timeout,
+        )
+        head = head_object(
+            storage_object.wallet_file_path,
+            storage_object.cid,
+            split_object_id,
+            shell,
+            cluster.default_rpc_endpoint,
+            timeout=timeout,
+        )
+
+        chunks_object_ids = []
+        if "split" in head["header"] and "children" in head["header"]["split"]:
+            chunks_object_ids = head["header"]["split"]["children"]
+
+        return chunks_object_ids
+
+
+def get_complex_object_split_ranges(
+    storage_object: StorageObjectInfo,
+    shell: Shell,
+    cluster: Cluster,
+    timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+) -> list[Tuple[int, int]]:
+
+    """
+    Get list of split ranges tuples (offset, length) of a complex object.
+    For example, if the object size is 100 and the max object size in the
+    system is 30, the returned list should be
+    [(0, 30), (30, 30), (60, 30), (90, 10)]
+
+    Args:
+        storage_object: storage_object to get its chunks
+        shell: client shell to do cmd requests
+        cluster: cluster object under test
+        timeout: Timeout for an operation.
+
+    Returns:
+        list of (offset, length) tuples for the chunks of the complex object
+    """
+
+    ranges: list = []
+    offset = 0
+    # pass the timeout through instead of silently dropping it
+    chunks_ids = get_storage_object_chunks(storage_object, shell, cluster, timeout)
+    for chunk_id in chunks_ids:
+        head = head_object(
+            storage_object.wallet_file_path,
+            storage_object.cid,
+            chunk_id,
+            shell,
+            cluster.default_rpc_endpoint,
+            timeout=timeout,
+        )
+
+        length = int(head["header"]["payloadLength"])
+        ranges.append((offset, length))
+
+        offset = offset + length
+
+    return ranges
+
+
+@reporter.step_deco("Get Link Object")
+def get_link_object(
+    wallet: str,
+    cid: str,
+    oid: str,
+    shell: Shell,
+    nodes: list[StorageNode],
+    bearer: str = "",
+    wallet_config: str = DEFAULT_WALLET_CONFIG,
+    is_direct: bool = True,
+    timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+):
+    """
+    Args:
+        wallet (str): path to the wallet on whose behalf the Storage Nodes
+            are requested
+        cid (str): Container ID which stores the Large Object
+        oid (str): Large Object ID
+        shell: executor for cli command
+        nodes: list of nodes to do search on
+        bearer (optional, str): path to Bearer token file
+        wallet_config (optional, str): path to the frostfs-cli config file
+        is_direct: send request directly to the node or not; this flag
+            turns into `--ttl 1` key
+        timeout: Timeout for an operation.
+    Returns:
+        (str): Link Object ID
+        When no Link Object ID is found after polling all Storage Nodes,
+        the function logs an error and returns None.
+    """
+    for node in nodes:
+        endpoint = node.get_rpc_endpoint()
+        try:
+            resp = head_object(
+                wallet,
+                cid,
+                oid,
+                shell=shell,
+                endpoint=endpoint,
+                is_raw=True,
+                is_direct=is_direct,
+                bearer=bearer,
+                wallet_config=wallet_config,
+                timeout=timeout,
+            )
+            if resp["link"]:
+                return resp["link"]
+        except Exception:
+            logger.info(f"No Link Object found on {endpoint}; continue")
+    logger.error(f"No Link Object for {cid}/{oid} found among all Storage Nodes")
+    return None
+
+
+@reporter.step_deco("Get Last Object")
+def get_last_object(
+    wallet: str,
+    cid: str,
+    oid: str,
+    shell: Shell,
+    nodes: list[StorageNode],
+    timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+) -> Optional[str]:
+    """
+    Args:
+        wallet (str): path to the wallet on whose behalf the Storage Nodes
+            are requested
+        cid (str): Container ID which stores the Large Object
+        oid (str): Large Object ID
+        shell: executor for cli command
+        nodes: list of nodes to do search on
+        timeout: Timeout for an operation.
+    Returns:
+        (str): Last Object ID
+        When no Last Object ID is found after polling all Storage Nodes,
+        the function logs an error and returns None.
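+
+    Example (illustrative sketch):
+        last_oid = get_last_object(wallet, cid, oid, shell, cluster.services(StorageNode))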
+ """ + for node in nodes: + endpoint = node.get_rpc_endpoint() + try: + resp = head_object( + wallet, + cid, + oid, + shell=shell, + endpoint=endpoint, + is_raw=True, + is_direct=True, + timeout=timeout, + ) + if resp["lastPart"]: + return resp["lastPart"] + except Exception: + logger.info(f"No Last Object found on {endpoint}; continue") + logger.error(f"No Last Object for {cid}/{oid} found among all Storage Nodes") + return None diff --git a/src/frostfs_testlib/steps/epoch.py b/src/frostfs_testlib/steps/epoch.py new file mode 100644 index 0000000..0d40f8d --- /dev/null +++ b/src/frostfs_testlib/steps/epoch.py @@ -0,0 +1,131 @@ +import logging +from time import sleep +from typing import Optional + +from frostfs_testlib.cli import FrostfsAdm, FrostfsCli, NeoGo +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.resources.cli import ( + CLI_DEFAULT_TIMEOUT, + FROSTFS_ADM_CONFIG_PATH, + FROSTFS_ADM_EXEC, + FROSTFS_CLI_EXEC, + NEOGO_EXECUTABLE, +) +from frostfs_testlib.resources.common import MORPH_BLOCK_TIME +from frostfs_testlib.shell import Shell +from frostfs_testlib.steps.payment_neogo import get_contract_hash +from frostfs_testlib.storage.cluster import Cluster, StorageNode +from frostfs_testlib.storage.dataclasses.frostfs_services import InnerRing, MorphChain +from frostfs_testlib.testing.test_control import wait_for_success +from frostfs_testlib.utils import datetime_utils, wallet_utils + +reporter = get_reporter() +logger = logging.getLogger("NeoLogger") + + +@reporter.step_deco("Get epochs from nodes") +def get_epochs_from_nodes(shell: Shell, cluster: Cluster) -> dict[str, int]: + """ + Get current epochs on each node. + + Args: + shell: shell to run commands on. + cluster: cluster under test. + + Returns: + Dict of {node_ip: epoch}. 
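+
+    Example (an illustrative sketch; `shell` and `cluster` are assumed to be
+    provided by the test session fixtures, and the addresses and epoch
+    numbers below are made up):
+        >>> get_epochs_from_nodes(shell, cluster)
+        {'10.0.0.1': 42, '10.0.0.2': 42, '10.0.0.3': 42, '10.0.0.4': 42}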
+ """ + epochs_by_node = {} + for node in cluster.services(StorageNode): + epochs_by_node[node.host.config.address] = get_epoch(shell, cluster, node) + return epochs_by_node + + +@reporter.step_deco("Ensure fresh epoch") +def ensure_fresh_epoch( + shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None +) -> int: + # ensure new fresh epoch to avoid epoch switch during test session + alive_node = alive_node if alive_node else cluster.services(StorageNode)[0] + current_epoch = get_epoch(shell, cluster, alive_node) + tick_epoch(shell, cluster, alive_node) + epoch = get_epoch(shell, cluster, alive_node) + assert epoch > current_epoch, "Epoch wasn't ticked" + return epoch + + +@reporter.step_deco("Wait for epochs align in whole cluster") +@wait_for_success(60, 5) +def wait_for_epochs_align(shell: Shell, cluster: Cluster) -> None: + epochs = [] + for node in cluster.services(StorageNode): + epochs.append(get_epoch(shell, cluster, node)) + unique_epochs = list(set(epochs)) + assert ( + len(unique_epochs) == 1 + ), f"unaligned epochs found, {epochs}, count of unique epochs {len(unique_epochs)}" + + +@reporter.step_deco("Get Epoch") +def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None): + alive_node = alive_node if alive_node else cluster.services(StorageNode)[0] + endpoint = alive_node.get_rpc_endpoint() + wallet_path = alive_node.get_wallet_path() + wallet_config = alive_node.get_wallet_config_path() + + cli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config) + + epoch = cli.netmap.epoch(endpoint, wallet_path, timeout=CLI_DEFAULT_TIMEOUT) + return int(epoch.stdout) + + +@reporter.step_deco("Tick Epoch") +def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None): + """ + Tick epoch using frostfs-adm or NeoGo if frostfs-adm is not available (DevEnv) + Args: + shell: local shell to make queries about current epoch. 
Remote shell will be used to tick a new one
+        cluster: cluster instance under test
+        alive_node: node to send requests to (first node in cluster by default)
+    """
+
+    alive_node = alive_node if alive_node else cluster.services(StorageNode)[0]
+    remote_shell = alive_node.host.get_shell()
+
+    if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH:
+        # If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests)
+        frostfs_adm = FrostfsAdm(
+            shell=remote_shell,
+            frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
+            config_file=FROSTFS_ADM_CONFIG_PATH,
+        )
+        frostfs_adm.morph.force_new_epoch()
+        return
+
+    # Otherwise we tick epoch using a transaction
+    cur_epoch = get_epoch(shell, cluster)
+
+    # Use the first node by default
+    ir_node = cluster.services(InnerRing)[0]
+    # In case no local_wallet_path is provided, we use wallet_path
+    ir_wallet_path = ir_node.get_wallet_path()
+    ir_wallet_pass = ir_node.get_wallet_password()
+    ir_address = wallet_utils.get_last_address_from_wallet(ir_wallet_path, ir_wallet_pass)
+
+    morph_chain = cluster.services(MorphChain)[0]
+    morph_endpoint = morph_chain.get_endpoint()
+
+    neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE)
+    neogo.contract.invokefunction(
+        wallet=ir_wallet_path,
+        wallet_password=ir_wallet_pass,
+        scripthash=get_contract_hash(morph_chain, "netmap.frostfs", shell=shell),
+        method="newEpoch",
+        arguments=f"int:{cur_epoch + 1}",
+        multisig_hash=f"{ir_address}:Global",
+        address=ir_address,
+        rpc_endpoint=morph_endpoint,
+        force=True,
+        gas=1,
+    )
+    sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
diff --git a/src/frostfs_testlib/steps/http/__init__.py b/src/frostfs_testlib/steps/http/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/frostfs_testlib/steps/http/http_gate.py b/src/frostfs_testlib/steps/http/http_gate.py
new file mode 100644
index 0000000..c9769fb
--- /dev/null
+++ b/src/frostfs_testlib/steps/http/http_gate.py
@@ -0,0 +1,355 @@
+import logging
+import os
+import random
+import re
+import shutil
+import uuid
+import zipfile
+from typing import Optional
+from urllib.parse import quote_plus
+
+import requests
+
+from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib.resources.common import SIMPLE_OBJECT_SIZE
+from frostfs_testlib.s3.aws_cli_client import LONG_TIMEOUT
+from frostfs_testlib.shell import Shell
+from frostfs_testlib.steps.cli.object import get_object
+from frostfs_testlib.steps.storage_policy import get_nodes_without_object
+from frostfs_testlib.storage.cluster import StorageNode
+from frostfs_testlib.utils.cli_utils import _cmd_run
+from frostfs_testlib.utils.file_utils import get_file_hash
+
+reporter = get_reporter()
+
+logger = logging.getLogger("NeoLogger")
+
+ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/")
+
+
+@reporter.step_deco("Get via HTTP Gate")
+def get_via_http_gate(cid: str, oid: str, endpoint: str, request_path: Optional[str] = None):
+    """
+    This function gets the given object from the HTTP gate
+    cid: container id to get object from
+    oid: object ID
+    endpoint: http gate endpoint
+    request_path: (optional) http request, if omitted - use default [{endpoint}/get/{cid}/{oid}]
+    """
+
+    # if `request_path` parameter omitted, use default
+    if request_path is None:
+        request = f"{endpoint}/get/{cid}/{oid}"
+    else:
+        request = f"{endpoint}{request_path}"
+
+    resp = requests.get(request, stream=True)
+
+    if not resp.ok:
+        raise Exception(
+            f"""Failed to get object via HTTP gate:
+                request: {resp.request.path_url},
+                response: {resp.text},
+                status code: {resp.status_code} {resp.reason}"""
+        )
+
+    logger.info(f"Request: {request}")
+    _attach_allure_step(request, resp.status_code)
+
+    file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}")
+    with open(file_path, "wb") as file:
+        shutil.copyfileobj(resp.raw, file)
+    return file_path
+
+
+@reporter.step_deco("Get via Zip HTTP Gate")
+def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str):
+    """
+    This function gets objects with the given prefix from the HTTP gate as a zip archive
+    cid: container id to get objects from
+    prefix: common prefix
+    endpoint: http gate endpoint
+    """
+    request = f"{endpoint}/zip/{cid}/{prefix}"
+    resp = requests.get(request, stream=True)
+
+    if not resp.ok:
+        raise Exception(
+            f"""Failed to get object via HTTP gate:
+                request: {resp.request.path_url},
+                response: {resp.text},
+                status code: {resp.status_code} {resp.reason}"""
+        )
+
+    logger.info(f"Request: {request}")
+    _attach_allure_step(request, resp.status_code)
+
+    file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_archive.zip")
+    with open(file_path, "wb") as file:
+        shutil.copyfileobj(resp.raw, file)
+
+    with zipfile.ZipFile(file_path, "r") as zip_ref:
+        zip_ref.extractall(ASSETS_DIR)
+
+    return os.path.join(os.getcwd(), ASSETS_DIR, prefix)
+
+
+@reporter.step_deco("Get via HTTP Gate by attribute")
+def get_via_http_gate_by_attribute(
+    cid: str, attribute: dict, endpoint: str, request_path: Optional[str] = None
+):
+    """
+    This function gets the given object from the HTTP gate
+    cid: CID to get object from
+    attribute: {name: value} attribute pair
+    endpoint: http gate endpoint
+    request_path: (optional) http request path, if omitted - use default [{endpoint}/get_by_attribute/{Key}/{Value}]
+    """
+    attr_name = list(attribute.keys())[0]
+    attr_value = quote_plus(str(attribute.get(attr_name)))
+    # if `request_path` parameter omitted, use default
+    if request_path is None:
+        request = f"{endpoint}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}"
+    else:
+        request = f"{endpoint}{request_path}"
+
+    resp = requests.get(request, stream=True)
+
+    if not resp.ok:
+        raise Exception(
+            f"""Failed to get object via HTTP gate:
+                request: {resp.request.path_url},
+                response: {resp.text},
+                status code: {resp.status_code} {resp.reason}"""
+        )
+
+    logger.info(f"Request: {request}")
+    _attach_allure_step(request, resp.status_code)
+
+    file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{str(uuid.uuid4())}")
+    with open(file_path, "wb") as file:
+        shutil.copyfileobj(resp.raw, file)
+    return file_path
+
+
+@reporter.step_deco("Upload via HTTP Gate")
+def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: Optional[dict] = None) -> str:
+    """
+    This function uploads the given object through the HTTP gate
+    cid: CID of the container to upload the object to
+    path: File path to upload
+    endpoint: http gate endpoint
+    headers: Object header
+    """
+    request = f"{endpoint}/upload/{cid}"
+    files = {"upload_file": open(path, "rb")}
+    body = {"filename": path}
+    resp = requests.post(request, files=files, data=body, headers=headers)
+
+    if not resp.ok:
+        raise Exception(
+            f"""Failed to upload object via HTTP gate:
+                request: {resp.request.path_url},
+                response: {resp.text},
+                status code: {resp.status_code} {resp.reason}"""
+        )
+
+    logger.info(f"Request: {request}")
+    _attach_allure_step(request, resp.json(), req_type="POST")
+
+    assert resp.json().get("object_id"), f"OID not found in response {resp}"
+
+    return resp.json().get("object_id")
+
+
+@reporter.step_deco("Check if the passed object is large")
+def is_object_large(filepath: str) -> bool:
+    """
+    This function checks the passed file size and returns True if file_size > SIMPLE_OBJECT_SIZE
+    filepath: File path to check
+    """
+    file_size = os.path.getsize(filepath)
+    logger.info(f"Size= {file_size}")
+    return file_size > int(SIMPLE_OBJECT_SIZE)
+
+
+@reporter.step_deco("Upload via HTTP Gate using Curl")
+def upload_via_http_gate_curl(
+    cid: str,
+    filepath: str,
+    endpoint: str,
+    headers: Optional[list] = None,
+    error_pattern: Optional[str] = None,
+) -> str:
+    """
+    This function uploads the given object through the HTTP gate using the curl utility.
+    cid: CID of the container to upload the object to
+    filepath: File path to upload
+    headers: Object header
+    endpoint: http gate endpoint
+    error_pattern: [optional] expected error message from the command
+    """
+    request = f"{endpoint}/upload/{cid}"
+    attributes = ""
+    if headers:
+        # parse attributes
+        attributes = " ".join(headers)
+
+    large_object = is_object_large(filepath)
+    if large_object:
+        # pre-clean
+        _cmd_run("rm -f pipe")
+        files = f"file=@pipe;filename={os.path.basename(filepath)}"
+        cmd = f"mkfifo pipe;cat {filepath} > pipe & curl --no-buffer -F '{files}' {attributes} {request}"
+        output = _cmd_run(cmd, LONG_TIMEOUT)
+        # clean up pipe
+        _cmd_run("rm pipe")
+    else:
+        files = f"file=@{filepath};filename={os.path.basename(filepath)}"
+        cmd = f"curl -F '{files}' {attributes} {request}"
+        output = _cmd_run(cmd)
+
+    if error_pattern:
+        match = error_pattern.casefold() in str(output).casefold()
+        assert match, f"Expected {output} to match {error_pattern}"
+        return ""
+
+    oid_re = re.search(r'"object_id": "(.*)"', output)
+    if not oid_re:
+        raise AssertionError(f'Could not find "object_id" in {output}')
+    return oid_re.group(1)
+
+
+@reporter.step_deco("Get via HTTP Gate using Curl")
+def get_via_http_curl(cid: str, oid: str, endpoint: str) -> str:
+    """
+    This function gets the given object from the HTTP gate using the curl utility.
+    cid: CID to get object from
+    oid: object OID
+    endpoint: http gate endpoint
+    """
+    request = f"{endpoint}/get/{cid}/{oid}"
+    file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}")
+
+    cmd = f"curl {request} > {file_path}"
+    _cmd_run(cmd)
+
+    return file_path
+
+
+def _attach_allure_step(request: str, status_code: int, req_type="GET"):
+    command_attachment = f"REQUEST: '{request}'\n" f"RESPONSE:\n {status_code}\n"
+    with reporter.step(f"{req_type} Request"):
+        reporter.attach(command_attachment, f"{req_type} Request")
+
+
+@reporter.step_deco("Try to get object and expect error")
+def try_to_get_object_and_expect_error(
+    cid: str, oid: str, error_pattern: str, endpoint: str
+) -> None:
+    try:
+        get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint)
+        raise AssertionError(f"Expected error on getting object with cid: {cid}")
+    except Exception as err:
+        match = error_pattern.casefold() in str(err).casefold()
+        assert match, f"Expected {err} to match {error_pattern}"
+
+
+@reporter.step_deco("Verify object can be retrieved by HTTP header attribute")
+def get_object_by_attr_and_verify_hashes(
+    oid: str, file_name: str, cid: str, attrs: dict, endpoint: str
+) -> None:
+    got_file_path_http = get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint)
+    got_file_path_http_attr = get_via_http_gate_by_attribute(
+        cid=cid, attribute=attrs, endpoint=endpoint
+    )
+    assert_hashes_are_equal(file_name, got_file_path_http, got_file_path_http_attr)
+
+
+def verify_object_hash(
+    oid: str,
+    file_name: str,
+    wallet: str,
+    cid: str,
+    shell: Shell,
+    nodes: list[StorageNode],
+    endpoint: str,
+    object_getter=None,
+) -> None:
+
+    nodes_list = get_nodes_without_object(
+        wallet=wallet,
+        cid=cid,
+        oid=oid,
+        shell=shell,
+        nodes=nodes,
+    )
+    # Sometimes nodes_list can be empty because the object resides on all nodes
+    if nodes_list:
+        random_node = random.choice(nodes_list)
+    else:
+        random_node = random.choice(nodes)
+
+    object_getter = object_getter or get_via_http_gate
+
+    got_file_path = get_object(
+        wallet=wallet,
+        cid=cid,
+        oid=oid,
+        shell=shell,
+        endpoint=random_node.get_rpc_endpoint(),
+    )
+    got_file_path_http = object_getter(cid=cid, oid=oid, endpoint=endpoint)
+
+    assert_hashes_are_equal(file_name, got_file_path, got_file_path_http)
+
+
+def assert_hashes_are_equal(orig_file_name: str, got_file_1: str, got_file_2: str) -> None:
+    msg = "Expected hashes to be equal for files {f1} and {f2}"
+    got_file_hash_http = get_file_hash(got_file_1)
+    assert get_file_hash(got_file_2) == got_file_hash_http, msg.format(f1=got_file_2, f2=got_file_1)
+    assert get_file_hash(orig_file_name) == got_file_hash_http, msg.format(
+        f1=orig_file_name, f2=got_file_1
+    )
+
+
+def attr_into_header(attrs: dict) -> dict:
+    return {f"X-Attribute-{_key}": _value for _key, _value in attrs.items()}
+
+
+@reporter.step_deco(
+    "Convert each attribute (Key=Value) to the following format: -H 'X-Attribute-Key: Value'"
+)
+def attr_into_str_header_curl(attrs: dict) -> list:
+    headers = []
+    for k, v in attrs.items():
+        headers.append(f"-H 'X-Attribute-{k}: {v}'")
+    logger.info(f"[List of Attrs for curl:] {headers}")
+    return headers
+
+
+@reporter.step_deco(
+    "Try to get object via http (pass http_request and optional attributes) and expect error"
)
+def try_to_get_object_via_passed_request_and_expect_error(
+    cid: str,
+    oid: str,
+    error_pattern: str,
+    endpoint: str,
+    http_request_path: str,
+    attrs: Optional[dict] = None,
+) -> None:
+    try:
+        if attrs is None:
+            get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, request_path=http_request_path)
+        else:
+            get_via_http_gate_by_attribute(
+                cid=cid, attribute=attrs, endpoint=endpoint, request_path=http_request_path
+            )
+        raise AssertionError(f"Expected error on getting object with cid: {cid}")
+    except Exception as err:
+        match = error_pattern.casefold() in str(err).casefold()
+        assert match, f"Expected {err} to match {error_pattern}"
diff --git a/src/frostfs_testlib/steps/node_management.py b/src/frostfs_testlib/steps/node_management.py
new file mode 100644
index 0000000..aec9b8a
--- /dev/null
+++ b/src/frostfs_testlib/steps/node_management.py
@@ -0,0 +1,351 @@
+import logging
+import random
+import re
+import time
+from dataclasses import dataclass
+from time import sleep
+from typing import Optional
+
+from frostfs_testlib.cli import FrostfsAdm, FrostfsCli
+from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib.resources.cli import (
+    FROSTFS_ADM_CONFIG_PATH,
+    FROSTFS_ADM_EXEC,
+    FROSTFS_CLI_EXEC,
+)
+from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
+from frostfs_testlib.shell import Shell
+from frostfs_testlib.steps.epoch import tick_epoch
+from frostfs_testlib.storage.cluster import Cluster, StorageNode
+from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
+from frostfs_testlib.utils import datetime_utils
+
+reporter = get_reporter()
+logger = logging.getLogger("NeoLogger")
+
+
+@dataclass
+class HealthStatus:
+    network_status: Optional[str] = None
+    health_status: Optional[str] = None
+
+    @staticmethod
+    def from_stdout(output: str) -> "HealthStatus":
+        network, health = None, None
+        for line in output.split("\n"):
+            if "Network status" in line:
+                network = line.split(":")[-1].strip()
+            if "Health status" in line:
+                health = line.split(":")[-1].strip()
+        return HealthStatus(network, health)
+
+
+@reporter.step_deco("Stop random storage nodes")
+def stop_random_storage_nodes(number: int, nodes: list[StorageNode]) -> list[StorageNode]:
+    """
+    Shuts down the given number of randomly selected storage nodes.
+    Args:
+        number: the number of storage nodes to stop
+        nodes: the list of storage nodes to stop
+    Returns:
+        the list of nodes that were stopped
+    """
+    nodes_to_stop = random.sample(nodes, number)
+    for node in nodes_to_stop:
+        node.stop_service()
+    return nodes_to_stop
+
+
+@reporter.step_deco("Start storage node")
+def start_storage_nodes(nodes: list[StorageNode]) -> None:
+    """
+    The function starts specified storage nodes.
+    Args:
+        nodes: the list of nodes to start
+    """
+    for node in nodes:
+        node.start_service()
+
+
+@reporter.step_deco("Stop storage node")
+def stop_storage_nodes(nodes: list[StorageNode]) -> None:
+    """
+    The function stops specified storage nodes.
+    Args:
+        nodes: the list of nodes to stop
+    """
+    for node in nodes:
+        node.stop_service()
+
+
+@reporter.step_deco("Get Locode from random storage node")
+def get_locode_from_random_node(cluster: Cluster) -> str:
+    node = random.choice(cluster.services(StorageNode))
+    locode = node.get_un_locode()
+    logger.info(f"Chosen '{locode}' locode from node {node}")
+    return locode
+
+
+@reporter.step_deco("Healthcheck for storage node {node}")
+def storage_node_healthcheck(node: StorageNode) -> HealthStatus:
+    """
+    The function returns the storage node's health status.
+    Args:
+        node: storage node for which health status should be retrieved.
+    Returns:
+        health status as HealthStatus object.
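+
+    Example (an illustrative sketch; `node` is assumed to come from a cluster
+    fixture, and the returned values are made up):
+        >>> status = storage_node_healthcheck(node)
+        >>> (status.network_status, status.health_status)
+        ('ONLINE', 'READY')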
+    """
+    command = "control healthcheck"
+    output = _run_control_command_with_retries(node, command)
+    return HealthStatus.from_stdout(output)
+
+
+@reporter.step_deco("Set status for {node}")
+def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) -> None:
+    """
+    The function sets a particular status for the given node.
+    Args:
+        node: node for which status should be set.
+        status: online or offline.
+        retries (optional, int): number of retry attempts if it didn't work from the first time
+    """
+    command = f"control set-status --status {status}"
+    _run_control_command_with_retries(node, command, retries)
+
+
+@reporter.step_deco("Get netmap snapshot")
+def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str:
+    """
+    The function returns a string representation of the netmap snapshot.
+    Args:
+        node: node from which netmap snapshot should be requested.
+    Returns:
+        string representation of netmap
+    """
+
+    storage_wallet_config = node.get_wallet_config_path()
+    storage_wallet_path = node.get_wallet_path()
+
+    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, config_file=storage_wallet_config)
+    return cli.netmap.snapshot(
+        rpc_endpoint=node.get_rpc_endpoint(),
+        wallet=storage_wallet_path,
+    ).stdout
+
+
+@reporter.step_deco("Get shard list for {node}")
+def node_shard_list(node: StorageNode) -> list[str]:
+    """
+    The function returns the list of shards for the specified storage node.
+    Args:
+        node: node for which shards should be returned.
+    Returns:
+        list of shards.
+    """
+    command = "control shards list"
+    output = _run_control_command_with_retries(node, command)
+    return re.findall(r"Shard (.*):", output)
+
+
+@reporter.step_deco("Set shard mode for {node}")
+def node_shard_set_mode(node: StorageNode, shard: str, mode: str) -> str:
+    """
+    The function sets the mode for the specified shard.
+    Args:
+        node: node on which shard mode should be set.
+        shard: ID of the shard.
+        mode: mode to set for the shard.
+    """
+    command = f"control shards set-mode --id {shard} --mode {mode}"
+    return _run_control_command_with_retries(node, command)
+
+
+@reporter.step_deco("Drop object from {node}")
+def drop_object(node: StorageNode, cid: str, oid: str) -> str:
+    """
+    The function drops the object from the specified node.
+    Args:
+        node: node from which the object should be dropped.
+        cid: ID of the container the object belongs to.
+        oid: ID of the object to drop.
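+
+    Example (an illustrative sketch; `node`, `cid` and `oid` are assumed to
+    be provided by the test):
+        >>> output = drop_object(node, cid, oid)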
+    """
+    command = f"control drop-objects -o {cid}/{oid}"
+    return _run_control_command_with_retries(node, command)
+
+
+@reporter.step_deco("Delete data from host for node {node}")
+def delete_node_data(node: StorageNode) -> None:
+    node.stop_service()
+    node.host.delete_storage_node_data(node.name)
+    time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
+
+
+@reporter.step_deco("Exclude node {node_to_exclude} from network map")
+def exclude_node_from_network_map(
+    node_to_exclude: StorageNode,
+    alive_node: StorageNode,
+    shell: Shell,
+    cluster: Cluster,
+) -> None:
+    node_netmap_key = node_to_exclude.get_wallet_public_key()
+
+    storage_node_set_status(node_to_exclude, status="offline")
+
+    time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
+    tick_epoch(shell, cluster)
+
+    snapshot = get_netmap_snapshot(node=alive_node, shell=shell)
+    assert (
+        node_netmap_key not in snapshot
+    ), f"Expected node with key {node_netmap_key} to be absent in network map"
+
+
+@reporter.step_deco("Include node {node_to_include} into network map")
+def include_node_to_network_map(
+    node_to_include: StorageNode,
+    alive_node: StorageNode,
+    shell: Shell,
+    cluster: Cluster,
+) -> None:
+    storage_node_set_status(node_to_include, status="online")
+
+    # Per suggestion of @fyrchik we need to wait for 2 blocks after we set status and after tick epoch.
+    # First sleep can be omitted after https://github.com/TrueCloudLab/frostfs-node/issues/60 complete.
+
+    time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)
+    tick_epoch(shell, cluster)
+    time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)
+
+    check_node_in_map(node_to_include, shell, alive_node)
+
+
+@reporter.step_deco("Check node {node} in network map")
+def check_node_in_map(
+    node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None
+) -> None:
+    alive_node = alive_node or node
+
+    node_netmap_key = node.get_wallet_public_key()
+    logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}")
+
+    snapshot = get_netmap_snapshot(alive_node, shell)
+    assert (
+        node_netmap_key in snapshot
+    ), f"Expected node with key {node_netmap_key} to be in network map"
+
+
+@reporter.step_deco("Check node {node} NOT in network map")
+def check_node_not_in_map(
+    node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None
+) -> None:
+    alive_node = alive_node or node
+
+    node_netmap_key = node.get_wallet_public_key()
+    logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}")
+
+    snapshot = get_netmap_snapshot(alive_node, shell)
+    assert (
+        node_netmap_key not in snapshot
+    ), f"Expected node with key {node_netmap_key} to be NOT in network map"
+
+
+@reporter.step_deco("Wait for node {node} to be ready")
+def wait_for_node_to_be_ready(node: StorageNode) -> None:
+    timeout, attempts = 30, 6
+    for _ in range(attempts):
+        try:
+            health_check = storage_node_healthcheck(node)
+            if health_check.health_status == "READY":
+                return
+        except Exception as err:
+            logger.warning(f"Node {node} is not ready:\n{err}")
+        sleep(timeout)
+    raise AssertionError(
+        f"Node {node} hasn't gone to the READY state after {timeout * attempts} seconds"
+    )
+
+
+@reporter.step_deco("Remove nodes from network map through frostfs-adm morph command")
+def remove_nodes_from_map_morph(
+    shell: Shell,
+    cluster: Cluster,
+    remove_nodes: list[StorageNode],
+    alive_node: Optional[StorageNode] = None,
+):
+    """
+    Move nodes to the Offline state in the candidates list and tick an epoch to update the netmap
+    using frostfs-adm
+    Args:
+        shell: local shell to make queries about current epoch. Remote shell will be used to tick a new one
+        cluster: cluster instance under test
+        alive_node: node to send requests to (first node in cluster by default)
+        remove_nodes: list of nodes which would be removed from the map
+    """
+
+    alive_node = alive_node if alive_node else remove_nodes[0]
+    remote_shell = alive_node.host.get_shell()
+
+    node_netmap_keys = list(map(StorageNode.get_wallet_public_key, remove_nodes))
+    logger.info(f"Nodes netmap keys are: {' '.join(node_netmap_keys)}")
+
+    if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH:
+        # If frostfs-adm is available, we remove the nodes with it (to be consistent with UAT tests)
+        frostfsadm = FrostfsAdm(
+            shell=remote_shell,
+            frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
+            config_file=FROSTFS_ADM_CONFIG_PATH,
+        )
+        frostfsadm.morph.remove_nodes(node_netmap_keys)
+
+
+def _run_control_command_with_retries(node: StorageNode, command: str, retries: int = 0) -> str:
+    for attempt in range(1 + retries):  # original attempt + specified retries
+        try:
+            return _run_control_command(node, command)
+        except AssertionError as err:
+            if attempt < retries:
+                logger.warning(f"Command {command} failed with error {err} and will be retried")
+                continue
+            raise AssertionError(f"Command {command} failed with error {err}") from err
+
+
+def _run_control_command(node: StorageNode, command: str) -> str:
+    host = node.host
+
+    service_config = host.get_service_config(node.name)
+    wallet_path = service_config.attributes["wallet_path"]
+    wallet_password = service_config.attributes["wallet_password"]
+    control_endpoint = service_config.attributes["control_endpoint"]
+
+    shell = host.get_shell()
+    wallet_config_path = f"/tmp/{node.name}-config.yaml"
+    wallet_config = f'password: "{wallet_password}"'
+    shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
+
+    cli_config = host.get_cli_config("frostfs-cli")
+
+    # TODO: implement cli.control
+    # cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
+    result = shell.exec(
+        f"{cli_config.exec_path} {command} --endpoint {control_endpoint} "
+        f"--wallet {wallet_path} --config {wallet_config_path}"
+    )
+    return result.stdout
+
+
+@reporter.step_deco("Start services s3gate")
+def start_s3gates(cluster: Cluster) -> None:
+    """
+    The function starts all S3 gates in the given cluster.
+    Args:
+        cluster: cluster instance under test
+    """
+    for gate in cluster.services(S3Gate):
+        gate.start_service()
+
+
+@reporter.step_deco("Stop services s3gate")
+def stop_s3gates(cluster: Cluster) -> None:
+    """
+    The function stops all S3 gates in the given cluster.
+ Args: + cluster: cluster instance under test + """ + for gate in cluster.services(S3Gate): + gate.stop_service() diff --git a/src/frostfs_testlib/steps/payment_neogo.py b/src/frostfs_testlib/steps/payment_neogo.py new file mode 100644 index 0000000..07dddd2 --- /dev/null +++ b/src/frostfs_testlib/steps/payment_neogo.py @@ -0,0 +1,217 @@ +import base64 +import json +import logging +import re +import time +from typing import Optional + +from neo3.wallet import utils as neo3_utils +from neo3.wallet import wallet as neo3_wallet + +from frostfs_testlib.cli import NeoGo +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.resources.cli import NEOGO_EXECUTABLE +from frostfs_testlib.resources.common import FROSTFS_CONTRACT, GAS_HASH, MORPH_BLOCK_TIME +from frostfs_testlib.shell import Shell +from frostfs_testlib.storage.dataclasses.frostfs_services import MainChain, MorphChain +from frostfs_testlib.utils import converting_utils, datetime_utils, wallet_utils + +reporter = get_reporter() +logger = logging.getLogger("NeoLogger") + +EMPTY_PASSWORD = "" +TX_PERSIST_TIMEOUT = 15 # seconds +ASSET_POWER_MAINCHAIN = 10**8 +ASSET_POWER_SIDECHAIN = 10**12 + + +def get_nns_contract_hash(morph_chain: MorphChain) -> str: + return morph_chain.rpc_client.get_contract_state(1)["hash"] + + +def get_contract_hash(morph_chain: MorphChain, resolve_name: str, shell: Shell) -> str: + nns_contract_hash = get_nns_contract_hash(morph_chain) + neogo = NeoGo(shell=shell, neo_go_exec_path=NEOGO_EXECUTABLE) + out = neogo.contract.testinvokefunction( + scripthash=nns_contract_hash, + method="resolve", + arguments=f"string:{resolve_name} int:16", + rpc_endpoint=morph_chain.get_endpoint(), + ) + stack_data = json.loads(out.stdout.replace("\n", ""))["stack"][0]["value"] + return bytes.decode(base64.b64decode(stack_data[0]["value"])) + + +@reporter.step_deco("Withdraw Mainnet Gas") +def withdraw_mainnet_gas(shell: Shell, main_chain: MainChain, wlt: str, amount: int): + address = wallet_utils.get_last_address_from_wallet(wlt, EMPTY_PASSWORD) + scripthash = neo3_utils.address_to_script_hash(address) + + neogo = NeoGo(shell=shell, neo_go_exec_path=NEOGO_EXECUTABLE) + out = neogo.contract.invokefunction( + wallet=wlt, + address=address, + rpc_endpoint=main_chain.get_endpoint(), + scripthash=FROSTFS_CONTRACT, + method="withdraw", + arguments=f"{scripthash} int:{amount}", + multisig_hash=f"{scripthash}:Global", + wallet_password="", + ) + + m = re.match(r"^Sent invocation transaction (\w{64})$", out.stdout) + if m is None: + raise Exception("Can not get Tx.") + tx = m.group(1) + if not transaction_accepted(main_chain, tx): + raise AssertionError(f"TX {tx} hasn't been processed") + + +def transaction_accepted(main_chain: MainChain, tx_id: str): + """ + This function returns True in case of accepted TX. + Args: + tx_id(str): transaction ID + Returns: + (bool) + """ + + try: + for _ in range(0, TX_PERSIST_TIMEOUT): + time.sleep(1) + resp = main_chain.rpc_client.get_transaction_height(tx_id) + if resp is not None: + logger.info(f"TX is accepted in block: {resp}") + return True, resp + except Exception as out: + logger.info(f"request failed with error: {out}") + raise out + return False + + +@reporter.step_deco("Get FrostFS Balance") +def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_password: str = ""): + """ + This function returns FrostFS balance for given wallet. 
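+
+    Example (an illustrative sketch; `shell` and `morph_chain` come from the
+    test fixtures, and the wallet path and returned value are made up):
+        >>> get_balance(shell, morph_chain, "/path/to/wallet.json")
+        50.0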
+ """ + with open(wallet_path) as wallet_file: + wallet = neo3_wallet.Wallet.from_json(json.load(wallet_file), password=wallet_password) + acc = wallet.accounts[-1] + payload = [{"type": "Hash160", "value": str(acc.script_hash)}] + try: + resp = morph_chain.rpc_client.invoke_function( + get_contract_hash(morph_chain, "balance.frostfs", shell=shell), "balanceOf", payload + ) + logger.info(f"Got response \n{resp}") + value = int(resp["stack"][0]["value"]) + return value / ASSET_POWER_SIDECHAIN + except Exception as out: + logger.error(f"failed to get wallet balance: {out}") + raise out + + +@reporter.step_deco("Transfer Gas") +def transfer_gas( + shell: Shell, + amount: int, + main_chain: MainChain, + wallet_from_path: Optional[str] = None, + wallet_from_password: Optional[str] = None, + address_from: Optional[str] = None, + address_to: Optional[str] = None, + wallet_to_path: Optional[str] = None, + wallet_to_password: Optional[str] = None, +): + """ + This function transfer GAS in main chain from mainnet wallet to + the provided wallet. If the wallet contains more than one address, + the assets will be transferred to the last one. + Args: + shell: Shell instance. + wallet_from_password: Password of the wallet; it is required to decode the wallet + and extract its addresses. + wallet_from_path: Path to chain node wallet. + address_from: The address of the wallet to transfer assets from. + wallet_to_path: The path to the wallet to transfer assets to. + wallet_to_password: The password to the wallet to transfer assets to. + address_to: The address of the wallet to transfer assets to. + amount: Amount of gas to transfer. + """ + wallet_from_path = wallet_from_path or main_chain.get_wallet_path() + wallet_from_password = ( + wallet_from_password + if wallet_from_password is not None + else main_chain.get_wallet_password() + ) + address_from = address_from or wallet_utils.get_last_address_from_wallet( + wallet_from_path, wallet_from_password + ) + address_to = address_to or wallet_utils.get_last_address_from_wallet( + wallet_to_path, wallet_to_password + ) + + neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE) + out = neogo.nep17.transfer( + rpc_endpoint=main_chain.get_endpoint(), + wallet=wallet_from_path, + wallet_password=wallet_from_password, + amount=amount, + from_address=address_from, + to_address=address_to, + token="GAS", + force=True, + ) + txid = out.stdout.strip().split("\n")[-1] + if len(txid) != 64: + raise Exception("Got no TXID after run the command") + if not transaction_accepted(main_chain, txid): + raise AssertionError(f"TX {txid} hasn't been processed") + time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) + + +@reporter.step_deco("FrostFS Deposit") +def deposit_gas( + shell: Shell, + main_chain: MainChain, + amount: int, + wallet_from_path: str, + wallet_from_password: str, +): + """ + Transferring GAS from given wallet to FrostFS contract address. 
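+
+    Example (an illustrative sketch; `shell` and `main_chain` come from the
+    test fixtures, and the wallet path is made up):
+        >>> deposit_gas(shell, main_chain, amount=30,
+        ...             wallet_from_path="/path/to/wallet.json",
+        ...             wallet_from_password="")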
+ """ + # get FrostFS contract address + deposit_addr = converting_utils.contract_hash_to_address(FROSTFS_CONTRACT) + logger.info(f"FrostFS contract address: {deposit_addr}") + address_from = wallet_utils.get_last_address_from_wallet( + wallet_path=wallet_from_path, wallet_password=wallet_from_password + ) + transfer_gas( + shell=shell, + main_chain=main_chain, + amount=amount, + wallet_from_path=wallet_from_path, + wallet_from_password=wallet_from_password, + address_to=deposit_addr, + address_from=address_from, + ) + + +@reporter.step_deco("Get Mainnet Balance") +def get_mainnet_balance(main_chain: MainChain, address: str): + resp = main_chain.rpc_client.get_nep17_balances(address=address) + logger.info(f"Got getnep17balances response: {resp}") + for balance in resp["balance"]: + if balance["assethash"] == GAS_HASH: + return float(balance["amount"]) / ASSET_POWER_MAINCHAIN + return float(0) + + +@reporter.step_deco("Get Sidechain Balance") +def get_sidechain_balance(morph_chain: MorphChain, address: str): + resp = morph_chain.rpc_client.get_nep17_balances(address=address) + logger.info(f"Got getnep17balances response: {resp}") + for balance in resp["balance"]: + if balance["assethash"] == GAS_HASH: + return float(balance["amount"]) / ASSET_POWER_SIDECHAIN + return float(0) diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py new file mode 100644 index 0000000..87f929e --- /dev/null +++ b/src/frostfs_testlib/steps/s3/s3_helper.py @@ -0,0 +1,247 @@ +import json +import logging +import os +import re +import uuid +from datetime import datetime, timedelta +from typing import Optional + +from dateutil.parser import parse + +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC +from frostfs_testlib.resources.common import CREDENTIALS_CREATE_TIMEOUT +from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus +from frostfs_testlib.storage.cluster import Cluster +from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate +from frostfs_testlib.utils.cli_utils import _run_with_passwd + +reporter = get_reporter() +logger = logging.getLogger("NeoLogger") + + +@reporter.step_deco("Expected all objects are presented in the bucket") +def check_objects_in_bucket( + s3_client: S3ClientWrapper, + bucket: str, + expected_objects: list, + unexpected_objects: Optional[list] = None, +) -> None: + unexpected_objects = unexpected_objects or [] + bucket_objects = s3_client.list_objects(bucket) + assert len(bucket_objects) == len( + expected_objects + ), f"Expected {len(expected_objects)} objects in the bucket" + for bucket_object in expected_objects: + assert ( + bucket_object in bucket_objects + ), f"Expected object {bucket_object} in objects list {bucket_objects}" + + for bucket_object in unexpected_objects: + assert ( + bucket_object not in bucket_objects + ), f"Expected object {bucket_object} not in objects list {bucket_objects}" + + +@reporter.step_deco("Try to get object and got error") +def try_to_get_objects_and_expect_error( + s3_client: S3ClientWrapper, bucket: str, object_keys: list +) -> None: + for obj in object_keys: + try: + s3_client.get_object(bucket, obj) + raise AssertionError(f"Object {obj} found in bucket {bucket}") + except Exception as err: + assert "The specified key does not exist" in str( + err + ), f"Expected error in exception {err}" + + +@reporter.step_deco("Set versioning status to '{status}' for bucket '{bucket}'") +def set_bucket_versioning(s3_client: 
S3ClientWrapper, bucket: str, status: VersioningStatus):
+    s3_client.get_bucket_versioning_status(bucket)
+    s3_client.put_bucket_versioning(bucket, status=status)
+    bucket_status = s3_client.get_bucket_versioning_status(bucket)
+    assert bucket_status == status.value, f"Expected {status.value} status. Got {bucket_status}"
+
+
+def object_key_from_file_path(full_path: str) -> str:
+    return os.path.basename(full_path)
+
+
+def assert_tags(
+    actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None
+) -> None:
+    expected_tags = (
+        [{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else []
+    )
+    unexpected_tags = (
+        [{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else []
+    )
+    if expected_tags == []:
+        assert not actual_tags, f"Expected no tags, got {actual_tags}"
+    assert len(expected_tags) == len(actual_tags), f"Expected {len(expected_tags)} tags, got {len(actual_tags)}"
+    for tag in expected_tags:
+        assert tag in actual_tags, f"Tag {tag} must be in {actual_tags}"
+    for tag in unexpected_tags:
+        assert tag not in actual_tags, f"Tag {tag} should not be in {actual_tags}"
+
+
+@reporter.step_deco("Expected all tags are present in the object")
+def check_tags_by_object(
+    s3_client: S3ClientWrapper,
+    bucket: str,
+    key: str,
+    expected_tags: list,
+    unexpected_tags: Optional[list] = None,
+) -> None:
+    actual_tags = s3_client.get_object_tagging(bucket, key)
+    assert_tags(
+        expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags
+    )
+
+
+@reporter.step_deco("Expected all tags are present in the bucket")
+def check_tags_by_bucket(
+    s3_client: S3ClientWrapper,
+    bucket: str,
+    expected_tags: list,
+    unexpected_tags: Optional[list] = None,
+) -> None:
+    actual_tags = s3_client.get_bucket_tagging(bucket)
+    assert_tags(
+        expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags
+    )
+
+
+def assert_object_lock_mode(
+    s3_client: S3ClientWrapper,
+    bucket: str,
+    file_name: str,
+    object_lock_mode: str,
+    retain_until_date: datetime,
+    legal_hold_status: str = "OFF",
+    retain_period: Optional[int] = None,
+):
+    object_dict = s3_client.get_object(bucket, file_name, full_output=True)
+    assert (
+        object_dict.get("ObjectLockMode") == object_lock_mode
+    ), f"Expected Object Lock Mode is {object_lock_mode}"
+    assert (
+        object_dict.get("ObjectLockLegalHoldStatus") == legal_hold_status
+    ), f"Expected Object Lock Legal Hold Status is {legal_hold_status}"
+    object_retain_date = object_dict.get("ObjectLockRetainUntilDate")
+    retain_date = (
+        parse(object_retain_date) if isinstance(object_retain_date, str) else object_retain_date
+    )
+    if retain_until_date:
+        assert retain_date.strftime("%Y-%m-%dT%H:%M:%S") == retain_until_date.strftime(
+            "%Y-%m-%dT%H:%M:%S"
+        ), f'Expected Object Lock Retain Until Date is {str(retain_until_date.strftime("%Y-%m-%dT%H:%M:%S"))}'
+    elif retain_period:
+        last_modify_date = object_dict.get("LastModified")
+        last_modify = (
+            parse(last_modify_date) if isinstance(last_modify_date, str) else last_modify_date
+        )
+        assert (
+            retain_date - last_modify + timedelta(seconds=1)
+        ).days == retain_period, f"Expected retention period is {retain_period} days"
+
+
+def assert_s3_acl(acl_grants: list, permitted_users: str):
+    if permitted_users == "AllUsers":
+        grantees = {"AllUsers": 0, "CanonicalUser": 0}
+        for acl_grant in acl_grants:
+            if acl_grant.get("Grantee", {}).get("Type") == "Group":
+                uri = acl_grant.get("Grantee", {}).get("URI")
+                permission = acl_grant.get("Permission")
+                assert (uri, permission) == (
+                    "http://acs.amazonaws.com/groups/global/AllUsers",
+                    "FULL_CONTROL",
+                ), "All Groups should have FULL_CONTROL"
+                grantees["AllUsers"] += 1
+            if acl_grant.get("Grantee", {}).get("Type") == "CanonicalUser":
+                permission = acl_grant.get("Permission")
+                assert permission == "FULL_CONTROL", "Canonical User should have FULL_CONTROL"
+                grantees["CanonicalUser"] += 1
+        assert grantees["AllUsers"] >= 1, "All Users should have FULL_CONTROL"
+        assert grantees["CanonicalUser"] >= 1, "Canonical User should have FULL_CONTROL"
+
+    if permitted_users == "CanonicalUser":
+        for acl_grant in acl_grants:
+            if acl_grant.get("Grantee", {}).get("Type") == "CanonicalUser":
+                permission = acl_grant.get("Permission")
+                assert permission == "FULL_CONTROL", "Only CanonicalUser should have FULL_CONTROL"
+            else:
+                logger.error("FULL_CONTROL is given to All Users")
+
+
+@reporter.step_deco("Init S3 Credentials")
+def init_s3_credentials(
+    wallet_path: str,
+    cluster: Cluster,
+    s3_bearer_rules_file: str,
+    policy: Optional[dict] = None,
+):
+    bucket = str(uuid.uuid4())
+
+    s3gate_node = cluster.services(S3Gate)[0]
+    gate_public_key = s3gate_node.get_wallet_public_key()
+    cmd = (
+        f"{FROSTFS_AUTHMATE_EXEC} --debug --with-log --timeout {CREDENTIALS_CREATE_TIMEOUT} "
+        f"issue-secret --wallet {wallet_path} --gate-public-key={gate_public_key} "
+        f"--peer {cluster.default_rpc_endpoint} --container-friendly-name {bucket} "
+        f"--bearer-rules {s3_bearer_rules_file}"
+    )
+    if policy:
+        cmd += f" --container-policy {policy}"
+    logger.info(f"Executing command: {cmd}")
+
+    try:
+        output = _run_with_passwd(cmd)
+        logger.info(f"Command completed with output: {output}")
+
+        # output contains some debug info and then several JSON structures, so we find each
+        # JSON structure by curly brackets (naive approach, but works while JSON is not nested)
+        # and then we take the JSON containing secret_access_key
+        json_blocks = re.findall(r"\{.*?\}", output, re.DOTALL)
+        for json_block in json_blocks:
+            try:
+                parsed_json_block = json.loads(json_block)
+                if "secret_access_key" in parsed_json_block:
+                    return (
+                        parsed_json_block["container_id"],
+                        parsed_json_block["access_key_id"],
+                        parsed_json_block["secret_access_key"],
+                    )
+            except json.JSONDecodeError:
+                raise AssertionError(f"Could not parse info from output\n{output}")
+        raise AssertionError(f"Could not find AWS credentials in output:\n{output}")
+
+    except Exception as exc:
+        raise RuntimeError(f"Failed to init s3 credentials because of error\n{exc}") from exc
+
+
+@reporter.step_deco("Delete bucket with all objects")
+def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str):
+    versioning_status = s3_client.get_bucket_versioning_status(bucket)
+    if versioning_status == VersioningStatus.ENABLED.value:
+        # From a versioned bucket we should delete all versions and delete markers of all objects
+        objects_versions = s3_client.list_objects_versions(bucket)
+        if objects_versions:
+            s3_client.delete_object_versions_without_dm(bucket, objects_versions)
+        objects_delete_markers = s3_client.list_delete_markers(bucket)
+        if objects_delete_markers:
+            s3_client.delete_object_versions_without_dm(bucket, objects_delete_markers)
+
+    else:
+        # From a non-versioned bucket it's sufficient to delete objects by key
+        objects = s3_client.list_objects(bucket)
+        if objects:
+            s3_client.delete_objects(bucket, objects)
+        objects_delete_markers = s3_client.list_delete_markers(bucket)
+        if objects_delete_markers:
+            s3_client.delete_object_versions_without_dm(bucket, 
objects_delete_markers) + + # Delete the bucket itself + s3_client.delete_bucket(bucket) diff --git a/src/frostfs_testlib/steps/session_token.py b/src/frostfs_testlib/steps/session_token.py new file mode 100644 index 0000000..14e25f1 --- /dev/null +++ b/src/frostfs_testlib/steps/session_token.py @@ -0,0 +1,287 @@ +import base64 +import json +import logging +import os +import uuid +from dataclasses import dataclass +from enum import Enum +from typing import Any, Optional + +from frostfs_testlib.cli import FrostfsCli +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC +from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG +from frostfs_testlib.shell import Shell +from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo +from frostfs_testlib.utils import json_utils, wallet_utils + +reporter = get_reporter() +logger = logging.getLogger("NeoLogger") + +UNRELATED_KEY = "unrelated key in the session" +UNRELATED_OBJECT = "unrelated object in the session" +UNRELATED_CONTAINER = "unrelated container in the session" +WRONG_VERB = "wrong verb of the session" +INVALID_SIGNATURE = "invalid signature of the session data" + + +class ObjectVerb(Enum): + PUT = "PUT" + DELETE = "DELETE" + GET = "GET" + RANGEHASH = "RANGEHASH" + RANGE = "RANGE" + HEAD = "HEAD" + SEARCH = "SEARCH" + + +class ContainerVerb(Enum): + CREATE = "PUT" + DELETE = "DELETE" + SETEACL = "SETEACL" + + +@dataclass +class Lifetime: + exp: int = 100000000 + nbf: int = 0 + iat: int = 0 + + +@reporter.step_deco("Generate Session Token") +def generate_session_token( + owner_wallet: WalletInfo, + session_wallet: WalletInfo, + session: dict[str, dict[str, Any]], + tokens_dir: str, + lifetime: Optional[Lifetime] = None, +) -> str: + """ + This function generates session token and writes it to the file. + Args: + owner_wallet: wallet of container owner + session_wallet: wallet to which we grant the access via session token + session: Contains allowed operation with parameters + tokens_dir: Dir for token + lifetime: lifetime options for session + Returns: + The path to the generated session token file + """ + + file_path = os.path.join(tokens_dir, str(uuid.uuid4())) + + pub_key_64 = wallet_utils.get_wallet_public_key( + session_wallet.path, session_wallet.password, "base64" + ) + + lifetime = lifetime or Lifetime() + + session_token = { + "body": { + "id": f"{base64.b64encode(uuid.uuid4().bytes).decode('utf-8')}", + "ownerID": {"value": f"{json_utils.encode_for_json(owner_wallet.get_address())}"}, + "lifetime": { + "exp": f"{lifetime.exp}", + "nbf": f"{lifetime.nbf}", + "iat": f"{lifetime.iat}", + }, + "sessionKey": pub_key_64, + } + } + session_token["body"].update(session) + + logger.info(f"Got this Session Token: {session_token}") + with open(file_path, "w", encoding="utf-8") as session_token_file: + json.dump(session_token, session_token_file, ensure_ascii=False, indent=4) + + return file_path + + +@reporter.step_deco("Generate Session Token For Container") +def generate_container_session_token( + owner_wallet: WalletInfo, + session_wallet: WalletInfo, + verb: ContainerVerb, + tokens_dir: str, + lifetime: Optional[Lifetime] = None, + cid: Optional[str] = None, +) -> str: + """ + This function generates session token for ContainerSessionContext + and writes it to the file. 
It is able to prepare a session token file
+    for a specific container (sets the "containerID" field) or for every
+    container (sets the "wildcard" field).
+    Args:
+        owner_wallet: wallet of container owner.
+        session_wallet: wallet to which we grant the access via session token.
+        verb: verb to grant access to.
+        lifetime: lifetime options for session.
+        cid: container ID of the container
+    Returns:
+        The path to the generated session token file
+    """
+    session = {
+        "container": {
+            "verb": verb.value,
+            "wildcard": cid is None,
+            **(
+                {"containerID": {"value": f"{json_utils.encode_for_json(cid)}"}}
+                if cid is not None
+                else {}
+            ),
+        },
+    }
+
+    return generate_session_token(
+        owner_wallet=owner_wallet,
+        session_wallet=session_wallet,
+        session=session,
+        tokens_dir=tokens_dir,
+        lifetime=lifetime,
+    )
+
+
+@reporter.step_deco("Generate Session Token For Object")
+def generate_object_session_token(
+    owner_wallet: WalletInfo,
+    session_wallet: WalletInfo,
+    oids: list[str],
+    cid: str,
+    verb: ObjectVerb,
+    tokens_dir: str,
+    lifetime: Optional[Lifetime] = None,
+) -> str:
+    """
+    This function generates a session token for ObjectSessionContext
+    and writes it to the file.
+    Args:
+        owner_wallet: wallet of container owner
+        session_wallet: wallet to which we grant the access via session token
+        cid: container ID of the container
+        oids: list of objectIDs to put into session
+        verb: verb to grant access to; valid verbs are: ObjectVerb.
+        lifetime: lifetime options for session
+    Returns:
+        The path to the generated session token file
+    """
+    session = {
+        "object": {
+            "verb": verb.value,
+            "target": {
+                "container": {"value": json_utils.encode_for_json(cid)},
+                "objects": [{"value": json_utils.encode_for_json(oid)} for oid in oids],
+            },
+        },
+    }
+
+    return generate_session_token(
+        owner_wallet=owner_wallet,
+        session_wallet=session_wallet,
+        session=session,
+        tokens_dir=tokens_dir,
+        lifetime=lifetime,
+    )
+
+
+@reporter.step_deco("Get signed token for container session")
+def get_container_signed_token(
+    owner_wallet: WalletInfo,
+    user_wallet: WalletInfo,
+    verb: ContainerVerb,
+    shell: Shell,
+    tokens_dir: str,
+    lifetime: Optional[Lifetime] = None,
+) -> str:
+    """
+    Returns the signed token file path for a static container session
+    """
+    session_token_file = generate_container_session_token(
+        owner_wallet=owner_wallet,
+        session_wallet=user_wallet,
+        verb=verb,
+        tokens_dir=tokens_dir,
+        lifetime=lifetime,
+    )
+    return sign_session_token(shell, session_token_file, owner_wallet)
+
+
+@reporter.step_deco("Get signed token for object session")
+def get_object_signed_token(
+    owner_wallet: WalletInfo,
+    user_wallet: WalletInfo,
+    cid: str,
+    storage_objects: list[StorageObjectInfo],
+    verb: ObjectVerb,
+    shell: Shell,
+    tokens_dir: str,
+    lifetime: Optional[Lifetime] = None,
+) -> str:
+    """
+    Returns the signed token file path for a static object session
+    """
+    storage_object_ids = [storage_object.oid for storage_object in storage_objects]
+    session_token_file = generate_object_session_token(
+        owner_wallet=owner_wallet,
+        session_wallet=user_wallet,
+        oids=storage_object_ids,
+        cid=cid,
+        verb=verb,
+        tokens_dir=tokens_dir,
+        lifetime=lifetime,
+    )
+    return sign_session_token(shell, session_token_file, owner_wallet)
+
+
+@reporter.step_deco("Create Session Token")
+def create_session_token(
+    shell: Shell,
+    owner: str,
+    wallet_path: str,
+    wallet_password: str,
+    rpc_endpoint: str,
+) -> str:
+    """
+    Create session token for an object.
+    Args:
+        shell: Shell instance.
+        owner: User that writes the token.
+
+        wallet_path: The path to the wallet to which we grant the access via session token.
+        wallet_password: Wallet password.
+        rpc_endpoint: Remote node address (as 'multiaddr' or 'host:port').
+    Returns:
+        The path to the generated session token file.
+    """
+    session_token = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
+    frostfscli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC)
+    frostfscli.session.create(
+        rpc_endpoint=rpc_endpoint,
+        address=owner,
+        wallet=wallet_path,
+        wallet_password=wallet_password,
+        out=session_token,
+    )
+    return session_token
+
+
+@reporter.step_deco("Sign Session Token")
+def sign_session_token(shell: Shell, session_token_file: str, wlt: WalletInfo) -> str:
+    """
+    This function signs the session token with the given wallet.
+
+    Args:
+        shell: Shell instance.
+        session_token_file: The path to the session token file.
+        wlt: The signing wallet.
+
+    Returns:
+        The path to the signed token.
+    """
+    signed_token_file = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
+    frostfscli = FrostfsCli(
+        shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG
+    )
+    frostfscli.util.sign_session_token(
+        wallet=wlt.path, from_file=session_token_file, to_file=signed_token_file
+    )
+    return signed_token_file
diff --git a/src/frostfs_testlib/steps/storage_object.py b/src/frostfs_testlib/steps/storage_object.py
new file mode 100644
index 0000000..7776754
--- /dev/null
+++ b/src/frostfs_testlib/steps/storage_object.py
@@ -0,0 +1,63 @@
+import logging
+from time import sleep
+
+import pytest
+
+from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib.resources.error_patterns import OBJECT_ALREADY_REMOVED
+from frostfs_testlib.shell import Shell
+from frostfs_testlib.steps.cli.object import delete_object, get_object
+from frostfs_testlib.steps.epoch import tick_epoch
+from frostfs_testlib.steps.tombstone import verify_head_tombstone
+from frostfs_testlib.storage.cluster import Cluster
+from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
+
+reporter = get_reporter()
+logger = logging.getLogger("NeoLogger")
+
+CLEANUP_TIMEOUT = 10
+
+
+@reporter.step_deco("Delete Objects")
+def delete_objects(
+    storage_objects: list[StorageObjectInfo], shell: Shell, cluster: Cluster
+) -> None:
+    """
+    Deletes the given storage objects.
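+
+    Example (an illustrative sketch; `storage_objects`, `shell` and `cluster`
+    are assumed to come from the surrounding test fixtures):
+        >>> delete_objects(storage_objects, shell, cluster)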
+ + Args: + storage_objects: list of objects to delete + shell: executor for cli command + """ + + with reporter.step("Delete objects"): + for storage_object in storage_objects: + storage_object.tombstone = delete_object( + storage_object.wallet_file_path, + storage_object.cid, + storage_object.oid, + shell=shell, + endpoint=cluster.default_rpc_endpoint, + ) + verify_head_tombstone( + wallet_path=storage_object.wallet_file_path, + cid=storage_object.cid, + oid_ts=storage_object.tombstone, + oid=storage_object.oid, + shell=shell, + endpoint=cluster.default_rpc_endpoint, + ) + + tick_epoch(shell, cluster) + sleep(CLEANUP_TIMEOUT) + + with reporter.step("Get objects and check errors"): + for storage_object in storage_objects: + with pytest.raises(Exception, match=OBJECT_ALREADY_REMOVED): + get_object( + storage_object.wallet_file_path, + storage_object.cid, + storage_object.oid, + shell=shell, + endpoint=cluster.default_rpc_endpoint, + ) diff --git a/src/frostfs_testlib/steps/storage_policy.py b/src/frostfs_testlib/steps/storage_policy.py new file mode 100644 index 0000000..eca25d2 --- /dev/null +++ b/src/frostfs_testlib/steps/storage_policy.py @@ -0,0 +1,173 @@ +#!/usr/bin/python3 + +""" + This module contains keywords which are used for asserting + that storage policies are respected. +""" +import logging + +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND +from frostfs_testlib.shell import Shell +from frostfs_testlib.steps.cli.object import head_object +from frostfs_testlib.steps.complex_object_actions import get_last_object +from frostfs_testlib.storage.cluster import StorageNode +from frostfs_testlib.utils import string_utils + +reporter = get_reporter() +logger = logging.getLogger("NeoLogger") + + +@reporter.step_deco("Get Object Copies") +def get_object_copies( + complexity: str, wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode] +) -> int: + """ + The function performs requests to all nodes of the container and + finds out if they store a copy of the object. The procedure is + different for simple and complex object, so the function requires + a sign of object complexity. + Args: + complexity (str): the tag of object size and complexity, + [Simple|Complex] + wallet (str): the path to the wallet on whose behalf the + copies are got + cid (str): ID of the container + oid (str): ID of the Object + shell: executor for cli command + Returns: + (int): the number of object copies in the container + """ + return ( + get_simple_object_copies(wallet, cid, oid, shell, nodes) + if complexity == "Simple" + else get_complex_object_copies(wallet, cid, oid, shell, nodes) + ) + + +@reporter.step_deco("Get Simple Object Copies") +def get_simple_object_copies( + wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode] +) -> int: + """ + To figure out the number of a simple object copies, only direct + HEAD requests should be made to the every node of the container. + We consider non-empty HEAD response as a stored object copy. 
+    Args:
+        wallet (str): the path to the wallet on whose behalf the
+                      copies are got
+        cid (str): ID of the container
+        oid (str): ID of the Object
+        shell: executor for cli command
+        nodes: nodes to search on
+    Returns:
+        (int): the number of object copies in the container
+    """
+    copies = 0
+    for node in nodes:
+        try:
+            response = head_object(
+                wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True
+            )
+            if response:
+                logger.info(f"Found object {oid} on node {node}")
+                copies += 1
+        except Exception:
+            logger.info(f"No {oid} object copy found on {node}, continue")
+            continue
+    return copies
+
+
+@reporter.step_deco("Get Complex Object Copies")
+def get_complex_object_copies(
+    wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
+) -> int:
+    """
+    To figure out the number of copies of a complex object, we first
+    need to retrieve its last child object: the number of copies of a
+    complex object is considered equal to the number of copies of its
+    last child. Once we have the last object's ID, the task is reduced
+    to counting simple object copies.
+    Args:
+        wallet (str): the path to the wallet on whose behalf the
+                      copies are got
+        cid (str): ID of the container
+        oid (str): ID of the Object
+        shell: executor for cli command
+    Returns:
+        (int): the number of object copies in the container
+    """
+    last_oid = get_last_object(wallet, cid, oid, shell, nodes)
+    assert last_oid, f"No Last Object for {cid}/{oid} found among all Storage Nodes"
+    return get_simple_object_copies(wallet, cid, last_oid, shell, nodes)
+
+
+@reporter.step_deco("Get Nodes With Object")
+def get_nodes_with_object(
+    cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
+) -> list[StorageNode]:
+    """
+    Returns the list of nodes which store the given object.
+    Args:
+        cid (str): ID of the container which stores the object
+        oid (str): object ID
+        shell: executor for cli command
+        nodes: nodes to find on
+    Returns:
+        (list): nodes which store the object
+    """
+
+    nodes_list = []
+    for node in nodes:
+        wallet = node.get_wallet_path()
+        wallet_config = node.get_wallet_config_path()
+        try:
+            res = head_object(
+                wallet,
+                cid,
+                oid,
+                shell=shell,
+                endpoint=node.get_rpc_endpoint(),
+                is_direct=True,
+                wallet_config=wallet_config,
+            )
+            if res is not None:
+                logger.info(f"Found object {oid} on node {node}")
+                nodes_list.append(node)
+        except Exception:
+            logger.info(f"No {oid} object copy found on {node}, continue")
+            continue
+    return nodes_list
+
+
+@reporter.step_deco("Get Nodes Without Object")
+def get_nodes_without_object(
+    wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
+) -> list[StorageNode]:
+    """
+    Returns the list of nodes which do not store the given object.
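+    Complementary to get_nodes_with_object(): every polled node either stores
+    the object or ends up in this list (errors other than "object not found"
+    are re-raised).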
+ Args: + wallet (str): the path to the wallet on whose behalf + we request the nodes + cid (str): ID of the container which store the object + oid (str): object ID + shell: executor for cli command + Returns: + (list): nodes which do not store the object + """ + nodes_list = [] + for node in nodes: + try: + res = head_object( + wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True + ) + if res is None: + nodes_list.append(node) + except Exception as err: + if string_utils.is_str_match_pattern(err, OBJECT_NOT_FOUND): + nodes_list.append(node) + else: + raise Exception(f"Got error {err} on head object command") from err + return nodes_list diff --git a/src/frostfs_testlib/steps/tombstone.py b/src/frostfs_testlib/steps/tombstone.py new file mode 100644 index 0000000..a46cf77 --- /dev/null +++ b/src/frostfs_testlib/steps/tombstone.py @@ -0,0 +1,41 @@ +import json +import logging + +from neo3.wallet import wallet + +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.shell import Shell +from frostfs_testlib.steps.cli.object import head_object + +reporter = get_reporter() +logger = logging.getLogger("NeoLogger") + + +@reporter.step_deco("Verify Head Tombstone") +def verify_head_tombstone( + wallet_path: str, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str +): + header = head_object(wallet_path, cid, oid_ts, shell=shell, endpoint=endpoint)["header"] + + s_oid = header["sessionToken"]["body"]["object"]["target"]["objects"] + logger.info(f"Header Session OIDs is {s_oid}") + logger.info(f"OID is {oid}") + + assert header["containerID"] == cid, "Tombstone Header CID is wrong" + + with open(wallet_path, "r") as file: + wlt_data = json.loads(file.read()) + wlt = wallet.Wallet.from_json(wlt_data, password="") + addr = wlt.accounts[0].address + + assert header["ownerID"] == addr, "Tombstone Owner ID is wrong" + assert header["objectType"] == "TOMBSTONE", "Header Type isn't Tombstone" + assert ( + header["sessionToken"]["body"]["object"]["verb"] == "DELETE" + ), "Header Session Type isn't DELETE" + assert ( + header["sessionToken"]["body"]["object"]["target"]["container"] == cid + ), "Header Session ID is wrong" + assert ( + oid in header["sessionToken"]["body"]["object"]["target"]["objects"] + ), "Header Session OID is wrong" diff --git a/src/frostfs_testlib/storage/__init__.py b/src/frostfs_testlib/storage/__init__.py new file mode 100644 index 0000000..531964c --- /dev/null +++ b/src/frostfs_testlib/storage/__init__.py @@ -0,0 +1,33 @@ +from frostfs_testlib.storage.constants import _FrostfsServicesNames +from frostfs_testlib.storage.dataclasses.frostfs_services import ( + HTTPGate, + InnerRing, + MainChain, + MorphChain, + S3Gate, + StorageNode, +) +from frostfs_testlib.storage.service_registry import ServiceRegistry + +__class_registry = ServiceRegistry() + +# Register default public services +__class_registry.register_service(_FrostfsServicesNames.STORAGE, StorageNode) +__class_registry.register_service(_FrostfsServicesNames.INNER_RING, InnerRing) +__class_registry.register_service(_FrostfsServicesNames.MORPH_CHAIN, MorphChain) +__class_registry.register_service(_FrostfsServicesNames.S3_GATE, S3Gate) +__class_registry.register_service(_FrostfsServicesNames.HTTP_GATE, HTTPGate) +# # TODO: Remove this since we are no longer have main chain +__class_registry.register_service(_FrostfsServicesNames.MAIN_CHAIN, MainChain) + + +def get_service_registry() -> ServiceRegistry: + """Returns registry with registered classes related to cluster and 
cluster nodes. + + ServiceClassRegistry is a singleton instance that can be configured with multiple classes that + represents service on the cluster physical node. + + Returns: + Singleton ServiceClassRegistry instance. + """ + return __class_registry diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py new file mode 100644 index 0000000..db2ea37 --- /dev/null +++ b/src/frostfs_testlib/storage/cluster.py @@ -0,0 +1,237 @@ +import random +import re + +import yaml + +from frostfs_testlib.hosting import Host, Hosting +from frostfs_testlib.hosting.config import ServiceConfig +from frostfs_testlib.storage import get_service_registry +from frostfs_testlib.storage.constants import ConfigAttributes +from frostfs_testlib.storage.dataclasses.frostfs_services import ( + HTTPGate, + InnerRing, + MorphChain, + S3Gate, + StorageNode, +) +from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass +from frostfs_testlib.storage.service_registry import ServiceRegistry + + +class ClusterNode: + """ + Represents physical node where multiple different services may be located + """ + + class_registry: ServiceRegistry + id: int + host: Host + + def __init__(self, host: Host, id: int) -> None: + self.host = host + self.id = id + self.class_registry = get_service_registry() + + @property + def host_ip(self): + return self.host.config.address + + def __eq__(self, other): + return self.host.config.address == other.host.config.address + + def __hash__(self): + return id(self.host.config.address) + + def __str__(self): + return self.host.config.address + + def __repr__(self) -> str: + return self.host.config.address + + # for backward compatibility and to not touch other codebase too much + @property + def storage_node(self) -> StorageNode: + return self.service(StorageNode) + + # for backward compatibility and to not touch other codebase too much + @property + def ir_node(self) -> InnerRing: + return self.service(InnerRing) + + # for backward compatibility and to not touch other codebase too much + @property + def morph_chain(self) -> MorphChain: + return self.service(MorphChain) + + # for backward compatibility and to not touch other codebase too much + @property + def http_gate(self) -> HTTPGate: + return self.service(HTTPGate) + + # for backward compatibility and to not touch other codebase too much + @property + def s3_gate(self) -> S3Gate: + return self.service(S3Gate) + + def service(self, service_type: type[ServiceClass]) -> ServiceClass: + """ + Get a service cluster node of specified type. + + Args: + service_type: type of the service which should be returned, + for frostfs it can be StorageNode, S3Gate, HttpGate, MorphChain and InnerRing. + + Returns: + service of service_type class. 
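+        Example (equivalent to the storage_node / s3_gate shortcut properties above):
+            storage_node = cluster_node.service(StorageNode)
+            s3_gate = cluster_node.service(S3Gate)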
+ """ + + service_entry = self.class_registry.get_entry(service_type) + service_name = service_entry["hosting_service_name"] + + pattern = f"{service_name}{self.id}" + config = self.host.get_service_config(pattern) + + return service_type( + self.id, + config.name, + self.host, + ) + + def get_list_of_services(self) -> list[str]: + return [ + config.attributes[ConfigAttributes.SERVICE_NAME] for config in self.host.config.services + ] + + +class Cluster: + """ + This class represents a Cluster object for the whole storage based on provided hosting + """ + + default_rpc_endpoint: str + default_s3_gate_endpoint: str + default_http_gate_endpoint: str + + def __init__(self, hosting: Hosting) -> None: + self._hosting = hosting + + self.class_registry = get_service_registry() + self.default_rpc_endpoint = self.services(StorageNode)[0].get_rpc_endpoint() + self.default_s3_gate_endpoint = self.services(S3Gate)[0].get_endpoint() + self.default_http_gate_endpoint = self.services(HTTPGate)[0].get_endpoint() + + @property + def hosts(self) -> list[Host]: + """ + Returns list of Hosts + """ + return self._hosting.hosts + + # for backward compatibility and to not touch other codebase too much + @property + def storage_nodes(self) -> list[StorageNode]: + return self.services(StorageNode) + + # for backward compatibility and to not touch other codebase too much + @property + def ir_nodes(self) -> list[InnerRing]: + return self.services(InnerRing) + + # for backward compatibility and to not touch other codebase too much + @property + def s3_gates(self) -> list[S3Gate]: + return self.services(S3Gate) + + @property + def http_gates(self) -> list[HTTPGate]: + return self.services(HTTPGate) + + @property + def morph_chain(self) -> list[MorphChain]: + return self.services(MorphChain) + + def services(self, service_type: type[ServiceClass]) -> list[ServiceClass]: + """ + Get all services in a cluster of specified type. + + Args: + service_type: type of the services which should be returned, + for frostfs it can be StorageNode, S3Gate, HttpGate, MorphChain and InnerRing. + + Returns: + list of services of service_type class. + """ + + service = self.class_registry.get_entry(service_type) + service_name = service["hosting_service_name"] + cls: type[NodeBase] = service["cls"] + + pattern = f"{service_name}\d*$" + configs = self.hosting.find_service_configs(pattern) + + found_nodes = [] + for config in configs: + # config.name is something like s3-gate01. 
Cut last digits to know service type + service_type = re.findall(".*\D", config.name)[0] + # exclude unsupported services + if service_type != service_name: + continue + + found_nodes.append( + cls( + self._get_id(config.name), + config.name, + self.hosting.get_host_by_service(config.name), + ) + ) + return found_nodes + + @property + def cluster_nodes(self) -> list[ClusterNode]: + """ + Returns list of Cluster Nodes + """ + + return [ClusterNode(host, id) for id, host in enumerate(self.hosts, start=1)] + + @property + def hosting(self) -> Hosting: + return self._hosting + + def _create_wallet_config(self, service: ServiceConfig) -> None: + wallet_path = service.attributes[ConfigAttributes.LOCAL_WALLET_CONFIG] + wallet_password = service.attributes[ConfigAttributes.WALLET_PASSWORD] + with open(wallet_path, "w") as file: + yaml.dump({"password": wallet_password}, file) + + def create_wallet_configs(self, hosting: Hosting) -> None: + configs = hosting.find_service_configs(".*") + for config in configs: + if ConfigAttributes.LOCAL_WALLET_CONFIG in config.attributes: + self._create_wallet_config(config) + + def is_local_devenv(self) -> bool: + if len(self.hosting.hosts) == 1: + host = self.hosting.hosts[0] + if host.config.address == "localhost" and host.config.plugin_name == "docker": + return True + return False + + def _get_id(self, node_name) -> int: + pattern = "\d*$" + + matches = re.search(pattern, node_name) + if not matches: + raise RuntimeError(f"Can't parse Id of the node {node_name}") + return int(matches.group()) + + def get_random_storage_rpc_endpoint(self) -> str: + return random.choice(self.get_storage_rpc_endpoints()) + + def get_storage_rpc_endpoints(self) -> list[str]: + nodes: list[StorageNode] = self.services(StorageNode) + return [node.get_rpc_endpoint() for node in nodes] + + def get_morph_endpoints(self) -> list[str]: + nodes: list[MorphChain] = self.services(MorphChain) + return [node.get_endpoint() for node in nodes] diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py new file mode 100644 index 0000000..95ea3f2 --- /dev/null +++ b/src/frostfs_testlib/storage/constants.py @@ -0,0 +1,22 @@ +class ConfigAttributes: + SERVICE_NAME = "systemd_service_name" + WALLET_PASSWORD = "wallet_password" + WALLET_PATH = "wallet_path" + WALLET_CONFIG = "wallet_config" + CONFIG_PATH = "config_path" + LOCAL_WALLET_PATH = "local_wallet_path" + LOCAL_WALLET_CONFIG = "local_config_path" + ENDPOINT_DATA_0 = "endpoint_data0" + ENDPOINT_DATA_1 = "endpoint_data1" + ENDPOINT_INTERNAL = "endpoint_internal0" + CONTROL_ENDPOINT = "control_endpoint" + UN_LOCODE = "un_locode" + + +class _FrostfsServicesNames: + STORAGE = "s" + S3_GATE = "s3-gate" + HTTP_GATE = "http-gate" + MORPH_CHAIN = "morph-chain" + INNER_RING = "ir" + MAIN_CHAIN = "main-chain" diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py new file mode 100644 index 0000000..fedda19 --- /dev/null +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -0,0 +1,207 @@ +import frostfs_testlib.resources.optionals as optionals +from frostfs_testlib.load.k6 import K6 +from frostfs_testlib.load.load_config import ( + EndpointSelectionStrategy, + K6ProcessAllocationStrategy, + LoadParams, + LoadScenario, + LoadType, +) +from frostfs_testlib.load.load_steps import init_s3_client, prepare_k6_instances +from frostfs_testlib.reporter import get_reporter +from 
frostfs_testlib.resources.load_params import ( + K6_TEARDOWN_PERIOD, + LOAD_NODE_SSH_PASSWORD, + LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE, + LOAD_NODE_SSH_PRIVATE_KEY_PATH, + LOAD_NODE_SSH_USER, + LOAD_NODES, +) +from frostfs_testlib.shell.interfaces import SshCredentials +from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo +from frostfs_testlib.testing.test_control import run_optionally + +reporter = get_reporter() + + +class BackgroundLoadController: + k6_instances: list[K6] + k6_dir: str + load_params: LoadParams + load_nodes: list[str] + verification_params: LoadParams + nodes_under_load: list[ClusterNode] + ssh_credentials: SshCredentials + loaders_wallet: WalletInfo + endpoints: list[str] + + def __init__( + self, + k6_dir: str, + load_params: LoadParams, + loaders_wallet: WalletInfo, + nodes_under_load: list[ClusterNode], + ) -> None: + self.k6_dir = k6_dir + self.load_params = load_params + self.nodes_under_load = nodes_under_load + self.load_nodes = LOAD_NODES + self.loaders_wallet = loaders_wallet + + if load_params.endpoint_selection_strategy is None: + raise RuntimeError("endpoint_selection_strategy should not be None") + + self.endpoints = self._get_endpoints( + load_params.load_type, load_params.endpoint_selection_strategy + ) + self.verification_params = LoadParams( + clients=load_params.readers, + scenario=LoadScenario.VERIFY, + registry_file=load_params.registry_file, + verify_time=load_params.verify_time, + load_type=load_params.load_type, + load_id=load_params.load_id, + working_dir=load_params.working_dir, + endpoint_selection_strategy=load_params.endpoint_selection_strategy, + k6_process_allocation_strategy=load_params.k6_process_allocation_strategy, + ) + self.ssh_credentials = SshCredentials( + LOAD_NODE_SSH_USER, + LOAD_NODE_SSH_PASSWORD, + LOAD_NODE_SSH_PRIVATE_KEY_PATH, + LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE, + ) + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, []) + def _get_endpoints( + self, load_type: LoadType, endpoint_selection_strategy: EndpointSelectionStrategy + ): + all_endpoints = { + LoadType.gRPC: { + EndpointSelectionStrategy.ALL: list( + set( + endpoint + for node_under_load in self.nodes_under_load + for endpoint in node_under_load.service(StorageNode).get_all_rpc_endpoint() + ) + ), + EndpointSelectionStrategy.FIRST: list( + set( + node_under_load.service(StorageNode).get_rpc_endpoint() + for node_under_load in self.nodes_under_load + ) + ), + }, + # for some reason xk6 appends http protocol on its own + LoadType.S3: { + EndpointSelectionStrategy.ALL: list( + set( + endpoint.replace("http://", "") + for node_under_load in self.nodes_under_load + for endpoint in node_under_load.service(S3Gate).get_all_endpoints() + ) + ), + EndpointSelectionStrategy.FIRST: list( + set( + node_under_load.service(S3Gate).get_endpoint().replace("http://", "") + for node_under_load in self.nodes_under_load + ) + ), + }, + } + + return all_endpoints[load_type][endpoint_selection_strategy] + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step_deco("Prepare background load instances") + def prepare(self): + if self.load_params.load_type == LoadType.S3: + init_s3_client( + self.load_nodes, + self.load_params, + self.k6_dir, + self.ssh_credentials, + self.nodes_under_load, + self.loaders_wallet, + ) + + self._prepare(self.load_params) + + def _prepare(self, load_params: LoadParams): 
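+        # Shared by prepare() and verify(): spins up k6 instances on the load
+        # nodes for whichever LoadParams (main load or verification) are passed in.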
+ self.k6_instances = prepare_k6_instances( + load_nodes=LOAD_NODES, + ssh_credentials=self.ssh_credentials, + k6_dir=self.k6_dir, + load_params=load_params, + endpoints=self.endpoints, + loaders_wallet=self.loaders_wallet, + ) + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step_deco("Start background load") + def start(self): + if self.load_params.preset is None: + raise RuntimeError("Preset should not be none at the moment of start") + + with reporter.step( + f"Start background load on nodes {self.nodes_under_load}: " + f"writers = {self.load_params.writers}, " + f"obj_size = {self.load_params.object_size}, " + f"load_time = {self.load_params.load_time}, " + f"prepare_json = {self.load_params.preset.pregen_json}, " + f"endpoints = {self.endpoints}" + ): + for k6_load_instance in self.k6_instances: + k6_load_instance.start() + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step_deco("Stop background load") + def stop(self): + for k6_load_instance in self.k6_instances: + k6_load_instance.stop() + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, True) + def is_running(self): + for k6_load_instance in self.k6_instances: + if not k6_load_instance.is_running: + return False + + return True + + def wait_until_finish(self): + if self.load_params.load_time is None: + raise RuntimeError("LoadTime should not be none") + + for k6_instance in self.k6_instances: + k6_instance.wait_until_finished(self.load_params.load_time + int(K6_TEARDOWN_PERIOD)) + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + def verify(self): + if self.verification_params.verify_time is None: + raise RuntimeError("verify_time should not be none") + + self._prepare(self.verification_params) + with reporter.step("Run verify background load data"): + for k6_verify_instance in self.k6_instances: + k6_verify_instance.start() + k6_verify_instance.wait_until_finished(self.verification_params.verify_time) + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step_deco("K6 run results") + def get_results(self) -> dict: + results = {} + for k6_instance in self.k6_instances: + if k6_instance.load_params.k6_process_allocation_strategy is None: + raise RuntimeError("k6_process_allocation_strategy should not be none") + + result = k6_instance.get_results() + keys_map = { + K6ProcessAllocationStrategy.PER_LOAD_NODE: k6_instance.load_node, + K6ProcessAllocationStrategy.PER_ENDPOINT: k6_instance.endpoints[0], + } + key = keys_map[k6_instance.load_params.k6_process_allocation_strategy] + results[key] = result + + return results diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py new file mode 100644 index 0000000..23d1a6c --- /dev/null +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -0,0 +1,130 @@ +import time + +import allure + +import frostfs_testlib.resources.optionals as optionals +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.shell import CommandOptions, Shell +from frostfs_testlib.steps import epoch +from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode +from frostfs_testlib.storage.controllers.disk_controller import DiskController +from frostfs_testlib.testing.test_control import run_optionally, wait_for_success +from frostfs_testlib.utils.failover_utils import ( + wait_all_storage_nodes_returned, + wait_for_host_offline, + wait_for_host_online, + 
wait_for_node_online, +) + +reporter = get_reporter() + + +class ClusterStateController: + def __init__(self, shell: Shell, cluster: Cluster) -> None: + self.stopped_nodes: list[ClusterNode] = [] + self.detached_disks: dict[str, DiskController] = {} + self.stopped_storage_nodes: list[StorageNode] = [] + self.cluster = cluster + self.shell = shell + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Stop host of node {node}") + def stop_node_host(self, node: ClusterNode, mode: str): + with allure.step(f"Stop host {node.host.config.address}"): + node.host.stop_host(mode=mode) + wait_for_host_offline(self.shell, node.storage_node) + self.stopped_nodes.append(node) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Start host of node {node}") + def start_node_host(self, node: ClusterNode): + with allure.step(f"Start host {node.host.config.address}"): + node.host.start_host() + wait_for_host_online(self.shell, node.storage_node) + wait_for_node_online(node.storage_node) + self.stopped_nodes.remove(node) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Start stopped hosts") + def start_stopped_hosts(self): + for node in self.stopped_nodes: + node.host.start_host() + self.stopped_nodes = [] + wait_all_storage_nodes_returned(self.shell, self.cluster) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Detach disk {device} at {mountpoint} on node {node}") + def detach_disk(self, node: StorageNode, device: str, mountpoint: str): + disk_controller = self._get_disk_controller(node, device, mountpoint) + self.detached_disks[disk_controller.id] = disk_controller + disk_controller.detach() + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Attach disk {device} at {mountpoint} on node {node}") + def attach_disk(self, node: StorageNode, device: str, mountpoint: str): + disk_controller = self._get_disk_controller(node, device, mountpoint) + disk_controller.attach() + self.detached_disks.pop(disk_controller.id, None) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Restore detached disks") + def restore_disks(self): + for disk_controller in self.detached_disks.values(): + disk_controller.attach() + self.detached_disks = {} + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Stop storage service on {node}") + def stop_storage_service(self, node: ClusterNode): + node.storage_node.stop_service() + self.stopped_storage_nodes.append(node.storage_node) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Start storage service on {node}") + def start_storage_service(self, node: ClusterNode): + node.storage_node.start_service() + self.stopped_storage_nodes.remove(node.storage_node) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Start stopped storage services") + def start_stopped_storage_services(self): + for node in self.stopped_storage_nodes: + node.start_service() + self.stopped_storage_nodes = [] + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Hard reboot host {node} via magic SysRq option") + def panic_reboot_host(self, node: ClusterNode): + shell = node.host.get_shell() + shell.exec('sudo sh -c "echo 1 > /proc/sys/kernel/sysrq"') + + options = CommandOptions(close_stdin=True, timeout=1, check=False) + shell.exec('sudo sh -c "echo b > /proc/sysrq-trigger"', options) + + # Let the things to be settled + # A little wait 
here to prevent ssh stuck during panic + time.sleep(10) + wait_for_host_online(self.shell, node.storage_node) + wait_for_node_online(node.storage_node) + + @reporter.step_deco("Wait up to {timeout} seconds for nodes on cluster to align epochs") + def wait_for_epochs_align(self, timeout=60): + @wait_for_success(timeout, 5, None, True) + def check_epochs(): + epochs_by_node = epoch.get_epochs_from_nodes(self.shell, self.cluster) + assert ( + len(set(epochs_by_node.values())) == 1 + ), f"unaligned epochs found: {epochs_by_node}" + + check_epochs() + + def _get_disk_controller( + self, node: StorageNode, device: str, mountpoint: str + ) -> DiskController: + disk_controller_id = DiskController.get_id(node, device) + if disk_controller_id in self.detached_disks.keys(): + disk_controller = self.detached_disks[disk_controller_id] + else: + disk_controller = DiskController(node, device, mountpoint) + + return disk_controller diff --git a/src/frostfs_testlib/storage/controllers/disk_controller.py b/src/frostfs_testlib/storage/controllers/disk_controller.py new file mode 100644 index 0000000..c2aa85c --- /dev/null +++ b/src/frostfs_testlib/storage/controllers/disk_controller.py @@ -0,0 +1,41 @@ +from frostfs_testlib.hosting.interfaces import DiskInfo +from frostfs_testlib.shell import CommandOptions +from frostfs_testlib.storage.cluster import StorageNode +from frostfs_testlib.testing.test_control import wait_for_success + + +class DiskController: + def __init__(self, node: StorageNode, device: str, mountpoint: str) -> None: + self.node: StorageNode = node + self.device: str = device + self.device_by_label: str + self.mountpoint: str = mountpoint.strip() + self.disk_info: DiskInfo = DiskInfo() + self.id = self.get_id(node, device) + + shell = node.host.get_shell() + cmd = f"sudo udevadm info -n {device} | egrep \"S:.*label\" | awk '{{print $2}}'" + self.device_by_label = f"/dev/{shell.exec(cmd).stdout.strip()}" + + @wait_for_success(60, 3, False) + def _wait_until_detached(self): + return self.node.host.is_disk_attached(self.device, self.disk_info) + + @wait_for_success(60, 3, True) + def _wait_until_attached(self): + return self.node.host.is_disk_attached(self.device, self.disk_info) + + def detach(self): + self.disk_info = self.node.host.detach_disk(self.device) + self._wait_until_detached() + + def attach(self): + self.node.host.attach_disk(self.device, self.disk_info) + self._wait_until_attached() + remote_shell = self.node.host.get_shell() + remote_shell.exec(f"sudo umount -l {self.device}", options=CommandOptions(check=False)) + remote_shell.exec(f"sudo mount {self.device_by_label} {self.mountpoint}") + + @staticmethod + def get_id(node: StorageNode, device: str): + return f"{node.host.config.address} - {device}" diff --git a/src/frostfs_testlib/storage/controllers/shards_watcher.py b/src/frostfs_testlib/storage/controllers/shards_watcher.py new file mode 100644 index 0000000..6607824 --- /dev/null +++ b/src/frostfs_testlib/storage/controllers/shards_watcher.py @@ -0,0 +1,118 @@ +import json +from typing import Any + +from frostfs_testlib.cli.frostfs_cli.shards import FrostfsCliShards +from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.testing.test_control import wait_for_success + + +class ShardsWatcher: + shards_snapshots: list[dict[str, Any]] = [] + + def __init__(self, node_under_test: ClusterNode) -> None: + self.storage_node = node_under_test.storage_node + self.take_shards_snapshot() + + def take_shards_snapshot(self): + snapshot = self.get_shards_snapshot() + 
self.shards_snapshots.append(snapshot) + + def get_shards_snapshot(self): + shards_snapshot: dict[str, Any] = {} + + shards = self.get_shards() + for shard in shards: + shards_snapshot[shard["shard_id"]] = shard + + return shards_snapshot + + def _get_current_snapshot(self): + return self.shards_snapshots[-1] + + def _get_previous_snapshot(self): + return self.shards_snapshots[-2] + + def _is_shard_present(self, shard_id): + snapshot = self._get_current_snapshot() + return shard_id in snapshot + + def get_shards_with_new_errors(self): + current_snapshot = self._get_current_snapshot() + previous_snapshot = self._get_previous_snapshot() + shards_with_new_errors: dict[str, Any] = {} + for shard_id, shard in previous_snapshot.items(): + if current_snapshot[shard_id]["error_count"] > shard["error_count"]: + shards_with_new_errors[shard_id] = current_snapshot[shard_id] + + return shards_with_new_errors + + def get_shards_with_errors(self): + snapshot = self.get_shards_snapshot() + shards_with_errors: dict[str, Any] = {} + for shard_id, shard in snapshot.items(): + if shard["error_count"] > 0: + shards_with_errors[shard_id] = shard + + return shards_with_errors + + def get_shard_status(self, shard_id: str): + snapshot = self.get_shards_snapshot() + + assert shard_id in snapshot, f"Shard {shard_id} is missing: {snapshot}" + + return snapshot[shard_id]["mode"] + + @wait_for_success(60, 2) + def await_for_all_shards_status(self, status: str): + snapshot = self.get_shards_snapshot() + + for shard_id in snapshot: + assert snapshot[shard_id]["mode"] == status, f"Shard {shard_id} have wrong shard status" + + @wait_for_success(60, 2) + def await_for_shard_status(self, shard_id: str, status: str): + assert self.get_shard_status(shard_id) == status + + @wait_for_success(60, 2) + def await_for_shard_have_new_errors(self, shard_id: str): + self.take_shards_snapshot() + assert self._is_shard_present(shard_id) + shards_with_new_errors = self.get_shards_with_new_errors() + + assert ( + shard_id in shards_with_new_errors + ), f"Expected shard {shard_id} to have new errors, but haven't {self.shards_snapshots[-1]}" + + @wait_for_success(300, 5) + def await_for_shards_have_no_new_errors(self): + self.take_shards_snapshot() + shards_with_new_errors = self.get_shards_with_new_errors() + assert len(shards_with_new_errors) == 0 + + def get_shards(self) -> dict[str, Any]: + shards_cli = FrostfsCliShards( + self.storage_node.host.get_shell(), + self.storage_node.host.get_cli_config("frostfs-cli").exec_path, + ) + + response = shards_cli.list( + endpoint=self.storage_node.get_control_endpoint(), + wallet=self.storage_node.get_remote_wallet_path(), + wallet_password=self.storage_node.get_wallet_password(), + ) + + return json.loads(response.stdout.split(">", 1)[1]) + + def set_shard_mode(self, shard_id: str, mode: str, clear_errors: bool = True): + shards_cli = FrostfsCliShards( + self.storage_node.host.get_shell(), + self.storage_node.host.get_cli_config("frostfs-cli").exec_path, + ) + return shards_cli.set_mode( + self.storage_node.get_control_endpoint(), + self.storage_node.get_remote_wallet_path(), + self.storage_node.get_wallet_password(), + mode=mode, + id=[shard_id], + clear_errors=clear_errors, + ) diff --git a/src/frostfs_testlib/storage/dataclasses/__init__.py b/src/frostfs_testlib/storage/dataclasses/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/frostfs_testlib/storage/dataclasses/acl.py b/src/frostfs_testlib/storage/dataclasses/acl.py new file mode 100644 index 0000000..cceb4d8 --- 
/dev/null +++ b/src/frostfs_testlib/storage/dataclasses/acl.py @@ -0,0 +1,103 @@ +import logging +from dataclasses import dataclass +from enum import Enum +from typing import Any, Dict, List, Optional, Union + +from frostfs_testlib.utils import wallet_utils + +logger = logging.getLogger("NeoLogger") +EACL_LIFETIME = 100500 +FROSTFS_CONTRACT_CACHE_TIMEOUT = 30 + + +class EACLOperation(Enum): + PUT = "put" + GET = "get" + HEAD = "head" + GET_RANGE = "getrange" + GET_RANGE_HASH = "getrangehash" + SEARCH = "search" + DELETE = "delete" + + +class EACLAccess(Enum): + ALLOW = "allow" + DENY = "deny" + + +class EACLRole(Enum): + OTHERS = "others" + USER = "user" + SYSTEM = "system" + + +class EACLHeaderType(Enum): + REQUEST = "req" # Filter request headers + OBJECT = "obj" # Filter object headers + SERVICE = "SERVICE" # Filter service headers. These are not processed by FrostFS nodes and exist for service use only + + +class EACLMatchType(Enum): + STRING_EQUAL = "=" # Return true if strings are equal + STRING_NOT_EQUAL = "!=" # Return true if strings are different + + +@dataclass +class EACLFilter: + header_type: EACLHeaderType = EACLHeaderType.REQUEST + match_type: EACLMatchType = EACLMatchType.STRING_EQUAL + key: Optional[str] = None + value: Optional[str] = None + + def to_dict(self) -> Dict[str, Any]: + return { + "headerType": self.header_type, + "matchType": self.match_type, + "key": self.key, + "value": self.value, + } + + +@dataclass +class EACLFilters: + filters: Optional[List[EACLFilter]] = None + + def __str__(self): + return ",".join( + [ + f"{filter.header_type.value}:" + f"{filter.key}{filter.match_type.value}{filter.value}" + for filter in self.filters + ] + if self.filters + else [] + ) + + +@dataclass +class EACLPubKey: + keys: Optional[List[str]] = None + + +@dataclass +class EACLRule: + operation: Optional[EACLOperation] = None + access: Optional[EACLAccess] = None + role: Optional[Union[EACLRole, str]] = None + filters: Optional[EACLFilters] = None + + def to_dict(self) -> Dict[str, Any]: + return { + "Operation": self.operation, + "Access": self.access, + "Role": self.role, + "Filters": self.filters or [], + } + + def __str__(self): + role = ( + self.role.value + if isinstance(self.role, EACLRole) + else f'pubkey:{wallet_utils.get_wallet_public_key(self.role, "")}' + ) + return f'{self.access.value} {self.operation.value} {self.filters or ""} {role}' diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py new file mode 100644 index 0000000..1871aa3 --- /dev/null +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -0,0 +1,173 @@ +import yaml + +from frostfs_testlib.blockchain import RPCClient +from frostfs_testlib.storage.constants import ConfigAttributes +from frostfs_testlib.storage.dataclasses.node_base import NodeBase + + +class InnerRing(NodeBase): + """ + Class represents inner ring node in a cluster + + Inner ring node is not always the same as physical host (or physical node, if you will): + It can be service running in a container or on physical host + For testing perspective, it's not relevant how it is actually running, + since frostfs network will still treat it as "node" + """ + + def service_healthcheck(self) -> bool: + health_metric = "frostfs_node_ir_health" + output = ( + self.host.get_shell() + .exec(f"curl -s localhost:6662 | grep {health_metric} | sed 1,2d") + .stdout + ) + return health_metric in output + + def get_netmap_cleaner_threshold(self) -> str: + 
config_file = self.get_remote_config_path() + contents = self.host.get_shell().exec(f"cat {config_file}").stdout + + config = yaml.safe_load(contents) + value = config["netmap_cleaner"]["threshold"] + + return value + + +class S3Gate(NodeBase): + """ + Class represents S3 gateway in a cluster + """ + + def get_endpoint(self) -> str: + return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0) + + def get_all_endpoints(self) -> list[str]: + return [ + self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0), + self._get_attribute(ConfigAttributes.ENDPOINT_DATA_1), + ] + + def service_healthcheck(self) -> bool: + health_metric = "frostfs_s3_gw_state_health" + output = ( + self.host.get_shell() + .exec(f"curl -s localhost:8086 | grep {health_metric} | sed 1,2d") + .stdout + ) + return health_metric in output + + @property + def label(self) -> str: + return f"{self.name}: {self.get_endpoint()}" + + +class HTTPGate(NodeBase): + """ + Class represents HTTP gateway in a cluster + """ + + def get_endpoint(self) -> str: + return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0) + + def service_healthcheck(self) -> bool: + health_metric = "frostfs_http_gw_state_health" + output = ( + self.host.get_shell() + .exec(f"curl -s localhost:5662 | grep {health_metric} | sed 1,2d") + .stdout + ) + return health_metric in output + + @property + def label(self) -> str: + return f"{self.name}: {self.get_endpoint()}" + + +class MorphChain(NodeBase): + """ + Class represents side-chain aka morph-chain consensus node in a cluster + + Consensus node is not always the same as physical host (or physical node, if you will): + It can be service running in a container or on physical host + For testing perspective, it's not relevant how it is actually running, + since frostfs network will still treat it as "node" + """ + + rpc_client: RPCClient + + def construct(self): + self.rpc_client = RPCClient(self.get_endpoint()) + + def get_endpoint(self) -> str: + return self._get_attribute(ConfigAttributes.ENDPOINT_INTERNAL) + + def service_healthcheck(self) -> bool: + # TODO Rework in 1.3 Release when metrics for each service will be available + return True + + @property + def label(self) -> str: + return f"{self.name}: {self.get_endpoint()}" + + +class MainChain(NodeBase): + """ + Class represents main-chain consensus node in a cluster + + Consensus node is not always the same as physical host: + It can be service running in a container or on physical host (or physical node, if you will): + For testing perspective, it's not relevant how it is actually running, + since frostfs network will still treat it as "node" + """ + + rpc_client: RPCClient + + def construct(self): + self.rpc_client = RPCClient(self.get_endpoint()) + + def get_endpoint(self) -> str: + return self._get_attribute(ConfigAttributes.ENDPOINT_INTERNAL) + + @property + def label(self) -> str: + return f"{self.name}: {self.get_endpoint()}" + + +class StorageNode(NodeBase): + """ + Class represents storage node in a storage cluster + + Storage node is not always the same as physical host: + It can be service running in a container or on physical host (or physical node, if you will): + For testing perspective, it's not relevant how it is actually running, + since frostfs network will still treat it as "node" + """ + + def get_rpc_endpoint(self) -> str: + return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0) + + def get_all_rpc_endpoint(self) -> list[str]: + return [ + self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0), + 
self._get_attribute(ConfigAttributes.ENDPOINT_DATA_1), + ] + + def service_healthcheck(self) -> bool: + health_metric = "frostfs_node_state_health" + output = ( + self.host.get_shell() + .exec(f"curl -s localhost:6672 | grep {health_metric} | sed 1,2d") + .stdout + ) + return health_metric in output + + def get_control_endpoint(self) -> str: + return self._get_attribute(ConfigAttributes.CONTROL_ENDPOINT) + + def get_un_locode(self): + return self._get_attribute(ConfigAttributes.UN_LOCODE) + + @property + def label(self) -> str: + return f"{self.name}: {self.get_rpc_endpoint()}" diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py new file mode 100644 index 0000000..0d96775 --- /dev/null +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -0,0 +1,122 @@ +from abc import ABC, abstractmethod +from dataclasses import dataclass +from typing import Optional, TypedDict, TypeVar + +from frostfs_testlib.hosting.config import ServiceConfig +from frostfs_testlib.hosting.interfaces import Host +from frostfs_testlib.storage.constants import ConfigAttributes +from frostfs_testlib.utils import wallet_utils + + +@dataclass +class NodeBase(ABC): + """ + Represents a node of some underlying service + """ + + id: str + name: str + host: Host + + def __init__(self, id, name, host) -> None: + self.id = id + self.name = name + self.host = host + self.construct() + + def construct(self): + pass + + def __eq__(self, other): + return self.name == other.name + + def __hash__(self): + return id(self.name) + + def __str__(self): + return self.label + + def __repr__(self) -> str: + return self.label + + @property + def label(self) -> str: + return self.name + + def get_service_systemctl_name(self) -> str: + return self._get_attribute(ConfigAttributes.SERVICE_NAME) + + def start_service(self): + self.host.start_service(self.name) + + @abstractmethod + def service_healthcheck(self) -> bool: + """Service healthcheck.""" + + def stop_service(self): + self.host.stop_service(self.name) + + def restart_service(self): + self.host.restart_service(self.name) + + def get_wallet_password(self) -> str: + return self._get_attribute(ConfigAttributes.WALLET_PASSWORD) + + def get_wallet_path(self) -> str: + return self._get_attribute( + ConfigAttributes.LOCAL_WALLET_PATH, + ConfigAttributes.WALLET_PATH, + ) + + def get_remote_wallet_path(self) -> str: + """ + Returns node wallet file path located on remote host + """ + return self._get_attribute( + ConfigAttributes.WALLET_PATH, + ) + + def get_remote_config_path(self) -> str: + """ + Returns node config file path located on remote host + """ + return self._get_attribute( + ConfigAttributes.CONFIG_PATH, + ) + + def get_wallet_config_path(self): + return self._get_attribute( + ConfigAttributes.LOCAL_WALLET_CONFIG, + ConfigAttributes.WALLET_CONFIG, + ) + + def get_wallet_public_key(self): + storage_wallet_path = self.get_wallet_path() + storage_wallet_pass = self.get_wallet_password() + return wallet_utils.get_wallet_public_key(storage_wallet_path, storage_wallet_pass) + + def _get_attribute( + self, attribute_name: str, default_attribute_name: Optional[str] = None + ) -> str: + config = self.host.get_service_config(self.name) + + if attribute_name not in config.attributes: + if default_attribute_name is None: + raise RuntimeError( + f"Service {self.name} has no {attribute_name} in config and fallback attribute isn't set either" + ) + + return config.attributes[default_attribute_name] + + return 
config.attributes[attribute_name] + + def _get_service_config(self) -> ServiceConfig: + return self.host.get_service_config(self.name) + + +ServiceClass = TypeVar("ServiceClass", bound=NodeBase) + + +class NodeClassDict(TypedDict): + hosting_service_name: str + cls: type[NodeBase] diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py new file mode 100644 index 0000000..dd46740 --- /dev/null +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -0,0 +1,25 @@ +from dataclasses import dataclass +from typing import Optional + + +@dataclass +class ObjectRef: + cid: str + oid: str + + +@dataclass +class LockObjectInfo(ObjectRef): + lifetime: Optional[int] = None + expire_at: Optional[int] = None + + +@dataclass +class StorageObjectInfo(ObjectRef): + size: Optional[int] = None + wallet_file_path: Optional[str] = None + file_path: Optional[str] = None + file_hash: Optional[str] = None + attributes: Optional[list[dict[str, str]]] = None + tombstone: Optional[str] = None + locks: Optional[list[LockObjectInfo]] = None diff --git a/src/frostfs_testlib/storage/dataclasses/wallet.py b/src/frostfs_testlib/storage/dataclasses/wallet.py new file mode 100644 index 0000000..1d66c4b --- /dev/null +++ b/src/frostfs_testlib/storage/dataclasses/wallet.py @@ -0,0 +1,90 @@ +import json +import logging +import os +import uuid +from dataclasses import dataclass +from typing import Optional + +from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG, DEFAULT_WALLET_PASS +from frostfs_testlib.shell import Shell +from frostfs_testlib.storage.cluster import Cluster, NodeBase +from frostfs_testlib.utils.wallet_utils import get_last_address_from_wallet, init_wallet + +logger = logging.getLogger("frostfs.testlib.utils") + + +@dataclass +class WalletInfo: + path: str + password: str = DEFAULT_WALLET_PASS + config_path: str = DEFAULT_WALLET_CONFIG + + @staticmethod + def from_node(node: NodeBase): + return WalletInfo( + node.get_wallet_path(), node.get_wallet_password(), node.get_wallet_config_path() + ) + + def get_address(self) -> str: + """ + Extracts the last address from wallet via neo3 lib. + + Returns: + The address of the wallet. + """ + return get_last_address_from_wallet(self.path, self.password) + + def get_address_from_json(self, account_id: int = 0) -> str: + """ + Extracts address of the given account id from wallet using json lookup. + (Useful if neo3 fails for some reason and can't be used). + + Args: + account_id: id of the account to get address. + + Returns: + address string. + """ + with open(self.path, "r") as wallet: + wallet_json = json.load(wallet) + assert abs(account_id) + 1 <= len( + wallet_json["accounts"] + ), f"There is no index '{account_id}' in wallet: {wallet_json}" + + return wallet_json["accounts"][account_id]["address"] + + +class WalletFactory: + def __init__(self, wallets_dir: str, shell: Shell, cluster: Cluster) -> None: + self.shell = shell + self.wallets_dir = wallets_dir + self.cluster = cluster + + def create_wallet( + self, file_name: Optional[str] = None, password: Optional[str] = None + ) -> WalletInfo: + """ + Creates new default wallet. + + Args: + file_name: output wallet file name. + password: wallet password. + + Returns: + WalletInfo object of new wallet. 
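+        Example (a minimal sketch; assumes 'wallets_dir', 'shell' and 'cluster'
+        are already available):
+            factory = WalletFactory(wallets_dir, shell, cluster)
+            wallet = factory.create_wallet(password="secret")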
+ """ + + if file_name is None: + file_name = str(uuid.uuid4()) + if password is None: + password = "" + + base_path = os.path.join(self.wallets_dir, file_name) + wallet_path = f"{base_path}.json" + wallet_config_path = f"{base_path}.yaml" + init_wallet(wallet_path, password) + + with open(wallet_config_path, "w") as config_file: + config_file.write(f'password: "{password}"') + + return WalletInfo(wallet_path, password, wallet_config_path) diff --git a/src/frostfs_testlib/storage/service_registry.py b/src/frostfs_testlib/storage/service_registry.py new file mode 100644 index 0000000..3154dc7 --- /dev/null +++ b/src/frostfs_testlib/storage/service_registry.py @@ -0,0 +1,21 @@ +from frostfs_testlib.storage.dataclasses.node_base import NodeBase, NodeClassDict, ServiceClass + + +class ServiceRegistry: + _class_mapping: dict[str, NodeClassDict] = {} + + def get_entry(self, service_type: type[ServiceClass]) -> NodeClassDict: + key = service_type.__name__ + + if key not in self._class_mapping: + raise RuntimeError( + f"Unregistered service type requested: {key}. At this moment registered services are: {self._class_mapping.keys()}" + ) + + return self._class_mapping[key] + + def register_service(self, service_name: str, service_class: type[NodeBase]): + self._class_mapping[service_class.__name__] = { + "cls": service_class, + "hosting_service_name": service_name, + } diff --git a/src/frostfs_testlib/testing/cluster_test_base.py b/src/frostfs_testlib/testing/cluster_test_base.py new file mode 100644 index 0000000..11f67f0 --- /dev/null +++ b/src/frostfs_testlib/testing/cluster_test_base.py @@ -0,0 +1,32 @@ +from typing import Optional + +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.shell import Shell +from frostfs_testlib.steps import epoch +from frostfs_testlib.storage.cluster import Cluster +from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode + +reporter = get_reporter() + + +# To skip adding every mandatory singleton dependency to EACH test function +class ClusterTestBase: + shell: Shell + cluster: Cluster + + @reporter.step_deco("Tick {epochs_to_tick} epochs") + def tick_epochs(self, epochs_to_tick: int, alive_node: Optional[StorageNode] = None): + for _ in range(epochs_to_tick): + self.tick_epoch(alive_node) + + def tick_epoch(self, alive_node: Optional[StorageNode] = None): + epoch.tick_epoch(self.shell, self.cluster, alive_node=alive_node) + + def wait_for_epochs_align(self): + epoch.wait_for_epochs_align(self.shell, self.cluster) + + def get_epoch(self): + return epoch.get_epoch(self.shell, self.cluster) + + def ensure_fresh_epoch(self): + return epoch.ensure_fresh_epoch(self.shell, self.cluster) diff --git a/src/frostfs_testlib/testing/test_control.py b/src/frostfs_testlib/testing/test_control.py new file mode 100644 index 0000000..5621a61 --- /dev/null +++ b/src/frostfs_testlib/testing/test_control.py @@ -0,0 +1,164 @@ +import inspect +import logging +from functools import wraps +from time import sleep, time +from typing import Any + +from _pytest.outcomes import Failed +from pytest import fail + +logger = logging.getLogger("NeoLogger") + +# TODO: we may consider deprecating some methods here and use tenacity instead + + +class expect_not_raises: + """ + Decorator/Context manager check that some action, method or test does not raise exceptions + + Useful to set proper state of failed test cases in allure + + Example: + def do_stuff(): + raise Exception("Fail") + + def test_yellow(): <- this test is marked yellow (Test Defect) in allure + 
do_stuff() + + def test_red(): <- this test is marked red (Failed) in allure + with expect_not_raises(): + do_stuff() + + @expect_not_raises() + def test_also_red(): <- this test is also marked red (Failed) in allure + do_stuff() + """ + + def __enter__(self): + pass + + def __exit__(self, exception_type, exception_value, exception_traceback): + if exception_value: + fail(str(exception_value)) + + def __call__(self, func): + @wraps(func) + def impl(*a, **kw): + with expect_not_raises(): + func(*a, **kw) + + return impl + + +def retry(max_attempts: int, sleep_interval: int = 1, expected_result: Any = None): + """ + Decorator to wait for some conditions/functions to pass successfully. + This is useful if you don't know exact time when something should pass successfully and do not + want to use sleep(X) with too big X. + + Be careful though, wrapped function should only check the state of something, not change it. + """ + + assert max_attempts >= 1, "Cannot apply retry decorator with max_attempts < 1" + + def wrapper(func): + @wraps(func) + def impl(*a, **kw): + last_exception = None + for _ in range(max_attempts): + try: + actual_result = func(*a, **kw) + if expected_result is not None: + assert expected_result == actual_result + return actual_result + except Exception as ex: + logger.debug(ex) + last_exception = ex + sleep(sleep_interval) + except Failed as ex: + logger.debug(ex) + last_exception = ex + sleep(sleep_interval) + + # timeout exceeded with no success, raise last_exception + if last_exception is not None: + raise last_exception + + return impl + + return wrapper + + +def run_optionally(enabled: bool, mock_value: Any = True): + """ + Decorator to run something conditionally. + MUST be placed after @pytest.fixture and before @allure decorators. + + Args: + enabled: if true, decorated func will be called as usual. if false the decorated func will be skipped and mock_value will be returned. + mock_value: the value to be returned when decorated func is skipped. + """ + + def deco(func): + @wraps(func) + def func_impl(*a, **kw): + if enabled: + return func(*a, **kw) + return mock_value + + @wraps(func) + def gen_impl(*a, **kw): + if enabled: + yield from func(*a, **kw) + return + yield mock_value + + return gen_impl if inspect.isgeneratorfunction(func) else func_impl + + return deco + + +def wait_for_success( + max_wait_time: int = 60, + interval: int = 1, + expected_result: Any = None, + fail_testcase: bool = False, +): + """ + Decorator to wait for some conditions/functions to pass successfully. + This is useful if you don't know exact time when something should pass successfully and do not + want to use sleep(X) with too big X. + + Be careful though, wrapped function should only check the state of something, not change it. 
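+    Example (illustrative; polls the assert-based check every 2 seconds for up
+    to 60 seconds and re-raises the last failure on timeout):
+        @wait_for_success(60, 2)
+        def check_shard_status():
+            assert watcher.get_shard_status(shard_id) == "read-write"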
+ """ + + def wrapper(func): + @wraps(func) + def impl(*a, **kw): + start = int(round(time())) + last_exception = None + while start + max_wait_time >= int(round(time())): + try: + actual_result = func(*a, **kw) + if expected_result is not None: + assert expected_result == actual_result + return actual_result + except Exception as ex: + logger.debug(ex) + last_exception = ex + sleep(interval) + except Failed as ex: + logger.debug(ex) + last_exception = ex + sleep(interval) + + if fail_testcase: + fail(str(last_exception)) + + # timeout exceeded with no success, raise last_exception + if last_exception is not None: + raise last_exception + + return impl + + return wrapper diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py new file mode 100644 index 0000000..7ed1a27 --- /dev/null +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -0,0 +1,135 @@ +#!/usr/bin/python3.10 + +# TODO: This file is deprecated and all code which uses these calls should be refactored to use shell classes + +""" +Helper functions to use with `frostfs-cli`, `neo-go` and other CLIs. +""" +import json +import logging +import subprocess +import sys +from contextlib import suppress +from datetime import datetime +from textwrap import shorten +from typing import TypedDict, Union + +import pexpect + +from frostfs_testlib.reporter import get_reporter + +reporter = get_reporter() +logger = logging.getLogger("NeoLogger") +COLOR_GREEN = "\033[92m" +COLOR_OFF = "\033[0m" + + +def _cmd_run(cmd: str, timeout: int = 90) -> str: + """ + Runs given shell command , in case of success returns its stdout, + in case of failure returns error message. + """ + compl_proc = None + start_time = datetime.now() + try: + logger.info(f"{COLOR_GREEN}Executing command: {cmd}{COLOR_OFF}") + start_time = datetime.utcnow() + compl_proc = subprocess.run( + cmd, + check=True, + universal_newlines=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + timeout=timeout, + shell=True, + ) + output = compl_proc.stdout + return_code = compl_proc.returncode + end_time = datetime.utcnow() + logger.info(f"{COLOR_GREEN}Output: {output}{COLOR_OFF}") + _attach_allure_log(cmd, output, return_code, start_time, end_time) + + return output + except subprocess.CalledProcessError as exc: + logger.info( + f"Command: {cmd}\n" f"Error:\nreturn code: {exc.returncode} " f"\nOutput: {exc.output}" + ) + end_time = datetime.now() + return_code, cmd_output = subprocess.getstatusoutput(cmd) + _attach_allure_log(cmd, cmd_output, return_code, start_time, end_time) + + raise RuntimeError( + f"Command: {cmd}\n" f"Error:\nreturn code: {exc.returncode}\n" f"Output: {exc.output}" + ) from exc + except OSError as exc: + raise RuntimeError(f"Command: {cmd}\n" f"Output: {exc.strerror}") from exc + except Exception as exc: + return_code, cmd_output = subprocess.getstatusoutput(cmd) + end_time = datetime.now() + _attach_allure_log(cmd, cmd_output, return_code, start_time, end_time) + logger.info( + f"Command: {cmd}\n" + f"Error:\nreturn code: {return_code}\n" + f"Output: {exc.output.decode('utf-8') if type(exc.output) is bytes else exc.output}" + ) + raise + + +def _run_with_passwd(cmd: str) -> str: + child = pexpect.spawn(cmd) + child.delaybeforesend = 1 + child.expect(".*") + child.sendline("\r") + if sys.platform == "darwin": + child.expect(pexpect.EOF) + cmd = child.before + else: + child.wait() + cmd = child.read() + return cmd.decode() + + +def _configure_aws_cli(cmd: str, key_id: str, access_key: str, out_format: str = "json") -> str: + child 
= pexpect.spawn(cmd) + child.delaybeforesend = 1 + + child.expect("AWS Access Key ID.*") + child.sendline(key_id) + + child.expect("AWS Secret Access Key.*") + child.sendline(access_key) + + child.expect("Default region name.*") + child.sendline("") + + child.expect("Default output format.*") + child.sendline(out_format) + + child.wait() + cmd = child.read() + # child.expect(pexpect.EOF) + # cmd = child.before + return cmd.decode() + + +def _attach_allure_log( + cmd: str, output: str, return_code: int, start_time: datetime, end_time: datetime +) -> None: + command_attachment = ( + f"COMMAND: '{cmd}'\n" + f"OUTPUT:\n {output}\n" + f"RC: {return_code}\n" + f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {end_time - start_time}" + ) + with reporter.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'): + reporter.attach(command_attachment, "Command execution") + + +def log_command_execution(cmd: str, output: Union[str, TypedDict]) -> None: + logger.info(f"{cmd}: {output}") + with suppress(Exception): + json_output = json.dumps(output, indent=4, sort_keys=True) + output = json_output + command_attachment = f"COMMAND: '{cmd}'\n" f"OUTPUT:\n {output}\n" + with reporter.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'): + reporter.attach(command_attachment, "Command execution") diff --git a/src/frostfs_testlib/utils/env_utils.py b/src/frostfs_testlib/utils/env_utils.py new file mode 100644 index 0000000..6b4fb40 --- /dev/null +++ b/src/frostfs_testlib/utils/env_utils.py @@ -0,0 +1,30 @@ +import logging +import re + +from frostfs_testlib.reporter import get_reporter + +reporter = get_reporter() +logger = logging.getLogger("NeoLogger") + + +@reporter.step_deco("Read environment.properties") +def read_env_properties(file_path: str) -> dict: + with open(file_path, "r") as file: + raw_content = file.read() + + env_properties = {} + for line in raw_content.split("\n"): + m = re.match("(.*?)=(.*)", line) + if not m: + logger.warning(f"Could not parse env property from {line}") + continue + key, value = m.group(1), m.group(2) + env_properties[key] = value + return env_properties + + +@reporter.step_deco("Update data in environment.properties") +def save_env_properties(file_path: str, env_data: dict) -> None: + with open(file_path, "a+") as env_file: + for env, env_value in env_data.items(): + env_file.write(f"{env}={env_value}\n") diff --git a/src/frostfs_testlib/utils/failover_utils.py b/src/frostfs_testlib/utils/failover_utils.py new file mode 100644 index 0000000..3910662 --- /dev/null +++ b/src/frostfs_testlib/utils/failover_utils.py @@ -0,0 +1,256 @@ +import logging +from dataclasses import dataclass +from time import sleep +from typing import Optional + +from frostfs_testlib.hosting import Host +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.resources.common import SERVICE_MAX_STARTUP_TIME +from frostfs_testlib.shell import CommandOptions, Shell +from frostfs_testlib.steps.cli.object import neo_go_dump_keys +from frostfs_testlib.steps.node_management import storage_node_healthcheck +from frostfs_testlib.steps.storage_policy import get_nodes_with_object +from frostfs_testlib.storage.cluster import Cluster, ClusterNode, NodeBase, StorageNode +from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain +from frostfs_testlib.testing.test_control import retry, wait_for_success +from frostfs_testlib.utils.datetime_utils import parse_time + +reporter = get_reporter() + +logger = logging.getLogger("NeoLogger") + + 
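+# NB: ping exits with code 0 on success, so in the retry-decorated helpers below
+# expected_result=0 means "host online" and expected_result=1 means "host offline".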
+@reporter.step_deco("Ping node") +def ping_host(shell: Shell, host: Host): + options = CommandOptions(check=False) + return shell.exec(f"ping {host.config.address} -c 1", options).return_code + + +@reporter.step_deco("Wait for storage nodes returned to cluster") +def wait_all_storage_nodes_returned(shell: Shell, cluster: Cluster) -> None: + with reporter.step("Run health check for all storage nodes"): + for node in cluster.services(StorageNode): + wait_for_host_online(shell, node) + wait_for_node_online(node) + + +@retry(max_attempts=60, sleep_interval=5, expected_result=0) +@reporter.step_deco("Waiting for host of {node} to go online") +def wait_for_host_online(shell: Shell, node: StorageNode): + try: + # TODO: Quick solution for now, should be replaced by lib interactions + return ping_host(shell, node.host) + except Exception as err: + logger.warning(f"Host ping fails with error {err}") + return 1 + + +@retry(max_attempts=60, sleep_interval=5, expected_result=1) +@reporter.step_deco("Waiting for host of {node} to go offline") +def wait_for_host_offline(shell: Shell, node: StorageNode): + try: + # TODO: Quick solution for now, should be replaced by lib interactions + return ping_host(shell, node.host) + except Exception as err: + logger.warning(f"Host ping fails with error {err}") + return 0 + + +@retry(max_attempts=10, sleep_interval=15, expected_result=True) +@reporter.step_deco("Waiting for node {node} to go online") +def wait_for_node_online(node: StorageNode): + try: + health_check = storage_node_healthcheck(node) + except Exception as err: + logger.warning(f"Node healthcheck fails with error {err}") + return False + + return health_check.health_status == "READY" and health_check.network_status == "ONLINE" + + +@reporter.step_deco("Check and return status of given service") +def service_status(service: str, shell: Shell) -> str: + return shell.exec(f"sudo systemctl is-active {service}").stdout.rstrip() + + +@dataclass +class TopCommand: + """ + This class using `from_stdout` helps to parse result from `top command`, could return result only for one PID + pid: Process PID + output: stdout result from TOP command + """ + + pid: Optional[str] = None + user: Optional[str] = None + pr: Optional[str] = None + ni: Optional[str] = None + virt: Optional[str] = None + res: Optional[str] = None + shr: Optional[str] = None + status: Optional[str] = None + cpu_percent: Optional[str] = None + mem_percent: Optional[str] = None + time: Optional[str] = None + cmd: Optional[str] = None + STATUS_RUNNING = "R" + STATUS_SLEEP = "S" + STATUS_ZOMBIE = "Z" + STATUS_UNSLEEP = "D" + STATUS_TRACED = "T" + + @staticmethod + def from_stdout(output: str, requested_pid: int) -> "TopCommand": + list_var = [None for i in range(12)] + for line in output.split("\n"): + if str(requested_pid) in line: + list_var = line.split() + return TopCommand( + pid=list_var[0], + user=list_var[1], + pr=list_var[2], + ni=list_var[3], + virt=list_var[4], + res=list_var[5], + shr=list_var[6], + status=list_var[7], + cpu_percent=list_var[8], + mem_percent=list_var[9], + time=list_var[10], + cmd=list_var[11], + ) + + +@reporter.step_deco("Run `top` command with specified PID") +def service_status_top(service: str, shell: Shell) -> TopCommand: + pid = service_pid(service, shell) + output = shell.exec(f"sudo top -b -n 1 -p {pid}").stdout + return TopCommand.from_stdout(output, pid) + + +@reporter.step_deco("Restart service n times with sleep") +def multiple_restart( + service_type: type[NodeBase], + node: ClusterNode, + count: int = 5, 
+ sleep_interval: int = 2, +): + service_systemctl_name = node.service(service_type).get_service_systemctl_name() + service_name = node.service(service_type).name + for _ in range(count): + node.host.restart_service(service_name) + logger.info( + f"Restart {service_systemctl_name}; sleep {sleep_interval} seconds and continue" + ) + sleep(sleep_interval) + + +@reporter.step_deco("Get status of list of services and check expected status") +@wait_for_success(60, 5) +def check_services_status(service_list: list[str], expected_status: str, shell: Shell): + cmd = "" + for service in service_list: + cmd += f' sudo systemctl status {service} --lines=0 | grep "Active:";' + result = shell.exec(cmd).stdout.rstrip() + statuses = list() + for line in result.split("\n"): + status_substring = line.split() + statuses.append(status_substring[1]) + unique_statuses = list(set(statuses)) + assert ( + len(unique_statuses) == 1 and expected_status in unique_statuses + ), f"Requested status={expected_status} not found in requested services={service_list}, list of statuses={result}" + + +@reporter.step_deco("Wait for active status of passed service") +@wait_for_success(60, 5) +def wait_service_in_desired_state( + service: str, shell: Shell, expected_status: Optional[str] = "active" +): + real_status = service_status(service=service, shell=shell) + assert ( + expected_status == real_status + ), f"Service {service}: expected status= {expected_status}, real status {real_status}" + + +@reporter.step_deco("Run healthcheck against passed service") +@wait_for_success(parse_time(SERVICE_MAX_STARTUP_TIME), 1) +def service_type_healthcheck( + service_type: type[NodeBase], + node: ClusterNode, +): + service = node.service(service_type) + assert ( + service.service_healthcheck() + ), f"Healthcheck failed for {service.get_service_systemctl_name()}, IP={node.host_ip}" + + +@reporter.step_deco("Kill by process name") +def kill_by_service_name(service_type: type[NodeBase], node: ClusterNode): + service_systemctl_name = node.service(service_type).get_service_systemctl_name() + pid = service_pid(service_systemctl_name, node.host.get_shell()) + node.host.get_shell().exec(f"sudo kill -9 {pid}") + + +@reporter.step_deco("Service {service} suspend") +def suspend_service(shell: Shell, service: str): + shell.exec(f"sudo kill -STOP {service_pid(service, shell)}") + + +@reporter.step_deco("Service {service} resume") +def resume_service(shell: Shell, service: str): + shell.exec(f"sudo kill -CONT {service_pid(service, shell)}") + + +@reporter.step_deco("Retrieve service's pid") +# retry mechanism cause when the task has been started recently '0' PID could be returned +@wait_for_success(10, 1) +def service_pid(service: str, shell: Shell) -> int: + output = shell.exec(f"systemctl show --property MainPID {service}").stdout.rstrip() + splitted = output.split("=") + PID = int(splitted[1]) + assert PID > 0, f"Service {service} has invalid PID={PID}" + return PID + + +@reporter.step_deco("Wrapper for neo-go dump keys command") +def dump_keys(shell: Shell, node: ClusterNode) -> dict: + host = node.host + service_config = host.get_service_config(node.service(MorphChain).name) + wallet = service_config.attributes["wallet_path"] + return neo_go_dump_keys(shell=shell, wallet=wallet) + + +@reporter.step_deco("Wait for object replication") +def wait_object_replication( + cid: str, + oid: str, + expected_copies: int, + shell: Shell, + nodes: list[StorageNode], + sleep_interval: int = 15, + attempts: int = 20, +) -> list[StorageNode]: + nodes_with_object = 
[] + for _ in range(attempts): + nodes_with_object = get_nodes_with_object(cid, oid, shell=shell, nodes=nodes) + if len(nodes_with_object) >= expected_copies: + return nodes_with_object + sleep(sleep_interval) + raise AssertionError( + f"Expected {expected_copies} copies of object, but found {len(nodes_with_object)}. " + f"Waiting time {sleep_interval * attempts}" + ) + + +def is_all_storage_nodes_returned(cluster: Cluster) -> bool: + with reporter.step("Run health check for all storage nodes"): + for node in cluster.services(StorageNode): + try: + health_check = storage_node_healthcheck(node) + except Exception as err: + logger.warning(f"Node healthcheck fails with error {err}") + return False + if health_check.health_status != "READY" or health_check.network_status != "ONLINE": + return False + return True diff --git a/src/frostfs_testlib/utils/file_utils.py b/src/frostfs_testlib/utils/file_utils.py new file mode 100644 index 0000000..a41665e --- /dev/null +++ b/src/frostfs_testlib/utils/file_utils.py @@ -0,0 +1,168 @@ +import hashlib +import logging +import os +import uuid +from typing import Any, Optional + +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.resources.common import ASSETS_DIR + +reporter = get_reporter() +logger = logging.getLogger("NeoLogger") + + +def generate_file(size: int) -> str: + """Generates a binary file with the specified size in bytes. + + Args: + size: Size in bytes, can be declared as 6e+6 for example. + + Returns: + The path to the generated file. + """ + file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4())) + with open(file_path, "wb") as file: + file.write(os.urandom(size)) + logger.info(f"File with size {size} bytes has been generated: {file_path}") + + return file_path + + +def generate_file_with_content( + size: int, + file_path: Optional[str] = None, + content: Optional[str] = None, +) -> str: + """Creates a new file with specified content. + + Args: + file_path: Path to the file that should be created. If not specified, then random file + path will be generated. + content: Content that should be stored in the file. If not specified, then random binary + content will be generated. + + Returns: + Path to the generated file. + """ + mode = "w+" + if content is None: + content = os.urandom(size) + mode = "wb" + + if not file_path: + file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) + else: + if not os.path.exists(os.path.dirname(file_path)): + os.makedirs(os.path.dirname(file_path)) + + with open(file_path, mode) as file: + file.write(content) + + return file_path + + +@reporter.step_deco("Get File Hash") +def get_file_hash(file_path: str, len: Optional[int] = None, offset: Optional[int] = None) -> str: + """Generates hash for the specified file. + + Args: + file_path: Path to the file to generate hash for. + len: How many bytes to read. + offset: Position to start reading from. + + Returns: + Hash of the file as hex-encoded string. + """ + file_hash = hashlib.sha256() + with open(file_path, "rb") as out: + if len and not offset: + file_hash.update(out.read(len)) + elif len and offset: + out.seek(offset, 0) + file_hash.update(out.read(len)) + elif offset and not len: + out.seek(offset, 0) + file_hash.update(out.read()) + else: + file_hash.update(out.read()) + return file_hash.hexdigest() + + +@reporter.step_deco("Concatenation set of files to one file") +def concat_files(file_paths: list, resulting_file_path: Optional[str] = None) -> str: + """Concatenates several files into a single file. 
+ + Args: + file_paths: Paths to the files to concatenate. + resulting_file_path: Path to the file where concatenated content should be stored. + + Returns: + Path to the resulting file. + """ + if not resulting_file_path: + resulting_file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) + with open(resulting_file_path, "wb") as f: + for file in file_paths: + with open(file, "rb") as part_file: + f.write(part_file.read()) + return resulting_file_path + + +def split_file(file_path: str, parts: int) -> list[str]: + """Splits specified file into several specified number of parts. + + Each part is saved under name `{original_file}_part_{i}`. + + Args: + file_path: Path to the file that should be split. + parts: Number of parts the file should be split into. + + Returns: + Paths to the part files. + """ + with open(file_path, "rb") as file: + content = file.read() + + content_size = len(content) + chunk_size = int((content_size + parts) / parts) + + part_id = 1 + part_file_paths = [] + for content_offset in range(0, content_size + 1, chunk_size): + part_file_name = f"{file_path}_part_{part_id}" + part_file_paths.append(part_file_name) + with open(part_file_name, "wb") as out_file: + out_file.write(content[content_offset : content_offset + chunk_size]) + part_id += 1 + + return part_file_paths + + +def get_file_content( + file_path: str, content_len: Optional[int] = None, mode: str = "r", offset: Optional[int] = None +) -> Any: + """Returns content of specified file. + + Args: + file_path: Path to the file. + content_len: Limit of content length. If None, then entire file content is returned; + otherwise only the first content_len bytes of the content are returned. + mode: Mode of opening the file. + offset: Position to start reading from. + + Returns: + Content of the specified file. 
+ """ + with open(file_path, mode) as file: + if content_len and not offset: + content = file.read(content_len) + elif content_len and offset: + file.seek(offset, 0) + content = file.read(content_len) + elif offset and not content_len: + file.seek(offset, 0) + content = file.read() + else: + content = file.read() + + return content diff --git a/src/frostfs_testlib/utils/version_utils.py b/src/frostfs_testlib/utils/version_utils.py new file mode 100644 index 0000000..68f8578 --- /dev/null +++ b/src/frostfs_testlib/utils/version_utils.py @@ -0,0 +1,79 @@ +import logging +import re + +from frostfs_testlib.cli import FrostfsAdm, FrostfsCli +from frostfs_testlib.hosting import Hosting +from frostfs_testlib.resources.cli import ( + FROSTFS_ADM_EXEC, + FROSTFS_AUTHMATE_EXEC, + FROSTFS_CLI_EXEC, + NEOGO_EXECUTABLE, +) +from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG +from frostfs_testlib.shell import Shell + +logger = logging.getLogger("NeoLogger") + + +def get_local_binaries_versions(shell: Shell) -> dict[str, str]: + versions = {} + + for binary in [NEOGO_EXECUTABLE, FROSTFS_AUTHMATE_EXEC]: + out = shell.exec(f"{binary} --version").stdout + versions[binary] = _parse_version(out) + + frostfs_cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) + versions[FROSTFS_CLI_EXEC] = _parse_version(frostfs_cli.version.get().stdout) + + try: + frostfs_adm = FrostfsAdm(shell, FROSTFS_ADM_EXEC) + versions[FROSTFS_ADM_EXEC] = _parse_version(frostfs_adm.version.get().stdout) + except RuntimeError: + logger.info(f"{FROSTFS_ADM_EXEC} not installed") + + out = shell.exec("aws --version").stdout + out_lines = out.split("\n") + versions["AWS"] = out_lines[0] if out_lines else "Unknown" + + return versions + + +def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]: + versions_by_host = {} + for host in hosting.hosts: + binary_path_by_name = {} # Maps binary name to executable path + for service_config in host.config.services: + exec_path = service_config.attributes.get("exec_path") + if exec_path: + binary_path_by_name[service_config.name] = exec_path + for cli_config in host.config.clis: + binary_path_by_name[cli_config.name] = cli_config.exec_path + + shell = host.get_shell() + versions_at_host = {} + for binary_name, binary_path in binary_path_by_name.items(): + try: + result = shell.exec(f"{binary_path} --version") + versions_at_host[binary_name] = _parse_version(result.stdout) + except Exception as exc: + logger.error(f"Cannot get version for {binary_path} because of\n{exc}") + versions_at_host[binary_name] = "Unknown" + versions_by_host[host.config.address] = versions_at_host + + # Consolidate versions across all hosts + versions = {} + for host, binary_versions in versions_by_host.items(): + for name, version in binary_versions.items(): + captured_version = versions.get(name) + if captured_version: + assert ( + captured_version == version + ), f"Binary {name} has inconsistent version on host {host}" + else: + versions[name] = version + return versions + + +def _parse_version(version_output: str) -> str: + version = re.search(r"version[:\s]*v?(.+)", version_output, re.IGNORECASE) + return version.group(1).strip() if version else "Unknown" From 863a1075cd29887e09b1c8f8870a8239b69a9a1c Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 15 May 2023 14:23:40 +0300 Subject: [PATCH 076/363] Various fixes Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/load_report.py | 6 +++--- src/frostfs_testlib/processes/remote_process.py | 8 ++++++-- 
 src/frostfs_testlib/storage/cluster.py          |  2 +-
 3 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py
index 500a6e6..2771df5 100644
--- a/src/frostfs_testlib/load/load_report.py
+++ b/src/frostfs_testlib/load/load_report.py
@@ -141,13 +141,13 @@ class LoadReport:
         html = f"""
             <table border="1" cellpadding="5px"><tbody>
-                <tr><th colspan="2">{short_summary}</th></tr>
-                <tr><th colspan="2">Metrics</th></tr>
+                <tr><th colspan="2" bgcolor="gainsboro">{short_summary}</th></tr>
+                <tr><th colspan="2" bgcolor="gainsboro">Metrics</th></tr>
                 {self._row("Total operations", total_operations)}
                 {self._row("OP/sec", f"{total_rate:.2f}")}
                 {throughput_html}
-                <tr><th colspan="2">Errors</th></tr>
+                <tr><th colspan="2" bgcolor="gainsboro">Errors</th></tr>
                 {per_node_errors_html}
                 {self._row("Total", f"{total_errors} ({total_errors/total_operations*100.0:.2f}%)")}
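For orientation (an aside, not from the patch itself): the template in the hunk above delegates its data rows to a `_row` helper defined elsewhere in LoadReport. A sketch of the shape such a helper would need, inferred from the call sites; the real implementation lives outside this hunk and may differ in markup details:

```python
# Assumed shape of LoadReport._row, written here as a standalone function so the
# sketch runs on its own; it emits one two-cell table row per metric.
def row(caption: str, value: str) -> str:
    return f"<tr><th>{caption}</th><td>{value}</td></tr>"

print(row("Total operations", "151000"))
# -> <tr><th>Total operations</th><td>151000</td></tr>
```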
diff --git a/src/frostfs_testlib/processes/remote_process.py b/src/frostfs_testlib/processes/remote_process.py index c5b40bc..7f49000 100644 --- a/src/frostfs_testlib/processes/remote_process.py +++ b/src/frostfs_testlib/processes/remote_process.py @@ -72,7 +72,9 @@ class RemoteProcess: if self.saved_stdout is not None: cur_stdout = self.saved_stdout else: - terminal = self.shell.exec(f"cat {self.process_dir}/stdout") + terminal = self.shell.exec( + f"cat {self.process_dir}/stdout", options=CommandOptions(no_log=True) + ) if self.proc_rc is not None: self.saved_stdout = terminal.stdout cur_stdout = terminal.stdout @@ -101,7 +103,9 @@ class RemoteProcess: if self.saved_stderr is not None: cur_stderr = self.saved_stderr else: - terminal = self.shell.exec(f"cat {self.process_dir}/stderr") + terminal = self.shell.exec( + f"cat {self.process_dir}/stderr", options=CommandOptions(no_log=True) + ) if self.proc_rc is not None: self.saved_stderr = terminal.stdout cur_stderr = terminal.stdout diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index db2ea37..ffca778 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -88,7 +88,7 @@ class ClusterNode: service_entry = self.class_registry.get_entry(service_type) service_name = service_entry["hosting_service_name"] - pattern = f"{service_name}{self.id}" + pattern = f"{service_name}{self.id:02}" config = self.host.get_service_config(pattern) return service_type( From c8e527e9ecbbff5bb34fc61c15edbab597d51eb1 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 15 May 2023 17:07:17 +0300 Subject: [PATCH 077/363] Correct param name for skip in querry Signed-off-by: Andrey Berezin --- src/frostfs_testlib/cli/neogo/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/cli/neogo/query.py b/src/frostfs_testlib/cli/neogo/query.py index 6627790..1422daf 100644 --- a/src/frostfs_testlib/cli/neogo/query.py +++ b/src/frostfs_testlib/cli/neogo/query.py @@ -76,7 +76,7 @@ class NeoGoQuery(CliCommand): **{ param: param_value for param, param_value in locals().items() - if param not in ["self", "hash"] + if param not in ["self", "tx_hash"] }, ) From 70416d40c216edbe561cae42fc0656ee5356e6e8 Mon Sep 17 00:00:00 2001 From: Vladimir Avdeev Date: Thu, 18 May 2023 09:55:12 +0300 Subject: [PATCH 078/363] Add optional parameter "no wait for return" in panic_reboot_host --- .../storage/controllers/cluster_state_controller.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 23d1a6c..35072f2 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -94,18 +94,19 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Hard reboot host {node} via magic SysRq option") - def panic_reboot_host(self, node: ClusterNode): + def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True): shell = node.host.get_shell() shell.exec('sudo sh -c "echo 1 > /proc/sys/kernel/sysrq"') options = CommandOptions(close_stdin=True, timeout=1, check=False) shell.exec('sudo sh -c "echo b > /proc/sysrq-trigger"', options) - # Let the things to be settled - # A little wait here to prevent ssh stuck during panic - time.sleep(10) - 
wait_for_host_online(self.shell, node.storage_node) - wait_for_node_online(node.storage_node) + if wait_for_return: + # Let the things to be settled + # A little wait here to prevent ssh stuck during panic + time.sleep(10) + wait_for_host_online(self.shell, node.storage_node) + wait_for_node_online(node.storage_node) @reporter.step_deco("Wait up to {timeout} seconds for nodes on cluster to align epochs") def wait_for_epochs_align(self, timeout=60): From 7399cc9a8efeb6a4a5ccd2d8b108ba386d2ca8de Mon Sep 17 00:00:00 2001 From: Vladimir Avdeev Date: Thu, 18 May 2023 10:18:09 +0300 Subject: [PATCH 079/363] Bump version 2.0.0 -> 2.0.1 Signed-off-by: Vladimir Avdeev --- pyproject.toml | 4 ++-- src/frostfs_testlib/__init__.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index fd5d8b7..69f8e29 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "frostfs-testlib" -version = "2.0.0" +version = "2.0.1" description = "Building blocks and utilities to facilitate development of automated tests for FrostFS system" readme = "README.md" authors = [{ name = "Yadro", email = "info@yadro.com" }] @@ -54,7 +54,7 @@ line-length = 100 target-version = ["py310"] [tool.bumpver] -current_version = "2.0.0" +current_version = "2.0.1" version_pattern = "MAJOR.MINOR.PATCH" commit_message = "Bump version {old_version} -> {new_version}" commit = true diff --git a/src/frostfs_testlib/__init__.py b/src/frostfs_testlib/__init__.py index 8c0d5d5..159d48b 100644 --- a/src/frostfs_testlib/__init__.py +++ b/src/frostfs_testlib/__init__.py @@ -1 +1 @@ -__version__ = "2.0.0" +__version__ = "2.0.1" From d30d3d5cfd6645c24f47fbfbda6427edf203e914 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Thu, 18 May 2023 14:30:25 +0300 Subject: [PATCH 080/363] make dir on prepare step Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/k6.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index b3534d4..a748f67 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -153,6 +153,9 @@ class K6: @reporter.step_deco("Start K6 on initiator") def start(self) -> None: + # Make working_dir directory + self.shell.exec(f"mkdir -p {self.load_params.working_dir}") + command = ( f"{self._k6_dir}/k6 run {self._generate_env_variables()} " f"{self._k6_dir}/scenarios/{self.scenario.value}.js" From d9504697ba78b9c9d179fee6aa83274dddade7a4 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Fri, 12 May 2023 09:20:04 +0300 Subject: [PATCH 081/363] Add interfaces for deleting fstree, blobovnocza and pilorama.db --- src/frostfs_testlib/hosting/docker_host.py | 9 ++++++ src/frostfs_testlib/hosting/interfaces.py | 30 +++++++++++++++++++ .../storage/dataclasses/frostfs_services.py | 9 ++++++ 3 files changed, 48 insertions(+) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 7b22438..ccf1b64 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -129,6 +129,15 @@ class DockerHost(Host): timeout=service_attributes.start_timeout, ) + def delete_fstree(self, service_name: str) -> None: + raise NotImplementedError("Not implemented for docker") + + def delete_blobovnicza(self, service_name: str) -> None: + raise NotImplementedError("Not implemented for docker") + + def delete_pilorama(self, service_name: str) -> None: + raise 
NotImplementedError("Not implemented for docker") + def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None: service_attributes = self._get_service_attributes(service_name) diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index 73f4954..8a617e9 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -121,6 +121,36 @@ class Host(ABC): cache_only: To delete cache only. """ + @abstractmethod + def delete_fstree(self, service_name: str) -> None: + """ + Deletes all fstrees in the node. + + Args: + service_name: Name of storage node service. + + """ + + @abstractmethod + def delete_blobovnicza(self, service_name: str) -> None: + """ + Deletes all blobovniczas in the node. + + Args: + service_name: Name of storage node service. + + """ + + @abstractmethod + def delete_pilorama(self, service_name: str) -> None: + """ + Deletes all pilorama.db files in the node. + + Args: + service_name: Name of storage node service. + + """ + @abstractmethod def detach_disk(self, device: str) -> DiskInfo: """Detaches disk device to simulate disk offline/failover scenario. diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index 1871aa3..e87d264 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -168,6 +168,15 @@ class StorageNode(NodeBase): def get_un_locode(self): return self._get_attribute(ConfigAttributes.UN_LOCODE) + def delete_blobovnicza(self): + self.host.delete_blobovnicza(self.name) + + def delete_fstree(self): + self.host.delete_fstree(self.name) + + def delete_pilorama(self): + self.host.delete_pilorama(self.name) + @property def label(self) -> str: return f"{self.name}: {self.get_rpc_endpoint()}" From 10468fa5450adb64f3978b3be8091cd090652f70 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Fri, 19 May 2023 11:16:34 +0300 Subject: [PATCH 082/363] Use root to create dir and then transfer it to load user Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/k6.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index a748f67..d3939ae 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -13,6 +13,7 @@ from frostfs_testlib.load.load_config import ( ) from frostfs_testlib.processes.remote_process import RemoteProcess from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.resources.load_params import LOAD_NODE_SSH_USER from frostfs_testlib.shell import Shell from frostfs_testlib.storage.dataclasses.wallet import WalletInfo @@ -154,7 +155,8 @@ class K6: @reporter.step_deco("Start K6 on initiator") def start(self) -> None: # Make working_dir directory - self.shell.exec(f"mkdir -p {self.load_params.working_dir}") + self.shell.exec(f"sudo mkdir -p {self.load_params.working_dir}") + self.shell.exec(f"sudo chown {LOAD_NODE_SSH_USER} {self.load_params.working_dir}") command = ( f"{self._k6_dir}/k6 run {self._generate_env_variables()} " From a34c34991f67b59bd99e9988a4715e22a8d087ea Mon Sep 17 00:00:00 2001 From: sstovbyra Date: Mon, 22 May 2023 23:27:42 +0300 Subject: [PATCH 083/363] Update payment_neogo.py --- src/frostfs_testlib/steps/payment_neogo.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/steps/payment_neogo.py 
b/src/frostfs_testlib/steps/payment_neogo.py index 07dddd2..6a64a5a 100644 --- a/src/frostfs_testlib/steps/payment_neogo.py +++ b/src/frostfs_testlib/steps/payment_neogo.py @@ -79,7 +79,8 @@ def transaction_accepted(main_chain: MainChain, tx_id: str): try: for _ in range(0, TX_PERSIST_TIMEOUT): time.sleep(1) - resp = main_chain.rpc_client.get_transaction_height(tx_id) + neogo = NeoGo(shell=main_chain.host.get_shell(), neo_go_exec_path=NEOGO_EXECUTABLE) + resp = neogo.query.tx(tx_hash=tx_id, rpc_endpoint=main_chain.get_endpoint()) if resp is not None: logger.info(f"TX is accepted in block: {resp}") return True, resp From 6cdeb497c0ab09228f9927fce729da657a18bc04 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 24 May 2023 11:15:12 +0300 Subject: [PATCH 084/363] Various updates for failover and to cases Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/k6.py | 2 +- src/frostfs_testlib/load/load_verifiers.py | 41 ++++++++++++++++---- src/frostfs_testlib/resources/load_params.py | 4 +- 3 files changed, 37 insertions(+), 10 deletions(-) diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index d3939ae..24ca447 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -239,4 +239,4 @@ class K6: def __log_output(self) -> None: reporter.attach(self._k6_process.stdout(full=True), "K6 stdout") - reporter.attach(self._k6_process.stderr(full=True), "K6 stderr") + reporter.attach(f"{self._k6_process.process_dir}/stderr", "K6 stderr path") diff --git a/src/frostfs_testlib/load/load_verifiers.py b/src/frostfs_testlib/load/load_verifiers.py index becfcf7..69e9f1f 100644 --- a/src/frostfs_testlib/load/load_verifiers.py +++ b/src/frostfs_testlib/load/load_verifiers.py @@ -11,26 +11,53 @@ class LoadVerifier: self.load_params = load_params def verify_summaries(self, load_summary, verification_summary) -> None: + exceptions = [] + if not verification_summary or not load_summary: logger.info("Can't check load results due to missing summary") load_metrics = get_metrics_object(self.load_params.scenario, load_summary) - writers = self.load_params.writers or 0 + + writers = self.load_params.writers or self.load_params.preallocated_writers or 0 + readers = self.load_params.readers or self.load_params.preallocated_readers or 0 + deleters = self.load_params.deleters or self.load_params.preallocated_deleters or 0 objects_count = load_metrics.write_success_iterations fails_count = load_metrics.write_failed_iterations if writers > 0: - assert objects_count > 0, "Total put objects should be greater than 0" - assert fails_count == 0, f"There were {fails_count} failed put objects operations" + if objects_count < 1: + exceptions.append("Total put objects should be greater than 0") + if fails_count > 0: + exceptions.append(f"There were {fails_count} failed write operations") + + if readers > 0: + read_count = load_metrics.read_success_iterations + read_fails_count = load_metrics.read_failed_iterations + if read_count < 1: + exceptions.append("Total read operations should be greater than 0") + if read_fails_count > 0: + exceptions.append(f"There were {read_fails_count} failed read operations") + + if deleters > 0: + delete_count = load_metrics.delete_success_iterations + delete_fails_count = load_metrics.delete_failed_iterations + if delete_count < 1: + exceptions.append("Total delete operations should be greater than 0") + if delete_fails_count > 0: + exceptions.append(f"There were {delete_fails_count} failed delete operations") if verification_summary: 
verify_metrics = get_metrics_object(LoadScenario.VERIFY, verification_summary) verified_objects = verify_metrics.read_success_iterations invalid_objects = verify_metrics.read_failed_iterations - assert invalid_objects == 0, f"There were {invalid_objects} verification fails" + if invalid_objects > 0: + exceptions.append(f"There were {invalid_objects} verification fails") # Due to interruptions we may see total verified objects to be less than written on writers count - assert ( - abs(objects_count - verified_objects) <= writers - ), f"Verified objects is less than total objects. Total: {objects_count}, Verified: {verified_objects}. Writers: {writers}." + if abs(objects_count - verified_objects) > writers: + exceptions.append( + f"Verified objects is less than total objects. Total: {objects_count}, Verified: {verified_objects}. Writers: {writers}." + ) + + assert not exceptions, "\n".join(exceptions) diff --git a/src/frostfs_testlib/resources/load_params.py b/src/frostfs_testlib/resources/load_params.py index a43d20b..64f1aa4 100644 --- a/src/frostfs_testlib/resources/load_params.py +++ b/src/frostfs_testlib/resources/load_params.py @@ -7,8 +7,8 @@ LOAD_NODE_SSH_USER = os.getenv("LOAD_NODE_SSH_USER", "service") LOAD_NODE_SSH_PASSWORD = os.getenv("LOAD_NODE_SSH_PASSWORD") LOAD_NODE_SSH_PRIVATE_KEY_PATH = os.getenv("LOAD_NODE_SSH_PRIVATE_KEY_PATH") LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE = os.getenv("LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE") -BACKGROUND_WRITERS_COUNT = os.getenv("BACKGROUND_WRITERS_COUNT", 4) -BACKGROUND_READERS_COUNT = os.getenv("BACKGROUND_READERS_COUNT", 4) +BACKGROUND_WRITERS_COUNT = os.getenv("BACKGROUND_WRITERS_COUNT", 0) +BACKGROUND_READERS_COUNT = os.getenv("BACKGROUND_READERS_COUNT", 0) BACKGROUND_DELETERS_COUNT = os.getenv("BACKGROUND_DELETERS_COUNT", 0) BACKGROUND_LOAD_DEFAULT_TIME = os.getenv("BACKGROUND_LOAD_DEFAULT_TIME", 600) BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE = os.getenv("BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE", 32) From 32a8c5274aad38c012bb2fc127b2849047e44a84 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Thu, 25 May 2023 11:59:09 +0300 Subject: [PATCH 085/363] Fix time block Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/resources/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/resources/common.py b/src/frostfs_testlib/resources/common.py index 47aa976..dfbb3a1 100644 --- a/src/frostfs_testlib/resources/common.py +++ b/src/frostfs_testlib/resources/common.py @@ -11,7 +11,7 @@ COMPLEX_OBJECT_TAIL_SIZE = os.getenv("COMPLEX_OBJECT_TAIL_SIZE", "1000") SERVICE_MAX_STARTUP_TIME = os.getenv("SERVICE_MAX_STARTUP_TIME", "5m") MORPH_TIMEOUT = os.getenv("MORPH_BLOCK_TIME", "8s") -MORPH_BLOCK_TIME = os.getenv("MORPH_BLOCK_TIME", "1s") +MORPH_BLOCK_TIME = os.getenv("MORPH_BLOCK_TIME", "8s") FROSTFS_CONTRACT_CACHE_TIMEOUT = os.getenv("FROSTFS_CONTRACT_CACHE_TIMEOUT", "30s") # Time interval that allows a GC pass on storage node (this includes GC sleep interval From 123b5425a86ea7424cefb13ddeadf4727749e969 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 24 May 2023 20:29:10 +0300 Subject: [PATCH 086/363] 1. Increase wait time for k6 teardown after stop signal 2. 
Remove duplicated code Signed-off-by: Andrey Berezin --- .../controllers/background_load_controller.py | 207 ------------------ .../controllers/cluster_state_controller.py | 130 ----------- src/frostfs_testlib/load/k6.py | 44 ++-- src/frostfs_testlib/load/load_config.py | 5 +- src/frostfs_testlib/resources/load_params.py | 3 + .../storage/controllers/__init__.py | 4 + .../controllers/background_load_controller.py | 12 +- src/frostfs_testlib/testing/test_control.py | 3 +- src/frostfs_testlib/utils/datetime_utils.py | 3 + 9 files changed, 40 insertions(+), 371 deletions(-) delete mode 100644 src/frostfs_testlib/controllers/background_load_controller.py delete mode 100644 src/frostfs_testlib/controllers/cluster_state_controller.py create mode 100644 src/frostfs_testlib/storage/controllers/__init__.py diff --git a/src/frostfs_testlib/controllers/background_load_controller.py b/src/frostfs_testlib/controllers/background_load_controller.py deleted file mode 100644 index 4a97c29..0000000 --- a/src/frostfs_testlib/controllers/background_load_controller.py +++ /dev/null @@ -1,207 +0,0 @@ -import frostfs_testlib.resources.optionals as optionals -from frostfs_testlib.load.k6 import K6 -from frostfs_testlib.load.load_config import ( - EndpointSelectionStrategy, - K6ProcessAllocationStrategy, - LoadParams, - LoadScenario, - LoadType, -) -from frostfs_testlib.load.load_steps import init_s3_client, prepare_k6_instances -from frostfs_testlib.reporter import get_reporter -from frostfs_testlib.resources.load_params import ( - K6_TEARDOWN_PERIOD, - LOAD_NODE_SSH_PASSWORD, - LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE, - LOAD_NODE_SSH_PRIVATE_KEY_PATH, - LOAD_NODE_SSH_USER, - LOAD_NODES, -) -from frostfs_testlib.shell.interfaces import SshCredentials -from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.storage.cluster.frostfs_services import S3Gate, StorageNode -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo -from frostfs_testlib.testing.test_control import run_optionally - -reporter = get_reporter() - - -class BackgroundLoadController: - k6_instances: list[K6] - k6_dir: str - load_params: LoadParams - load_nodes: list[str] - verification_params: LoadParams - nodes_under_load: list[ClusterNode] - ssh_credentials: SshCredentials - loaders_wallet: WalletInfo - endpoints: list[str] - - def __init__( - self, - k6_dir: str, - load_params: LoadParams, - loaders_wallet: WalletInfo, - nodes_under_load: list[ClusterNode], - ) -> None: - self.k6_dir = k6_dir - self.load_params = load_params - self.nodes_under_load = nodes_under_load - self.load_nodes = LOAD_NODES - self.loaders_wallet = loaders_wallet - - if load_params.endpoint_selection_strategy is None: - raise RuntimeError("endpoint_selection_strategy should not be None") - - self.endpoints = self._get_endpoints( - load_params.load_type, load_params.endpoint_selection_strategy - ) - self.verification_params = LoadParams( - clients=load_params.readers, - scenario=LoadScenario.VERIFY, - registry_file=load_params.registry_file, - verify_time=load_params.verify_time, - load_type=load_params.load_type, - load_id=load_params.load_id, - working_dir=load_params.working_dir, - endpoint_selection_strategy=load_params.endpoint_selection_strategy, - k6_process_allocation_strategy=load_params.k6_process_allocation_strategy, - ) - self.ssh_credentials = SshCredentials( - LOAD_NODE_SSH_USER, - LOAD_NODE_SSH_PASSWORD, - LOAD_NODE_SSH_PRIVATE_KEY_PATH, - LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE, - ) - - 
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, []) - def _get_endpoints( - self, load_type: LoadType, endpoint_selection_strategy: EndpointSelectionStrategy - ): - all_endpoints = { - LoadType.gRPC: { - EndpointSelectionStrategy.ALL: list( - set( - endpoint - for node_under_load in self.nodes_under_load - for endpoint in node_under_load.service(StorageNode).get_all_rpc_endpoint() - ) - ), - EndpointSelectionStrategy.FIRST: list( - set( - node_under_load.service(StorageNode).get_rpc_endpoint() - for node_under_load in self.nodes_under_load - ) - ), - }, - # for some reason xk6 appends http protocol on its own - LoadType.S3: { - EndpointSelectionStrategy.ALL: list( - set( - endpoint.replace("http://", "") - for node_under_load in self.nodes_under_load - for endpoint in node_under_load.service(S3Gate).get_all_endpoints() - ) - ), - EndpointSelectionStrategy.FIRST: list( - set( - node_under_load.service(S3Gate).get_endpoint().replace("http://", "") - for node_under_load in self.nodes_under_load - ) - ), - }, - } - - return all_endpoints[load_type][endpoint_selection_strategy] - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Prepare background load instances") - def prepare(self): - if self.load_params.load_type == LoadType.S3: - init_s3_client( - self.load_nodes, - self.load_params, - self.k6_dir, - self.ssh_credentials, - self.nodes_under_load, - self.loaders_wallet, - ) - - self._prepare(self.load_params) - - def _prepare(self, load_params: LoadParams): - self.k6_instances = prepare_k6_instances( - load_nodes=LOAD_NODES, - ssh_credentials=self.ssh_credentials, - k6_dir=self.k6_dir, - load_params=load_params, - endpoints=self.endpoints, - loaders_wallet=self.loaders_wallet, - ) - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Start background load") - def start(self): - if self.load_params.preset is None: - raise RuntimeError("Preset should not be none at the moment of start") - - with reporter.step( - f"Start background load on nodes {self.nodes_under_load}: " - f"writers = {self.load_params.writers}, " - f"obj_size = {self.load_params.object_size}, " - f"load_time = {self.load_params.load_time}, " - f"prepare_json = {self.load_params.preset.pregen_json}, " - f"endpoints = {self.endpoints}" - ): - for k6_load_instance in self.k6_instances: - k6_load_instance.start() - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Stop background load") - def stop(self): - for k6_load_instance in self.k6_instances: - k6_load_instance.stop() - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, True) - def is_running(self): - for k6_load_instance in self.k6_instances: - if not k6_load_instance.is_running: - return False - - return True - - def wait_until_finish(self): - if self.load_params.load_time is None: - raise RuntimeError("LoadTime should not be none") - - for k6_instance in self.k6_instances: - k6_instance.wait_until_finished(self.load_params.load_time + int(K6_TEARDOWN_PERIOD)) - - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - def verify(self): - if self.verification_params.verify_time is None: - raise RuntimeError("verify_time should not be none") - - self._prepare(self.verification_params) - with reporter.step("Run verify background load data"): - for k6_verify_instance in self.k6_instances: - k6_verify_instance.start() - k6_verify_instance.wait_until_finished(self.verification_params.verify_time) - - 
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("K6 run results") - def get_results(self) -> dict: - results = {} - for k6_instance in self.k6_instances: - if k6_instance.load_params.k6_process_allocation_strategy is None: - raise RuntimeError("k6_process_allocation_strategy should not be none") - - result = k6_instance.get_results() - keys_map = { - K6ProcessAllocationStrategy.PER_LOAD_NODE: k6_instance.load_node, - K6ProcessAllocationStrategy.PER_ENDPOINT: k6_instance.endpoints[0], - } - key = keys_map[k6_instance.load_params.k6_process_allocation_strategy] - results[key] = result - - return results diff --git a/src/frostfs_testlib/controllers/cluster_state_controller.py b/src/frostfs_testlib/controllers/cluster_state_controller.py deleted file mode 100644 index 23d1a6c..0000000 --- a/src/frostfs_testlib/controllers/cluster_state_controller.py +++ /dev/null @@ -1,130 +0,0 @@ -import time - -import allure - -import frostfs_testlib.resources.optionals as optionals -from frostfs_testlib.reporter import get_reporter -from frostfs_testlib.shell import CommandOptions, Shell -from frostfs_testlib.steps import epoch -from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode -from frostfs_testlib.storage.controllers.disk_controller import DiskController -from frostfs_testlib.testing.test_control import run_optionally, wait_for_success -from frostfs_testlib.utils.failover_utils import ( - wait_all_storage_nodes_returned, - wait_for_host_offline, - wait_for_host_online, - wait_for_node_online, -) - -reporter = get_reporter() - - -class ClusterStateController: - def __init__(self, shell: Shell, cluster: Cluster) -> None: - self.stopped_nodes: list[ClusterNode] = [] - self.detached_disks: dict[str, DiskController] = {} - self.stopped_storage_nodes: list[StorageNode] = [] - self.cluster = cluster - self.shell = shell - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Stop host of node {node}") - def stop_node_host(self, node: ClusterNode, mode: str): - with allure.step(f"Stop host {node.host.config.address}"): - node.host.stop_host(mode=mode) - wait_for_host_offline(self.shell, node.storage_node) - self.stopped_nodes.append(node) - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start host of node {node}") - def start_node_host(self, node: ClusterNode): - with allure.step(f"Start host {node.host.config.address}"): - node.host.start_host() - wait_for_host_online(self.shell, node.storage_node) - wait_for_node_online(node.storage_node) - self.stopped_nodes.remove(node) - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start stopped hosts") - def start_stopped_hosts(self): - for node in self.stopped_nodes: - node.host.start_host() - self.stopped_nodes = [] - wait_all_storage_nodes_returned(self.shell, self.cluster) - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Detach disk {device} at {mountpoint} on node {node}") - def detach_disk(self, node: StorageNode, device: str, mountpoint: str): - disk_controller = self._get_disk_controller(node, device, mountpoint) - self.detached_disks[disk_controller.id] = disk_controller - disk_controller.detach() - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Attach disk {device} at {mountpoint} on node {node}") - def attach_disk(self, node: StorageNode, device: str, mountpoint: str): - disk_controller = self._get_disk_controller(node, device, mountpoint) - 
disk_controller.attach() - self.detached_disks.pop(disk_controller.id, None) - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Restore detached disks") - def restore_disks(self): - for disk_controller in self.detached_disks.values(): - disk_controller.attach() - self.detached_disks = {} - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Stop storage service on {node}") - def stop_storage_service(self, node: ClusterNode): - node.storage_node.stop_service() - self.stopped_storage_nodes.append(node.storage_node) - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start storage service on {node}") - def start_storage_service(self, node: ClusterNode): - node.storage_node.start_service() - self.stopped_storage_nodes.remove(node.storage_node) - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start stopped storage services") - def start_stopped_storage_services(self): - for node in self.stopped_storage_nodes: - node.start_service() - self.stopped_storage_nodes = [] - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Hard reboot host {node} via magic SysRq option") - def panic_reboot_host(self, node: ClusterNode): - shell = node.host.get_shell() - shell.exec('sudo sh -c "echo 1 > /proc/sys/kernel/sysrq"') - - options = CommandOptions(close_stdin=True, timeout=1, check=False) - shell.exec('sudo sh -c "echo b > /proc/sysrq-trigger"', options) - - # Let the things to be settled - # A little wait here to prevent ssh stuck during panic - time.sleep(10) - wait_for_host_online(self.shell, node.storage_node) - wait_for_node_online(node.storage_node) - - @reporter.step_deco("Wait up to {timeout} seconds for nodes on cluster to align epochs") - def wait_for_epochs_align(self, timeout=60): - @wait_for_success(timeout, 5, None, True) - def check_epochs(): - epochs_by_node = epoch.get_epochs_from_nodes(self.shell, self.cluster) - assert ( - len(set(epochs_by_node.values())) == 1 - ), f"unaligned epochs found: {epochs_by_node}" - - check_epochs() - - def _get_disk_controller( - self, node: StorageNode, device: str, mountpoint: str - ) -> DiskController: - disk_controller_id = DiskController.get_id(node, device) - if disk_controller_id in self.detached_disks.keys(): - disk_controller = self.detached_disks[disk_controller_id] - else: - disk_controller = DiskController(node, device, mountpoint) - - return disk_controller diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index 24ca447..9a8b1d9 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -13,9 +13,10 @@ from frostfs_testlib.load.load_config import ( ) from frostfs_testlib.processes.remote_process import RemoteProcess from frostfs_testlib.reporter import get_reporter -from frostfs_testlib.resources.load_params import LOAD_NODE_SSH_USER +from frostfs_testlib.resources.load_params import K6_STOP_SIGNAL_TIMEOUT, LOAD_NODE_SSH_USER from frostfs_testlib.shell import Shell from frostfs_testlib.storage.dataclasses.wallet import WalletInfo +from frostfs_testlib.testing.test_control import wait_for_success EXIT_RESULT_CODE = 0 @@ -34,8 +35,6 @@ class LoadResults: class K6: _k6_process: RemoteProcess - _k6_stop_attempts: int = 5 - _k6_stop_check_interval: int = 15 def __init__( self, @@ -178,7 +177,7 @@ class K6: if timeout > 0: sleep(wait_interval) timeout -= wait_interval - self._stop() + self.stop() raise TimeoutError(f"Expected K6 finished in {timeout} sec.") def 
get_results(self) -> Any: @@ -200,21 +199,12 @@ class K6: reporter.attach(summary_text, allure_filename) return summary_json - @reporter.step_deco("Assert K6 should be finished") - def _k6_should_be_finished(self) -> None: - k6_rc = self._k6_process.rc() - assert k6_rc == 0, f"K6 unexpectedly finished with RC {k6_rc}" - - @reporter.step_deco("Terminate K6 on initiator") + @reporter.step_deco("Stop K6") def stop(self) -> None: - if not self.is_running: - self.get_results() - raise AssertionError("K6 unexpectedly finished") + if self.is_running: + self._k6_process.stop() - self._stop() - - k6_rc = self._k6_process.rc() - assert k6_rc == EXIT_RESULT_CODE, f"Return code of K6 job should be 0, but {k6_rc}" + self._wait_until_process_end() @property def is_running(self) -> bool: @@ -222,20 +212,12 @@ class K6: return self._k6_process.running() return False - @reporter.step_deco("Try to stop K6 with SIGTERM") - def _stop(self) -> None: - self._k6_process.stop() - with reporter.step("Wait until process end"): - for _ in range(self._k6_stop_attempts): - if not self._k6_process.running(): - break - - sleep(self._k6_stop_check_interval) - else: - raise AssertionError("Can not stop K6 process within timeout") - - def _kill(self) -> None: - self._k6_process.kill() + @reporter.step_deco("Wait until process end") + @wait_for_success( + K6_STOP_SIGNAL_TIMEOUT, 15, False, False, "Can not stop K6 process within timeout" + ) + def _wait_until_process_end(self): + return self._k6_process.running() def __log_output(self) -> None: reporter.attach(self._k6_process.stdout(full=True), "K6 stdout") diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index fd2fdef..4e67321 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -143,6 +143,9 @@ class LoadParams: min_iteration_duration: Optional[str] = metadata_field( all_load_scenarios, None, "K6_MIN_ITERATION_DURATION" ) + # Specifies K6 setupTimeout time. Currently hardcoded in xk6 as 5 seconds for all scenarios + # https://k6.io/docs/using-k6/k6-options/reference/#setup-timeout + setup_timeout: Optional[str] = metadata_field(all_scenarios, None, "K6_SETUP_TIMEOUT") # ------- CONSTANT VUS SCENARIO PARAMS ------- # Amount of Writers VU. @@ -202,7 +205,7 @@ class LoadParams: # Maximum verification time for k6 to verify objects. Default is BACKGROUND_LOAD_MAX_VERIFY_TIME (3600). verify_time: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "TIME_LIMIT") # Amount of Verification VU. 
- clients: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "CLIENTS") + verify_clients: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "CLIENTS", True) def set_id(self, load_id): self.load_id = load_id diff --git a/src/frostfs_testlib/resources/load_params.py b/src/frostfs_testlib/resources/load_params.py index 64f1aa4..6699207 100644 --- a/src/frostfs_testlib/resources/load_params.py +++ b/src/frostfs_testlib/resources/load_params.py @@ -10,8 +10,10 @@ LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE = os.getenv("LOAD_NODE_SSH_PRIVATE_KEY_PASS BACKGROUND_WRITERS_COUNT = os.getenv("BACKGROUND_WRITERS_COUNT", 0) BACKGROUND_READERS_COUNT = os.getenv("BACKGROUND_READERS_COUNT", 0) BACKGROUND_DELETERS_COUNT = os.getenv("BACKGROUND_DELETERS_COUNT", 0) +BACKGROUND_VERIFIERS_COUNT = os.getenv("BACKGROUND_VERIFIERS_COUNT", 0) BACKGROUND_LOAD_DEFAULT_TIME = os.getenv("BACKGROUND_LOAD_DEFAULT_TIME", 600) BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE = os.getenv("BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE", 32) +BACKGROUND_LOAD_SETUP_TIMEOUT = os.getenv("BACKGROUND_LOAD_SETUP_TIMEOUT", "5s") # This will decrease load params for some weak environments BACKGROUND_LOAD_VUS_COUNT_DIVISOR = os.getenv("BACKGROUND_LOAD_VUS_COUNT_DIVISOR", 1) @@ -27,4 +29,5 @@ PRESET_CONTAINERS_COUNT = os.getenv("CONTAINERS_COUNT", "40") PRESET_OBJECTS_COUNT = os.getenv("OBJ_COUNT", "10") K6_DIRECTORY = os.getenv("K6_DIRECTORY", "/etc/k6") K6_TEARDOWN_PERIOD = os.getenv("K6_TEARDOWN_PERIOD", "30") +K6_STOP_SIGNAL_TIMEOUT = int(os.getenv("K6_STOP_SIGNAL_TIMEOUT", 300)) LOAD_CONFIG_YAML_PATH = os.getenv("LOAD_CONFIG_YAML_PATH", "load_config_yaml_file.yml") diff --git a/src/frostfs_testlib/storage/controllers/__init__.py b/src/frostfs_testlib/storage/controllers/__init__.py new file mode 100644 index 0000000..65268f4 --- /dev/null +++ b/src/frostfs_testlib/storage/controllers/__init__.py @@ -0,0 +1,4 @@ +from frostfs_testlib.storage.controllers.background_load_controller import BackgroundLoadController +from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController +from frostfs_testlib.storage.controllers.disk_controller import DiskController, DiskInfo +from frostfs_testlib.storage.controllers.shards_watcher import ShardsWatcher diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index fedda19..f9cf0e5 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -1,3 +1,5 @@ +import time + import frostfs_testlib.resources.optionals as optionals from frostfs_testlib.load.k6 import K6 from frostfs_testlib.load.load_config import ( @@ -22,6 +24,7 @@ from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.test_control import run_optionally +from frostfs_testlib.utils import datetime_utils reporter = get_reporter() @@ -57,7 +60,7 @@ class BackgroundLoadController: load_params.load_type, load_params.endpoint_selection_strategy ) self.verification_params = LoadParams( - clients=load_params.readers, + verify_clients=load_params.verify_clients, scenario=LoadScenario.VERIFY, registry_file=load_params.registry_file, verify_time=load_params.verify_time, @@ -156,6 +159,12 @@ class BackgroundLoadController: for k6_load_instance in 
self.k6_instances: k6_load_instance.start() + wait_after_start_time = datetime_utils.parse_time(self.load_params.setup_timeout) + 5 + with reporter.step( + f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on" + ): + time.sleep(wait_after_start_time) + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @reporter.step_deco("Stop background load") def stop(self): @@ -170,6 +179,7 @@ class BackgroundLoadController: return True + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) def wait_until_finish(self): if self.load_params.load_time is None: raise RuntimeError("LoadTime should not be none") diff --git a/src/frostfs_testlib/testing/test_control.py b/src/frostfs_testlib/testing/test_control.py index 5621a61..ed74f6a 100644 --- a/src/frostfs_testlib/testing/test_control.py +++ b/src/frostfs_testlib/testing/test_control.py @@ -123,6 +123,7 @@ def wait_for_success( interval: int = 1, expected_result: Any = None, fail_testcase: bool = False, + fail_message: str = "", ): """ Decorator to wait for some conditions/functions to pass successfully. @@ -141,7 +142,7 @@ def wait_for_success( try: actual_result = func(*a, **kw) if expected_result is not None: - assert expected_result == actual_result + assert expected_result == actual_result, fail_message return actual_result except Exception as ex: logger.debug(ex) diff --git a/src/frostfs_testlib/utils/datetime_utils.py b/src/frostfs_testlib/utils/datetime_utils.py index a357d8a..830178f 100644 --- a/src/frostfs_testlib/utils/datetime_utils.py +++ b/src/frostfs_testlib/utils/datetime_utils.py @@ -10,6 +10,9 @@ def parse_time(value: str) -> int: Returns: Number of seconds in the parsed time interval. """ + if value is None: + return 0 + value = value.lower() for suffix in ["s", "sec"]: From cc35b2e6da4aee957a0e1d8540f4a0731e1a2d07 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Thu, 25 May 2023 23:09:07 +0300 Subject: [PATCH 087/363] Changes required to run multiple loads during one test Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/k6.py | 11 +- src/frostfs_testlib/load/load_report.py | 212 +++++++++--------- src/frostfs_testlib/load/load_steps.py | 17 +- src/frostfs_testlib/load/load_verifiers.py | 2 +- .../controllers/background_load_controller.py | 85 +++++-- .../controllers/cluster_state_controller.py | 28 ++- 6 files changed, 217 insertions(+), 138 deletions(-) diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index 9a8b1d9..2fa2c00 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -153,10 +153,6 @@ class K6: @reporter.step_deco("Start K6 on initiator") def start(self) -> None: - # Make working_dir directory - self.shell.exec(f"sudo mkdir -p {self.load_params.working_dir}") - self.shell.exec(f"sudo chown {LOAD_NODE_SSH_USER} {self.load_params.working_dir}") - command = ( f"{self._k6_dir}/k6 run {self._generate_env_variables()} " f"{self._k6_dir}/scenarios/{self.scenario.value}.js" @@ -170,13 +166,12 @@ class K6: assert "No k6 instances were executed" if k6_should_be_running: assert self._k6_process.running(), "k6 should be running." - while timeout >= 0: + while timeout > 0: if not self._k6_process.running(): return logger.info(f"K6 is running. 
Waiting {wait_interval} seconds...") - if timeout > 0: - sleep(wait_interval) - timeout -= wait_interval + sleep(wait_interval) + timeout -= wait_interval self.stop() raise TimeoutError(f"Expected K6 finished in {timeout} sec.") diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py index 2771df5..c9c23c7 100644 --- a/src/frostfs_testlib/load/load_report.py +++ b/src/frostfs_testlib/load/load_report.py @@ -10,7 +10,8 @@ from frostfs_testlib.load.load_metrics import get_metrics_object class LoadReport: def __init__(self, load_test) -> None: self.load_test = load_test - self.load_summaries: Optional[dict] = None + # List of load summaries dict + self.load_summaries_list: Optional[list[dict]] = [] self.load_params: Optional[LoadParams] = None self.start_time: Optional[datetime] = None self.end_time: Optional[datetime] = None @@ -21,8 +22,8 @@ class LoadReport: def set_end_time(self): self.end_time = datetime.utcnow() - def set_summaries(self, load_summaries: dict): - self.load_summaries = load_summaries + def add_summaries(self, load_summaries: dict): + self.load_summaries_list.append(load_summaries) def set_load_params(self, load_params: LoadParams): self.load_params = load_params @@ -30,7 +31,7 @@ class LoadReport: def get_report_html(self): report_sections = [ [self.load_test, self._get_load_params_section_html], - [self.load_summaries, self._get_totals_section_html], + [self.load_summaries_list, self._get_totals_section_html], [self.end_time, self._get_test_time_html], ] @@ -156,110 +157,113 @@ class LoadReport: return html def _get_totals_section_html(self): + html = "" + for i, load_summaries in enumerate(self.load_summaries_list, 1): + html += f"
<h3>Load Results for load #{i}</h3>"

-        html = "<h3>Load Results</h3>
" - - write_operations = 0 - write_op_sec = 0 - write_throughput = 0 - write_errors = {} - requested_write_rate = self.load_params.write_rate - requested_write_rate_str = f"{requested_write_rate}op/sec" if requested_write_rate else "" - - read_operations = 0 - read_op_sec = 0 - read_throughput = 0 - read_errors = {} - requested_read_rate = self.load_params.read_rate - requested_read_rate_str = f"{requested_read_rate}op/sec" if requested_read_rate else "" - - delete_operations = 0 - delete_op_sec = 0 - delete_errors = {} - requested_delete_rate = self.load_params.delete_rate - requested_delete_rate_str = ( - f"{requested_delete_rate}op/sec" if requested_delete_rate else "" - ) - - if self.load_params.scenario in [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]: - delete_vus = max( - self.load_params.preallocated_deleters or 0, self.load_params.max_deleters or 0 - ) - write_vus = max( - self.load_params.preallocated_writers or 0, self.load_params.max_writers or 0 - ) - read_vus = max( - self.load_params.preallocated_readers or 0, self.load_params.max_readers or 0 - ) - else: - write_vus = self.load_params.writers - read_vus = self.load_params.readers - delete_vus = self.load_params.deleters - - write_vus_str = f"{write_vus}th" - read_vus_str = f"{read_vus}th" - delete_vus_str = f"{delete_vus}th" - - write_section_required = False - read_section_required = False - delete_section_required = False - - for node_key, load_summary in self.load_summaries.items(): - metrics = get_metrics_object(self.load_params.scenario, load_summary) - write_operations += metrics.write_total_iterations - if write_operations: - write_section_required = True - write_op_sec += metrics.write_rate - write_throughput += metrics.write_throughput - if metrics.write_failed_iterations: - write_errors[node_key] = metrics.write_failed_iterations - - read_operations += metrics.read_total_iterations - if read_operations: - read_section_required = True - read_op_sec += metrics.read_rate - read_throughput += metrics.read_throughput - if metrics.read_failed_iterations: - read_errors[node_key] = metrics.read_failed_iterations - - delete_operations += metrics.delete_total_iterations - if delete_operations: - delete_section_required = True - delete_op_sec += metrics.delete_rate - if metrics.delete_failed_iterations: - delete_errors[node_key] = metrics.delete_failed_iterations - - if write_section_required: - html += self._get_oprations_sub_section_html( - "Write", - write_operations, - requested_write_rate_str, - write_vus_str, - write_op_sec, - write_throughput, - write_errors, + write_operations = 0 + write_op_sec = 0 + write_throughput = 0 + write_errors = {} + requested_write_rate = self.load_params.write_rate + requested_write_rate_str = ( + f"{requested_write_rate}op/sec" if requested_write_rate else "" ) - if read_section_required: - html += self._get_oprations_sub_section_html( - "Read", - read_operations, - requested_read_rate_str, - read_vus_str, - read_op_sec, - read_throughput, - read_errors, + read_operations = 0 + read_op_sec = 0 + read_throughput = 0 + read_errors = {} + requested_read_rate = self.load_params.read_rate + requested_read_rate_str = f"{requested_read_rate}op/sec" if requested_read_rate else "" + + delete_operations = 0 + delete_op_sec = 0 + delete_errors = {} + requested_delete_rate = self.load_params.delete_rate + requested_delete_rate_str = ( + f"{requested_delete_rate}op/sec" if requested_delete_rate else "" ) - if delete_section_required: - html += self._get_oprations_sub_section_html( - "Delete", - 
delete_operations, - requested_delete_rate_str, - delete_vus_str, - delete_op_sec, - 0, - delete_errors, - ) + if self.load_params.scenario in [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]: + delete_vus = max( + self.load_params.preallocated_deleters or 0, self.load_params.max_deleters or 0 + ) + write_vus = max( + self.load_params.preallocated_writers or 0, self.load_params.max_writers or 0 + ) + read_vus = max( + self.load_params.preallocated_readers or 0, self.load_params.max_readers or 0 + ) + else: + write_vus = self.load_params.writers + read_vus = self.load_params.readers + delete_vus = self.load_params.deleters + + write_vus_str = f"{write_vus}th" + read_vus_str = f"{read_vus}th" + delete_vus_str = f"{delete_vus}th" + + write_section_required = False + read_section_required = False + delete_section_required = False + + for node_key, load_summary in load_summaries.items(): + metrics = get_metrics_object(self.load_params.scenario, load_summary) + write_operations += metrics.write_total_iterations + if write_operations: + write_section_required = True + write_op_sec += metrics.write_rate + write_throughput += metrics.write_throughput + if metrics.write_failed_iterations: + write_errors[node_key] = metrics.write_failed_iterations + + read_operations += metrics.read_total_iterations + if read_operations: + read_section_required = True + read_op_sec += metrics.read_rate + read_throughput += metrics.read_throughput + if metrics.read_failed_iterations: + read_errors[node_key] = metrics.read_failed_iterations + + delete_operations += metrics.delete_total_iterations + if delete_operations: + delete_section_required = True + delete_op_sec += metrics.delete_rate + if metrics.delete_failed_iterations: + delete_errors[node_key] = metrics.delete_failed_iterations + + if write_section_required: + html += self._get_oprations_sub_section_html( + "Write", + write_operations, + requested_write_rate_str, + write_vus_str, + write_op_sec, + write_throughput, + write_errors, + ) + + if read_section_required: + html += self._get_oprations_sub_section_html( + "Read", + read_operations, + requested_read_rate_str, + read_vus_str, + read_op_sec, + read_throughput, + read_errors, + ) + + if delete_section_required: + html += self._get_oprations_sub_section_html( + "Delete", + delete_operations, + requested_delete_rate_str, + delete_vus_str, + delete_op_sec, + 0, + delete_errors, + ) return html diff --git a/src/frostfs_testlib/load/load_steps.py b/src/frostfs_testlib/load/load_steps.py index 5d935aa..b55ff22 100644 --- a/src/frostfs_testlib/load/load_steps.py +++ b/src/frostfs_testlib/load/load_steps.py @@ -9,7 +9,10 @@ from frostfs_testlib.load.k6 import K6 from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC -from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_VUS_COUNT_DIVISOR +from frostfs_testlib.resources.load_params import ( + BACKGROUND_LOAD_VUS_COUNT_DIVISOR, + LOAD_NODE_SSH_USER, +) from frostfs_testlib.shell import CommandOptions, SSHShell from frostfs_testlib.shell.interfaces import InteractiveInput, SshCredentials from frostfs_testlib.storage.cluster import ClusterNode @@ -35,7 +38,7 @@ def init_s3_client( grpc_peer = storage_node.get_rpc_endpoint() for load_node in load_nodes: - ssh_client = _get_ssh_client(ssh_credentials, load_node) + ssh_client = _get_shell(ssh_credentials, load_node) frostfs_authmate_exec: FrostfsAuthmate = 
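With summaries now accumulated per load, a report for several consecutive loads in one test looks roughly like this (wiring as introduced in this patch):

    report = LoadReport(load_test="multi_load_test")
    report.set_load_params(load_params)

    report.add_summaries(first_load_summaries)   # load #1
    report.add_summaries(second_load_summaries)  # load #2

    html = report.get_report_html()  # one "Load Results for load #N" section per summary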
FrostfsAuthmate(ssh_client, FROSTFS_AUTHMATE_EXEC) issue_secret_output = frostfs_authmate_exec.secret.issue( wallet=wallet.path, @@ -99,12 +102,16 @@ def prepare_k6_instances( for distributed_load_params in distributed_load_params_list: load_node = next(nodes) - ssh_client = _get_ssh_client(ssh_credentials, load_node) + shell = _get_shell(ssh_credentials, load_node) + # Make working_dir directory + shell.exec(f"sudo mkdir -p {distributed_load_params.working_dir}") + shell.exec(f"sudo chown {LOAD_NODE_SSH_USER} {distributed_load_params.working_dir}") + k6_load_object = K6( distributed_load_params, next(endpoints_gen), k6_dir, - ssh_client, + shell, load_node, loaders_wallet, ) @@ -115,7 +122,7 @@ def prepare_k6_instances( return k6_load_objects -def _get_ssh_client(ssh_credentials: SshCredentials, load_node: str): +def _get_shell(ssh_credentials: SshCredentials, load_node: str) -> SSHShell: ssh_client = SSHShell( host=load_node, login=ssh_credentials.ssh_login, diff --git a/src/frostfs_testlib/load/load_verifiers.py b/src/frostfs_testlib/load/load_verifiers.py index 69e9f1f..1ff63ae 100644 --- a/src/frostfs_testlib/load/load_verifiers.py +++ b/src/frostfs_testlib/load/load_verifiers.py @@ -57,7 +57,7 @@ class LoadVerifier: # Due to interruptions we may see total verified objects to be less than written on writers count if abs(objects_count - verified_objects) > writers: exceptions.append( - f"Verified objects is less than total objects. Total: {objects_count}, Verified: {verified_objects}. Writers: {writers}." + f"Verified objects mismatch. Total: {objects_count}, Verified: {verified_objects}. Writers: {writers}." ) assert not exceptions, "\n".join(exceptions) diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index f9cf0e5..a2336be 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -1,3 +1,4 @@ +import copy import time import frostfs_testlib.resources.optionals as optionals @@ -9,7 +10,9 @@ from frostfs_testlib.load.load_config import ( LoadScenario, LoadType, ) +from frostfs_testlib.load.load_report import LoadReport from frostfs_testlib.load.load_steps import init_s3_client, prepare_k6_instances +from frostfs_testlib.load.load_verifiers import LoadVerifier from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.load_params import ( K6_TEARDOWN_PERIOD, @@ -33,11 +36,14 @@ class BackgroundLoadController: k6_instances: list[K6] k6_dir: str load_params: LoadParams + original_load_params: LoadParams load_nodes: list[str] verification_params: LoadParams nodes_under_load: list[ClusterNode] + load_counter: int ssh_credentials: SshCredentials loaders_wallet: WalletInfo + load_summaries: dict endpoints: list[str] def __init__( @@ -48,8 +54,10 @@ class BackgroundLoadController: nodes_under_load: list[ClusterNode], ) -> None: self.k6_dir = k6_dir - self.load_params = load_params + self.original_load_params = load_params + self.load_params = copy.deepcopy(self.original_load_params) self.nodes_under_load = nodes_under_load + self.load_counter = 1 self.load_nodes = LOAD_NODES self.loaders_wallet = loaders_wallet @@ -59,17 +67,7 @@ class BackgroundLoadController: self.endpoints = self._get_endpoints( load_params.load_type, load_params.endpoint_selection_strategy ) - self.verification_params = LoadParams( - verify_clients=load_params.verify_clients, - 
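Keeping the pristine parameters and deep-copying them per run is what makes consequent loads independent; a sketch of the id bumping this enables (constructing LoadParams with only load_id is an assumption, other fields are presumed to default):

    import copy

    original = LoadParams(load_id="load_1")   # assumed constructor usage
    params = copy.deepcopy(original)
    params.set_id(f"{params.load_id}_2")      # only the copy is mutated
    assert original.load_id == "load_1"
    assert params.load_id == "load_1_2"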
scenario=LoadScenario.VERIFY, - registry_file=load_params.registry_file, - verify_time=load_params.verify_time, - load_type=load_params.load_type, - load_id=load_params.load_id, - working_dir=load_params.working_dir, - endpoint_selection_strategy=load_params.endpoint_selection_strategy, - k6_process_allocation_strategy=load_params.k6_process_allocation_strategy, - ) + self.ssh_credentials = SshCredentials( LOAD_NODE_SSH_USER, LOAD_NODE_SSH_PASSWORD, @@ -179,6 +177,66 @@ class BackgroundLoadController: return True + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step_deco("Reset background load") + def _reset_for_consequent_load(self): + """This method is required if we want to run multiple loads during test run. + Raise load counter by 1 and append it to load_id + """ + self.load_counter += 1 + self.load_params = copy.deepcopy(self.original_load_params) + self.load_params.set_id(f"{self.load_params.load_id}_{self.load_counter}") + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step_deco("Startup background load") + def startup(self): + self.prepare() + self.start() + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step_deco("Stop and get results of background load") + def teardown(self, load_report: LoadReport = None): + if not self.k6_instances: + return + + self.stop() + self.load_summaries = self.get_results() + self.k6_instances = [] + if load_report: + load_report.add_summaries(self.load_summaries) + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step_deco("Verify results of background load") + def verify(self): + try: + if self.load_params.verify: + self.verification_params = LoadParams( + verify_clients=self.load_params.verify_clients, + scenario=LoadScenario.VERIFY, + registry_file=self.load_params.registry_file, + verify_time=self.load_params.verify_time, + load_type=self.load_params.load_type, + load_id=self.load_params.load_id, + working_dir=self.load_params.working_dir, + endpoint_selection_strategy=self.load_params.endpoint_selection_strategy, + k6_process_allocation_strategy=self.load_params.k6_process_allocation_strategy, + ) + self._run_verify_scenario() + verification_summaries = self.get_results() + self.verify_summaries(self.load_summaries, verification_summaries) + finally: + self._reset_for_consequent_load() + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step_deco("Verify summaries from k6") + def verify_summaries(self, load_summaries: dict, verification_summaries: dict): + verifier = LoadVerifier(self.load_params) + for node_or_endpoint in load_summaries: + with reporter.step(f"Verify load summaries for {node_or_endpoint}"): + verifier.verify_summaries( + load_summaries[node_or_endpoint], verification_summaries[node_or_endpoint] + ) + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) def wait_until_finish(self): if self.load_params.load_time is None: @@ -188,7 +246,8 @@ class BackgroundLoadController: k6_instance.wait_until_finished(self.load_params.load_time + int(K6_TEARDOWN_PERIOD)) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - def verify(self): + @reporter.step_deco("Run verify scenario for background load") + def _run_verify_scenario(self): if self.verification_params.verify_time is None: raise RuntimeError("verify_time should not be none") diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 
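Taken together, the controller now supports this per-test lifecycle (method names as added in this patch; the surrounding controller and report fixtures are assumed):

    controller.startup()                     # prepare() + start()
    # ... test actions while the load is running ...
    controller.teardown(load_report=report)  # stop, get_results(), add_summaries()
    controller.verify()                      # VERIFY scenario, then bump load_id for the next load

    controller.startup()                     # load #2 runs with load_id suffix "_2"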
35072f2..34f027f 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -1,7 +1,5 @@ import time -import allure - import frostfs_testlib.resources.optionals as optionals from frostfs_testlib.reporter import get_reporter from frostfs_testlib.shell import CommandOptions, Shell @@ -30,15 +28,29 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop host of node {node}") def stop_node_host(self, node: ClusterNode, mode: str): - with allure.step(f"Stop host {node.host.config.address}"): + with reporter.step(f"Stop host {node.host.config.address}"): node.host.stop_host(mode=mode) wait_for_host_offline(self.shell, node.storage_node) self.stopped_nodes.append(node) + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Shutdown whole cluster") + def shutdown_cluster(self, mode: str, reversed_order: bool = False): + nodes = ( + reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes + ) + for node in nodes: + with reporter.step(f"Stop host {node.host.config.address}"): + self.stopped_nodes.append(node) + node.host.stop_host(mode=mode) + + for node in nodes: + wait_for_host_offline(self.shell, node.storage_node) + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start host of node {node}") def start_node_host(self, node: ClusterNode): - with allure.step(f"Start host {node.host.config.address}"): + with reporter.step(f"Start host {node.host.config.address}"): node.host.start_host() wait_for_host_online(self.shell, node.storage_node) wait_for_node_online(node.storage_node) @@ -46,9 +58,11 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start stopped hosts") - def start_stopped_hosts(self): - for node in self.stopped_nodes: - node.host.start_host() + def start_stopped_hosts(self, reversed_order: bool = False): + nodes = reversed(self.stopped_nodes) if reversed_order else self.stopped_nodes + for node in nodes: + with reporter.step(f"Start host {node.host.config.address}"): + node.host.start_host() self.stopped_nodes = [] wait_all_storage_nodes_returned(self.shell, self.cluster) From a26f8e9c8008f306e874eb74dad1134c49b7949d Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Fri, 26 May 2023 10:05:12 +0300 Subject: [PATCH 088/363] Added wait_for_node_to_be_ready to starting stopped storage nodes --- .../storage/controllers/cluster_state_controller.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 34f027f..7782e64 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -5,6 +5,7 @@ from frostfs_testlib.reporter import get_reporter from frostfs_testlib.shell import CommandOptions, Shell from frostfs_testlib.steps import epoch from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode +from frostfs_testlib.steps.node_management import wait_for_node_to_be_ready from frostfs_testlib.storage.controllers.disk_controller import DiskController from frostfs_testlib.testing.test_control import run_optionally, wait_for_success from frostfs_testlib.utils.failover_utils import ( @@ -104,6 +105,7 @@ class ClusterStateController: def 
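One caveat in shutdown_cluster above: reversed() returns a one-shot iterator, so when reversed_order is True the second pass (the wait_for_host_offline loop) iterates an already exhausted iterator and silently does nothing. A sketch that materializes the order first:

    nodes = list(self.cluster.cluster_nodes)
    if reversed_order:
        nodes.reverse()

    for node in nodes:
        self.stopped_nodes.append(node)
        node.host.stop_host(mode=mode)

    for node in nodes:  # a list survives the second pass
        wait_for_host_offline(self.shell, node.storage_node)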
start_stopped_storage_services(self): for node in self.stopped_storage_nodes: node.start_service() + wait_all_storage_nodes_returned(self.shell, self.cluster) self.stopped_storage_nodes = [] @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) From 2bad0f1db644c6e6941806e288af05bf0ad95056 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 30 May 2023 16:32:38 +0300 Subject: [PATCH 089/363] Add metabase and write_cache operations Signed-off-by: Andrey Berezin --- pyproject.toml | 2 +- src/frostfs_testlib/hosting/docker_host.py | 8 ++- src/frostfs_testlib/hosting/interfaces.py | 20 ++++++++ src/frostfs_testlib/load/load_report.py | 2 +- src/frostfs_testlib/storage/cluster.py | 15 ++++++ .../controllers/cluster_state_controller.py | 34 ++++++++++--- .../storage/dataclasses/frostfs_services.py | 6 +++ .../storage/dataclasses/node_base.py | 23 ++++++++- src/frostfs_testlib/utils/failover_utils.py | 6 +-- src/frostfs_testlib/utils/file_keeper.py | 50 +++++++++++++++++++ 10 files changed, 152 insertions(+), 14 deletions(-) create mode 100644 src/frostfs_testlib/utils/file_keeper.py diff --git a/pyproject.toml b/pyproject.toml index 69f8e29..9140ee0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,7 +16,7 @@ classifiers = [ ] keywords = ["frostfs", "test"] dependencies = [ - "allure-python-commons>=2.9.45", + "allure-python-commons>=2.13.2", "docker>=4.4.0", "importlib_metadata>=5.0; python_version < '3.10'", "neo-mamba==1.0.0", diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index ccf1b64..b7f4852 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -129,12 +129,18 @@ class DockerHost(Host): timeout=service_attributes.start_timeout, ) + def delete_metabase(self, service_name: str) -> None: + raise NotImplementedError("Not implemented for docker") + + def delete_write_cache(self, service_name: str) -> None: + raise NotImplementedError("Not implemented for docker") + def delete_fstree(self, service_name: str) -> None: raise NotImplementedError("Not implemented for docker") def delete_blobovnicza(self, service_name: str) -> None: raise NotImplementedError("Not implemented for docker") - + def delete_pilorama(self, service_name: str) -> None: raise NotImplementedError("Not implemented for docker") diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index 8a617e9..9178523 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -131,6 +131,26 @@ class Host(ABC): """ + @abstractmethod + def delete_metabase(self, service_name: str) -> None: + """ + Deletes all metabase*.db in the node. + + Args: + service_name: Name of storage node service. + + """ + + @abstractmethod + def delete_write_cache(self, service_name: str) -> None: + """ + Deletes all write_cache in the node. + + Args: + service_name: Name of storage node service. 
+ + """ + @abstractmethod def delete_blobovnicza(self, service_name: str) -> None: """ diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py index c9c23c7..5f22515 100644 --- a/src/frostfs_testlib/load/load_report.py +++ b/src/frostfs_testlib/load/load_report.py @@ -61,7 +61,7 @@ class LoadReport: return html def _calc_unit(self, value: float, skip_units: int = 0) -> Tuple[float, str]: - units = ["B", "KB", "MB", "GB", "TB"] + units = ["B", "KiB", "MiB", "GiB", "TiB"] for unit in units[skip_units:]: if value < 1024: diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index ffca778..2158dc2 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -73,6 +73,21 @@ class ClusterNode: def s3_gate(self) -> S3Gate: return self.service(S3Gate) + def get_config(self, config_file_path: str) -> dict: + shell = self.host.get_shell() + + result = shell.exec(f"cat {config_file_path}") + config_text = result.stdout + + config = yaml.safe_load(config_text) + return config + + def save_config(self, new_config: dict, config_file_path: str) -> None: + shell = self.host.get_shell() + + config_str = yaml.dump(new_config) + shell.exec(f"echo '{config_str}' | sudo tee {config_file_path}") + def service(self, service_type: type[ServiceClass]) -> ServiceClass: """ Get a service cluster node of specified type. diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 7782e64..705caf0 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -1,11 +1,11 @@ import time +from concurrent.futures import ThreadPoolExecutor import frostfs_testlib.resources.optionals as optionals from frostfs_testlib.reporter import get_reporter from frostfs_testlib.shell import CommandOptions, Shell from frostfs_testlib.steps import epoch from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode -from frostfs_testlib.steps.node_management import wait_for_node_to_be_ready from frostfs_testlib.storage.controllers.disk_controller import DiskController from frostfs_testlib.testing.test_control import run_optionally, wait_for_success from frostfs_testlib.utils.failover_utils import ( @@ -22,7 +22,7 @@ class ClusterStateController: def __init__(self, shell: Shell, cluster: Cluster) -> None: self.stopped_nodes: list[ClusterNode] = [] self.detached_disks: dict[str, DiskController] = {} - self.stopped_storage_nodes: list[StorageNode] = [] + self.stopped_storage_nodes: list[ClusterNode] = [] self.cluster = cluster self.shell = shell @@ -48,6 +48,16 @@ class ClusterStateController: for node in nodes: wait_for_host_offline(self.shell, node.storage_node) + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Stop all storage services on cluster") + def stop_all_storage_services(self, reversed_order: bool = False): + nodes = ( + reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes + ) + + for node in nodes: + self.stop_storage_service(node) + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start host of node {node}") def start_node_host(self, node: ClusterNode): @@ -92,19 +102,31 @@ class ClusterStateController: @reporter.step_deco("Stop storage service on {node}") def stop_storage_service(self, node: ClusterNode): 
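The get_config/save_config pair added here enables a simple remote round-trip on a node's YAML config; a hedged sketch (the edited key is hypothetical):

    config_file_path, config = node.get_config()   # remote cat + yaml.safe_load
    config["logger"] = {"level": "debug"}          # hypothetical edit
    node.save_config(config)                       # yaml.dump piped through sudo tee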
node.storage_node.stop_service() - self.stopped_storage_nodes.append(node.storage_node) + self.stopped_storage_nodes.append(node) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start storage service on {node}") def start_storage_service(self, node: ClusterNode): node.storage_node.start_service() - self.stopped_storage_nodes.remove(node.storage_node) + self.stopped_storage_nodes.remove(node) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start stopped storage services") def start_stopped_storage_services(self): - for node in self.stopped_storage_nodes: - node.start_service() + if self.stopped_storage_nodes: + # In case if we stopped couple services, for example (s01-s04): + # After starting only s01, it may require connections to s02-s04, which is still down, and fail to start. + # Also, if something goes wrong here, we might skip s02-s04 start at all, and cluster will be left in a bad state. + # So in order to make sure that services are at least attempted to be started, using threads here. + with ThreadPoolExecutor(max_workers=len(self.stopped_storage_nodes)) as executor: + start_result = executor.map(self.start_storage_service, self.stopped_storage_nodes) + + # Looks tricky, but if exception is raised in any thread, it will be "eaten" by ThreadPoolExecutor, + # But will be thrown here. + # Not ideal solution, but okay for now + for _ in start_result: + pass + wait_all_storage_nodes_returned(self.shell, self.cluster) self.stopped_storage_nodes = [] diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index e87d264..7bb4c2b 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -177,6 +177,12 @@ class StorageNode(NodeBase): def delete_pilorama(self): self.host.delete_pilorama(self.name) + def delete_metabase(self): + self.host.delete_metabase(self.name) + + def delete_write_cache(self): + self.host.delete_write_cache(self.name) + @property def label(self) -> str: return f"{self.name}: {self.get_rpc_endpoint()}" diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 0d96775..8fcb03b 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -1,6 +1,8 @@ from abc import ABC, abstractmethod from dataclasses import dataclass -from typing import Optional, TypedDict, TypeVar +from typing import Optional, Tuple, TypedDict, TypeVar + +import yaml from frostfs_testlib.hosting.config import ServiceConfig from frostfs_testlib.hosting.interfaces import Host @@ -84,12 +86,29 @@ class NodeBase(ABC): ConfigAttributes.CONFIG_PATH, ) - def get_wallet_config_path(self): + def get_wallet_config_path(self) -> str: return self._get_attribute( ConfigAttributes.LOCAL_WALLET_CONFIG, ConfigAttributes.WALLET_CONFIG, ) + def get_config(self) -> Tuple[str, dict]: + config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH) + shell = self.host.get_shell() + + result = shell.exec(f"cat {config_file_path}") + config_text = result.stdout + + config = yaml.safe_load(config_text) + return config_file_path, config + + def save_config(self, new_config: dict) -> None: + config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH) + shell = self.host.get_shell() + + config_str = yaml.dump(new_config) + shell.exec(f"echo '{config_str}' | sudo tee 
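The "eaten exception" comment above refers to how executor.map defers errors: a worker's exception is re-raised only when its result is consumed from the returned iterator. A minimal self-contained demonstration:

    from concurrent.futures import ThreadPoolExecutor

    def start(name: str) -> None:
        if name == "s02":
            raise RuntimeError("s02 failed to start")

    with ThreadPoolExecutor(max_workers=3) as executor:
        results = executor.map(start, ["s01", "s02", "s03"])

    for _ in results:  # RuntimeError surfaces here, not inside the pool
        pass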
{config_file_path}") + def get_wallet_public_key(self): storage_wallet_path = self.get_wallet_path() storage_wallet_pass = self.get_wallet_password() diff --git a/src/frostfs_testlib/utils/failover_utils.py b/src/frostfs_testlib/utils/failover_utils.py index 3910662..8c6062f 100644 --- a/src/frostfs_testlib/utils/failover_utils.py +++ b/src/frostfs_testlib/utils/failover_utils.py @@ -28,8 +28,8 @@ def ping_host(shell: Shell, host: Host): @reporter.step_deco("Wait for storage nodes returned to cluster") def wait_all_storage_nodes_returned(shell: Shell, cluster: Cluster) -> None: - with reporter.step("Run health check for all storage nodes"): - for node in cluster.services(StorageNode): + for node in cluster.services(StorageNode): + with reporter.step(f"Run health check for storage at '{node}'"): wait_for_host_online(shell, node) wait_for_node_online(node) @@ -56,7 +56,7 @@ def wait_for_host_offline(shell: Shell, node: StorageNode): return 0 -@retry(max_attempts=10, sleep_interval=15, expected_result=True) +@retry(max_attempts=20, sleep_interval=30, expected_result=True) @reporter.step_deco("Waiting for node {node} to go online") def wait_for_node_online(node: StorageNode): try: diff --git a/src/frostfs_testlib/utils/file_keeper.py b/src/frostfs_testlib/utils/file_keeper.py new file mode 100644 index 0000000..ad6836b --- /dev/null +++ b/src/frostfs_testlib/utils/file_keeper.py @@ -0,0 +1,50 @@ +from concurrent.futures import ThreadPoolExecutor + +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.storage.dataclasses.node_base import NodeBase + +reporter = get_reporter() + + +class FileKeeper: + """This class is responsible to make backup copy of modified file and restore when required (mostly after the test)""" + + files_to_restore: dict[NodeBase, list[str]] = {} + + @reporter.step_deco("Adding {file_to_restore} from node {node} to restore list") + def add(self, node: NodeBase, file_to_restore: str): + if node in self.files_to_restore and file_to_restore in self.files_to_restore[node]: + # Already added + return + + if node not in self.files_to_restore: + self.files_to_restore[node] = [] + + if file_to_restore not in self.files_to_restore[node]: + self.files_to_restore[node].append(file_to_restore) + + shell = node.host.get_shell() + shell.exec(f"cp {file_to_restore} {file_to_restore}.bak") + + @reporter.step_deco("Restore files") + def restore_files(self): + nodes = self.files_to_restore.keys() + if not nodes: + return + + with ThreadPoolExecutor(max_workers=len(nodes)) as executor: + results = executor.map(self._restore_files_on_node, nodes) + + self.files_to_restore.clear() + + for _ in results: + # Iterate through results for exception check if any + pass + + @reporter.step_deco("Restore files on node {node}") + def _restore_files_on_node(self, node: NodeBase): + shell = node.host.get_shell() + for file_to_restore in self.files_to_restore[node]: + with reporter.step(f"Restore file {file_to_restore} on node {node}"): + shell.exec(f"cp {file_to_restore}.bak {file_to_restore}") + shell.exec(f"rm {file_to_restore}.bak") From 584ba5f0d1bc9b00ffffc3c93f2ad7a7089b5304 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 31 May 2023 16:02:26 +0300 Subject: [PATCH 090/363] Update shards list command Signed-off-by: Andrey Berezin --- src/frostfs_testlib/cli/frostfs_cli/shards.py | 4 ++-- src/frostfs_testlib/storage/controllers/shards_watcher.py | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/cli/frostfs_cli/shards.py 
b/src/frostfs_testlib/cli/frostfs_cli/shards.py index 6b47ac2..ffa0652 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/shards.py +++ b/src/frostfs_testlib/cli/frostfs_cli/shards.py @@ -118,7 +118,7 @@ class FrostfsCliShards(CliCommand): wallet: str, wallet_password: str, address: Optional[str] = None, - json_mode: bool = False, + json: bool = False, timeout: Optional[str] = None, ) -> CommandResult: """ @@ -126,7 +126,7 @@ class FrostfsCliShards(CliCommand): Args: address: Address of wallet account. - json_mode: Print shard info as a JSON array. + json: Print shard info as a JSON array. endpoint: Remote node address (as 'multiaddr' or ':'). wallet: WIF (NEP-2) string or path to the wallet or binary key. wallet_password: Wallet password. diff --git a/src/frostfs_testlib/storage/controllers/shards_watcher.py b/src/frostfs_testlib/storage/controllers/shards_watcher.py index 6607824..bd7c8cd 100644 --- a/src/frostfs_testlib/storage/controllers/shards_watcher.py +++ b/src/frostfs_testlib/storage/controllers/shards_watcher.py @@ -99,6 +99,7 @@ class ShardsWatcher: endpoint=self.storage_node.get_control_endpoint(), wallet=self.storage_node.get_remote_wallet_path(), wallet_password=self.storage_node.get_wallet_password(), + json=True, ) return json.loads(response.stdout.split(">", 1)[1]) From 987e7f2a3062d9f5e6acb2ef2617c177b8a0d9a6 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Fri, 2 Jun 2023 13:03:28 +0300 Subject: [PATCH 091/363] Revert "Update shards list command" This reverts commit 584ba5f0d1bc9b00ffffc3c93f2ad7a7089b5304. --- src/frostfs_testlib/cli/frostfs_cli/shards.py | 4 ++-- src/frostfs_testlib/storage/controllers/shards_watcher.py | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/cli/frostfs_cli/shards.py b/src/frostfs_testlib/cli/frostfs_cli/shards.py index ffa0652..6b47ac2 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/shards.py +++ b/src/frostfs_testlib/cli/frostfs_cli/shards.py @@ -118,7 +118,7 @@ class FrostfsCliShards(CliCommand): wallet: str, wallet_password: str, address: Optional[str] = None, - json: bool = False, + json_mode: bool = False, timeout: Optional[str] = None, ) -> CommandResult: """ @@ -126,7 +126,7 @@ class FrostfsCliShards(CliCommand): Args: address: Address of wallet account. - json: Print shard info as a JSON array. + json_mode: Print shard info as a JSON array. endpoint: Remote node address (as 'multiaddr' or ':'). wallet: WIF (NEP-2) string or path to the wallet or binary key. wallet_password: Wallet password. 
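The rename matters because CliCommand appears to derive CLI flags from keyword-argument names (an assumption about its internals, inferred from the locals() forwarding above): json_mode would emit --json-mode while json emits --json. Illustrative only:

    def to_flags(**kwargs) -> list[str]:
        # assumed name-to-flag mapping: underscores become dashes
        return [f"--{name.replace('_', '-')}" for name, value in kwargs.items() if value]

    assert to_flags(json_mode=True) == ["--json-mode"]
    assert to_flags(json=True) == ["--json"]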
diff --git a/src/frostfs_testlib/storage/controllers/shards_watcher.py b/src/frostfs_testlib/storage/controllers/shards_watcher.py index bd7c8cd..6607824 100644 --- a/src/frostfs_testlib/storage/controllers/shards_watcher.py +++ b/src/frostfs_testlib/storage/controllers/shards_watcher.py @@ -99,7 +99,6 @@ class ShardsWatcher: endpoint=self.storage_node.get_control_endpoint(), wallet=self.storage_node.get_remote_wallet_path(), wallet_password=self.storage_node.get_wallet_password(), - json=True, ) return json.loads(response.stdout.split(">", 1)[1]) From e3c0f768960c30441ee74fe90b1215fe3ef63dce Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Fri, 2 Jun 2023 13:08:17 +0300 Subject: [PATCH 092/363] Proper usage for shards_watcher Signed-off-by: Andrey Berezin --- src/frostfs_testlib/storage/controllers/shards_watcher.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/frostfs_testlib/storage/controllers/shards_watcher.py b/src/frostfs_testlib/storage/controllers/shards_watcher.py index 6607824..95a419e 100644 --- a/src/frostfs_testlib/storage/controllers/shards_watcher.py +++ b/src/frostfs_testlib/storage/controllers/shards_watcher.py @@ -99,6 +99,7 @@ class ShardsWatcher: endpoint=self.storage_node.get_control_endpoint(), wallet=self.storage_node.get_remote_wallet_path(), wallet_password=self.storage_node.get_wallet_password(), + json_mode=True, ) return json.loads(response.stdout.split(">", 1)[1]) From e9777b63cde2175f585ba52ff0771782d8e69a1d Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Fri, 2 Jun 2023 15:56:23 +0300 Subject: [PATCH 093/363] update allure in requirements.txt Signed-off-by: Andrey Berezin --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index c653f7b..5b47640 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -allure-python-commons==2.9.45 +allure-python-commons==2.13.2 docker==4.4.0 importlib_metadata==5.0.0 neo-mamba==1.0.0 From 26a78c0eae3d0350f2714c128f36fa15a70f90ff Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Mon, 5 Jun 2023 12:00:06 +0300 Subject: [PATCH 094/363] New methods with nodes Signed-off-by: Dmitriy Zayakin --- .../cli/frostfs_cli/container.py | 42 +++++++++++++++++++ .../controllers/cluster_state_controller.py | 0 src/frostfs_testlib/hosting/docker_host.py | 6 +++ src/frostfs_testlib/hosting/interfaces.py | 14 +++++++ src/frostfs_testlib/steps/cli/container.py | 27 +++++++++++- src/frostfs_testlib/steps/http/http_gate.py | 28 +++++++++---- src/frostfs_testlib/steps/s3/s3_helper.py | 22 +++++++++- src/frostfs_testlib/storage/cluster.py | 11 +++++ .../controllers/cluster_state_controller.py | 26 ++++++++++++ .../storage/dataclasses/node_base.py | 4 ++ 10 files changed, 170 insertions(+), 10 deletions(-) create mode 100644 src/frostfs_testlib/controllers/cluster_state_controller.py diff --git a/src/frostfs_testlib/cli/frostfs_cli/container.py b/src/frostfs_testlib/cli/frostfs_cli/container.py index 533ff1a..5ea8ba8 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/container.py +++ b/src/frostfs_testlib/cli/frostfs_cli/container.py @@ -262,3 +262,45 @@ class FrostfsCliContainer(CliCommand): "container set-eacl", **{param: value for param, value in locals().items() if param not in ["self"]}, ) + + def search_node( + self, + rpc_endpoint: str, + wallet: str, + cid: str, + address: Optional[str] = None, + ttl: Optional[int] = None, + from_file: Optional[str] = None, + short: Optional[bool] = True, + xhdr: Optional[dict] = None, + generate_key: 
Optional[bool] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """ + Show the nodes participating in the container in the current epoch. + + Args: + rpc_endpoint: string Remote host address (as 'multiaddr' or ':') + wallet: WIF (NEP-2) string or path to the wallet or binary key. + cid: Container ID. + address: Address of wallet account. + ttl: TTL value in request meta header (default 2). + from_file: string File path with encoded container + timeout: duration Timeout for the operation (default 15 s) + short: shorten the output of node information. + xhdr: Dict with request X-Headers. + generate_key: Generate a new private key + + Returns: + + """ + from_str = f"--from {from_file}" if from_file else "" + + return self._execute( + f"container nodes {from_str}", + **{ + param: value + for param, value in locals().items() + if param not in ["self", "from_file", "from_str"] + }, + ) diff --git a/src/frostfs_testlib/controllers/cluster_state_controller.py b/src/frostfs_testlib/controllers/cluster_state_controller.py new file mode 100644 index 0000000..e69de29 diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index b7f4852..1e32340 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -117,6 +117,12 @@ class DockerHost(Host): timeout=service_attributes.stop_timeout, ) + def wait_success_suspend_process(self, service_name: str): + raise NotImplementedError("Not supported for docker") + + def wait_success_resume_process(self, service_name: str): + raise NotImplementedError("Not supported for docker") + def restart_service(self, service_name: str) -> None: service_attributes = self._get_service_attributes(service_name) diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index 9178523..95536c6 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -112,6 +112,20 @@ class Host(ABC): service_name: Name of the service to restart. """ + @abstractmethod + def wait_success_suspend_process(self, process_name: str) -> None: + """Search for a service ID by its name and stop the process + Args: + process_name: Name + """ + + @abstractmethod + def wait_success_resume_process(self, process_name: str) -> None: + """Search for a service by its ID and start the process + Args: + process_name: Name + """ + @abstractmethod def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None: """Erases all data of the storage node with specified name. 
diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py index 89070c4..74f445a 100644 --- a/src/frostfs_testlib/steps/cli/container.py +++ b/src/frostfs_testlib/steps/cli/container.py @@ -1,5 +1,6 @@ import json import logging +import re from dataclasses import dataclass from time import sleep from typing import Optional, Union @@ -10,7 +11,7 @@ from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import put_object, put_object_to_random_node -from frostfs_testlib.storage.cluster import Cluster +from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.utils import json_utils @@ -357,3 +358,27 @@ def search_container_by_name(wallet: str, name: str, shell: Shell, endpoint: str if cont_info.get("attributes", {}).get("Name", None) == name: return cid return None + + +@reporter.step_deco("Search for nodes with a container") +def search_nodes_with_container( + wallet: str, + cid: str, + shell: Shell, + endpoint: str, + cluster: Cluster, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +) -> list[ClusterNode]: + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) + result = cli.container.search_node( + rpc_endpoint=endpoint, wallet=wallet, cid=cid, timeout=timeout + ) + + pattern = r"[0-9]+(?:\.[0-9]+){3}" + nodes_ip = list(set(re.findall(pattern, result.stdout))) + + with reporter.step(f"nodes ips = {nodes_ip}"): + nodes_list = cluster.get_nodes_by_ip(nodes_ip) + + with reporter.step(f"Return nodes - {nodes_list}"): + return nodes_list diff --git a/src/frostfs_testlib/steps/http/http_gate.py b/src/frostfs_testlib/steps/http/http_gate.py index c9769fb..64bb5ce 100644 --- a/src/frostfs_testlib/steps/http/http_gate.py +++ b/src/frostfs_testlib/steps/http/http_gate.py @@ -28,7 +28,13 @@ ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/") @reporter.step_deco("Get via HTTP Gate") -def get_via_http_gate(cid: str, oid: str, endpoint: str, request_path: Optional[str] = None): +def get_via_http_gate( + cid: str, + oid: str, + endpoint: str, + request_path: Optional[str] = None, + timeout: Optional[int] = 300, +): """ This function gets given object from HTTP gate cid: container id to get object from @@ -43,7 +49,7 @@ def get_via_http_gate(cid: str, oid: str, endpoint: str, request_path: Optional[ else: request = f"{endpoint}{request_path}" - resp = requests.get(request, stream=True) + resp = requests.get(request, stream=True, timeout=timeout) if not resp.ok: raise Exception( @@ -63,7 +69,7 @@ def get_via_http_gate(cid: str, oid: str, endpoint: str, request_path: Optional[ @reporter.step_deco("Get via Zip HTTP Gate") -def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str): +def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str, timeout: Optional[int] = 300): """ This function gets given object from HTTP gate cid: container id to get object from @@ -71,7 +77,7 @@ def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str): endpoint: http gate endpoint """ request = f"{endpoint}/zip/{cid}/{prefix}" - resp = requests.get(request, stream=True) + resp = requests.get(request, stream=True, timeout=timeout) if not resp.ok: raise Exception( @@ -96,7 +102,11 @@ def get_via_zip_http_gate(cid: 
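The node search above relies on a naive IPv4 matcher; it will match any dotted quad in the CLI output, which is why deduplication via set() matters (sample output is hypothetical):

    import re

    pattern = r"[0-9]+(?:\.[0-9]+){3}"
    stdout = "172.26.160.10\n172.26.160.20\n172.26.160.10\n"
    nodes_ip = list(set(re.findall(pattern, stdout)))  # unordered, deduplicated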
str, prefix: str, endpoint: str): @reporter.step_deco("Get via HTTP Gate by attribute") def get_via_http_gate_by_attribute( - cid: str, attribute: dict, endpoint: str, request_path: Optional[str] = None + cid: str, + attribute: dict, + endpoint: str, + request_path: Optional[str] = None, + timeout: Optional[int] = 300, ): """ This function gets given object from HTTP gate @@ -113,7 +123,7 @@ def get_via_http_gate_by_attribute( else: request = f"{endpoint}{request_path}" - resp = requests.get(request, stream=True) + resp = requests.get(request, stream=True, timeout=timeout) if not resp.ok: raise Exception( @@ -133,7 +143,9 @@ def get_via_http_gate_by_attribute( @reporter.step_deco("Upload via HTTP Gate") -def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: Optional[dict] = None) -> str: +def upload_via_http_gate( + cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300 +) -> str: """ This function upload given object through HTTP gate cid: CID to get object from @@ -144,7 +156,7 @@ def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: Optional[d request = f"{endpoint}/upload/{cid}" files = {"upload_file": open(path, "rb")} body = {"filename": path} - resp = requests.post(request, files=files, data=body, headers=headers) + resp = requests.post(request, files=files, data=body, headers=headers, timeout=timeout) if not resp.ok: raise Exception( diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py index 87f929e..0c6c448 100644 --- a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3/s3_helper.py @@ -12,7 +12,12 @@ from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC from frostfs_testlib.resources.common import CREDENTIALS_CREATE_TIMEOUT from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus -from frostfs_testlib.storage.cluster import Cluster +from frostfs_testlib.shell import Shell +from frostfs_testlib.steps.cli.container import ( + search_container_by_name, + search_nodes_with_container, +) +from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate from frostfs_testlib.utils.cli_utils import _run_with_passwd @@ -245,3 +250,18 @@ def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str): # Delete the bucket itself s3_client.delete_bucket(bucket) + + +@reporter.step_deco("Search nodes bucket") +def search_nodes_with_bucket( + cluster: Cluster, + bucket_name: str, + wallet: str, + shell: Shell, + endpoint: str, +) -> list[ClusterNode]: + cid = search_container_by_name(wallet=wallet, name=bucket_name, shell=shell, endpoint=endpoint) + nodes_list = search_nodes_with_container( + wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster + ) + return nodes_list diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index 2158dc2..91487c9 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -2,9 +2,11 @@ import random import re import yaml +from yarl import URL from frostfs_testlib.hosting import Host, Hosting from frostfs_testlib.hosting.config import ServiceConfig +from frostfs_testlib.reporter import get_reporter from frostfs_testlib.storage import get_service_registry from frostfs_testlib.storage.constants import ConfigAttributes from frostfs_testlib.storage.dataclasses.frostfs_services import 
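get_nodes_by_ip (added below) matches those IPs against morph-chain endpoints; yarl's URL does the host extraction (the endpoint shape is an assumption):

    from yarl import URL

    assert URL("http://172.26.160.10:40332").host == "172.26.160.10"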
( @@ -17,6 +19,8 @@ from frostfs_testlib.storage.dataclasses.frostfs_services import ( from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass from frostfs_testlib.storage.service_registry import ServiceRegistry +reporter = get_reporter() + class ClusterNode: """ @@ -250,3 +254,10 @@ class Cluster: def get_morph_endpoints(self) -> list[str]: nodes: list[MorphChain] = self.services(MorphChain) return [node.get_endpoint() for node in nodes] + + def get_nodes_by_ip(self, ips: list[str]) -> list[ClusterNode]: + cluster_nodes = [ + node for node in self.cluster_nodes if URL(node.morph_chain.get_endpoint()).host in ips + ] + with reporter.step(f"Return cluster nodes - {cluster_nodes}"): + return cluster_nodes diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 705caf0..70f3e21 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -25,6 +25,7 @@ class ClusterStateController: self.stopped_storage_nodes: list[ClusterNode] = [] self.cluster = cluster self.shell = shell + self.suspended_services: dict[str, list[ClusterNode]] = {} @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop host of node {node}") @@ -130,6 +131,31 @@ class ClusterStateController: wait_all_storage_nodes_returned(self.shell, self.cluster) self.stopped_storage_nodes = [] + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Suspend {process_name} service in {node}") + def suspend_service(self, process_name: str, node: ClusterNode): + node.host.wait_success_suspend_process(process_name) + if self.suspended_services.get(process_name): + self.suspended_services[process_name].append(node) + else: + self.suspended_services[process_name] = [node] + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Resume {process_name} service in {node}") + def resume_service(self, process_name: str, node: ClusterNode): + node.host.wait_success_resume_process(process_name) + if self.suspended_services.get(process_name): + self.suspended_services[process_name].append(node) + else: + self.suspended_services[process_name] = [node] + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Start suspend processes services") + def resume_suspended_services(self): + for process_name, list_nodes in self.suspended_services.items(): + [node.host.wait_success_resume_process(process_name) for node in list_nodes] + self.suspended_services = {} + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Hard reboot host {node} via magic SysRq option") def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True): diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 8fcb03b..150b963 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -19,6 +19,7 @@ class NodeBase(ABC): id: str name: str host: Host + _process_name: str def __init__(self, id, name, host) -> None: self.id = id @@ -48,6 +49,9 @@ class NodeBase(ABC): def get_service_systemctl_name(self) -> str: return self._get_attribute(ConfigAttributes.SERVICE_NAME) + def get_process_name(self) -> str: + return self._process_name + def start_service(self): self.host.start_service(self.name) From 
f2f3d3c8e3814ababf38158a97d94cfad0d386a5 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Mon, 5 Jun 2023 12:10:32 +0300 Subject: [PATCH 095/363] Add get_data_directory function --- src/frostfs_testlib/hosting/docker_host.py | 4 ++++ src/frostfs_testlib/hosting/interfaces.py | 12 ++++++++++++ .../storage/dataclasses/frostfs_services.py | 3 +++ 3 files changed, 19 insertions(+) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 1e32340..5dcac9e 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -135,6 +135,10 @@ class DockerHost(Host): timeout=service_attributes.start_timeout, ) + def get_data_directory(self, service_name: str) -> str: + service_attributes = self._get_service_attributes(service_name) + return service_attributes.data_directory_path + def delete_metabase(self, service_name: str) -> None: raise NotImplementedError("Not implemented for docker") diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index 95536c6..8d889da 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -112,6 +112,18 @@ class Host(ABC): service_name: Name of the service to restart. """ + + @abstractmethod + def get_data_directory(self, service_name: str) -> str: + """ + Getting path to data directory on node for further usage + (example: list databases pilorama.db) + + Args: + service_name: Name of storage node service. + """ + + @abstractmethod def wait_success_suspend_process(self, process_name: str) -> None: """Search for a service ID by its name and stop the process diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index 7bb4c2b..2b52c1f 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -167,6 +167,9 @@ class StorageNode(NodeBase): def get_un_locode(self): return self._get_attribute(ConfigAttributes.UN_LOCODE) + + def get_data_directory(self) -> str: + return self.host.get_data_directory(self.name) def delete_blobovnicza(self): self.host.delete_blobovnicza(self.name) From 98f5075715123cfd62d726e49d09c5d5746ff5f2 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Tue, 13 Jun 2023 12:07:21 +0300 Subject: [PATCH 096/363] Functions for stop/start s3 gateway in cluster_state_controller --- .../controllers/cluster_state_controller.py | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 70f3e21..1084552 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -23,6 +23,7 @@ class ClusterStateController: self.stopped_nodes: list[ClusterNode] = [] self.detached_disks: dict[str, DiskController] = {} self.stopped_storage_nodes: list[ClusterNode] = [] + self.stopped_s3_gate: list[ClusterNode] = [] self.cluster = cluster self.shell = shell self.suspended_services: dict[str, list[ClusterNode]] = {} @@ -131,6 +132,28 @@ class ClusterStateController: wait_all_storage_nodes_returned(self.shell, self.cluster) self.stopped_storage_nodes = [] + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Stop s3 gate on {node}") + def stop_s3_gate(self, 
node: ClusterNode): + node.s3_gate.stop_service() + self.stopped_s3_gate.append(node) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Start s3 gate on {node}") + def start_s3_gate(self, node: ClusterNode): + node.s3_gate.start_service() + self.stopped_s3_gate.remove(node) + + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Start stopped S3 gates") + def start_stopped_s3_gate(self): + # not sure if we need here to use threads like in start_stopped_storage_services + for s3_gate in self.stopped_s3_gate: + s3_gate.start_service() + self.stopped_s3_gate = [] + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Suspend {process_name} service in {node}") def suspend_service(self, process_name: str, node: ClusterNode): From c0f63e378354f643cccaea2fdf97a15cd7058c95 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Mon, 19 Jun 2023 13:39:35 +0300 Subject: [PATCH 097/363] New methods S3 client Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/s3/aws_cli_client.py | 4 ++++ src/frostfs_testlib/s3/boto3_client.py | 24 ++++++++++++++++++------ src/frostfs_testlib/s3/interfaces.py | 4 ++++ 3 files changed, 26 insertions(+), 6 deletions(-) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 054a1e8..a9aeb37 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -39,6 +39,10 @@ class AwsCliClient(S3ClientWrapper): except Exception as err: raise RuntimeError("Error while configuring AwsCliClient") from err + @reporter.step_deco("Set endpoint S3 to {s3gate_endpoint}") + def set_endpoint(self, s3gate_endpoint: str): + self.s3gate_endpoint = s3gate_endpoint + @reporter.step_deco("Create bucket S3") def create_bucket( self, diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index 07c693f..6d6fc74 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -47,19 +47,31 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step_deco("Configure S3 client (boto3)") @report_error def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None: - session = boto3.Session() - config = Config( + self.boto3_client: S3Client = None + self.session = boto3.Session() + self.config = Config( retries={ "max_attempts": MAX_REQUEST_ATTEMPTS, "mode": RETRY_MODE, } ) + self.access_key_id: str = access_key_id + self.secret_access_key: str = secret_access_key + self.s3gate_endpoint: str = "" + self.set_endpoint(s3gate_endpoint) - self.boto3_client: S3Client = session.client( + @reporter.step_deco("Set endpoint S3 to {s3gate_endpoint}") + def set_endpoint(self, s3gate_endpoint: str): + if self.s3gate_endpoint == s3gate_endpoint: + return + + self.s3gate_endpoint = s3gate_endpoint + + self.boto3_client: S3Client = self.session.client( service_name="s3", - aws_access_key_id=access_key_id, - aws_secret_access_key=secret_access_key, - config=config, + aws_access_key_id=self.access_key_id, + aws_secret_access_key=self.secret_access_key, + config=self.config, endpoint_url=s3gate_endpoint, verify=False, ) diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index bd1379c..3f31395 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -34,6 +34,10 @@ class S3ClientWrapper(ABC): def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> 
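The set_endpoint below is idempotent: the boto3 client is only rebuilt when the endpoint actually changes. Usage sketch (gateway URLs are hypothetical):

    wrapper = Boto3ClientWrapper(access_key_id, secret_access_key, "http://gate-1:8080")
    wrapper.set_endpoint("http://gate-2:8080")  # rebuilds the underlying boto3 client
    wrapper.set_endpoint("http://gate-2:8080")  # early return, endpoint unchanged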
None:
         pass
 
+    @abstractmethod
+    def set_endpoint(self, s3gate_endpoint: str):
+        """Set endpoint"""
+
     @abstractmethod
     def create_bucket(
         self,

From 13ea25bff5252a24b4cbda21c458260367bfc8b6 Mon Sep 17 00:00:00 2001
From: Dmitriy Zayakin
Date: Wed, 21 Jun 2023 13:02:16 +0300
Subject: [PATCH 098/363] Change s3 auth func

Signed-off-by: Dmitriy Zayakin
---
 .../controllers/cluster_state_controller.py   |  0
 src/frostfs_testlib/steps/s3/s3_helper.py     | 74 +++++++++----------
 2 files changed, 37 insertions(+), 37 deletions(-)
 delete mode 100644 src/frostfs_testlib/controllers/cluster_state_controller.py

diff --git a/src/frostfs_testlib/controllers/cluster_state_controller.py b/src/frostfs_testlib/controllers/cluster_state_controller.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py
index 0c6c448..d6c2095 100644
--- a/src/frostfs_testlib/steps/s3/s3_helper.py
+++ b/src/frostfs_testlib/steps/s3/s3_helper.py
@@ -8,17 +8,20 @@ from typing import Optional
 
 from dateutil.parser import parse
 
+from frostfs_testlib.cli import FrostfsAuthmate
 from frostfs_testlib.reporter import get_reporter
 from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
 from frostfs_testlib.resources.common import CREDENTIALS_CREATE_TIMEOUT
 from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
-from frostfs_testlib.shell import Shell
+from frostfs_testlib.shell import CommandOptions, InteractiveInput, Shell
+from frostfs_testlib.shell.interfaces import SshCredentials
 from frostfs_testlib.steps.cli.container import (
     search_container_by_name,
     search_nodes_with_container,
 )
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode
 from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
+from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.utils.cli_utils import _run_with_passwd
 
 reporter = get_reporter()
@@ -183,48 +186,45 @@ def assert_s3_acl(acl_grants: list, permitted_users: str):
 
 @reporter.step_deco("Init S3 Credentials")
 def init_s3_credentials(
-    wallet_path: str,
+    wallet: WalletInfo,
+    shell: Shell,
     cluster: Cluster,
     s3_bearer_rules_file: str,
     policy: Optional[dict] = None,
+    s3gates: Optional[list[S3Gate]] = None,
 ):
+    gate_public_keys = []
     bucket = str(uuid.uuid4())
-
-    s3gate_node = cluster.services(S3Gate)[0]
-    gate_public_key = s3gate_node.get_wallet_public_key()
-    cmd = (
-        f"{FROSTFS_AUTHMATE_EXEC} --debug --with-log --timeout {CREDENTIALS_CREATE_TIMEOUT} "
-        f"issue-secret --wallet {wallet_path} --gate-public-key={gate_public_key} "
-        f"--peer {cluster.default_rpc_endpoint} --container-friendly-name {bucket} "
-        f"--bearer-rules {s3_bearer_rules_file}"
+    if not s3gates:
+        s3gates = [cluster.s3_gates[0]]
+    for s3gate in s3gates:
+        gate_public_keys.append(s3gate.get_wallet_public_key())
+    frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC)
+    issue_secret_output = frostfs_authmate_exec.secret.issue(
+        wallet=wallet.path,
+        peer=cluster.default_rpc_endpoint,
+        bearer_rules=s3_bearer_rules_file,
+        gate_public_key=gate_public_keys,
+        wallet_password=wallet.password,
+        container_policy=policy,
+        container_friendly_name=bucket,
+    ).stdout
+    aws_access_key_id = str(
+        re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group(
+            "aws_access_key_id"
+        )
     )
-    if policy:
-        cmd += f" --container-policy {policy}'"
-    logger.info(f"Executing command: {cmd}")
-
-    try:
-        output = _run_with_passwd(cmd)
-        logger.info(f"Command 
completed with output: {output}")
-
-        # output contains some debug info and then several JSON structures, so we find each
-        # JSON structure by curly brackets (naive approach, but works while JSON is not nested)
-        # and then we take JSON containing secret_access_key
-        json_blocks = re.findall(r"\{.*?\}", output, re.DOTALL)
-        for json_block in json_blocks:
-            try:
-                parsed_json_block = json.loads(json_block)
-                if "secret_access_key" in parsed_json_block:
-                    return (
-                        parsed_json_block["container_id"],
-                        parsed_json_block["access_key_id"],
-                        parsed_json_block["secret_access_key"],
-                    )
-            except json.JSONDecodeError:
-                raise AssertionError(f"Could not parse info from output\n{output}")
-        raise AssertionError(f"Could not find AWS credentials in output:\n{output}")
-
-    except Exception as exc:
-        raise RuntimeError(f"Failed to init s3 credentials because of error\n{exc}") from exc
+    aws_secret_access_key = str(
+        re.search(
+            r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output
+        ).group("aws_secret_access_key")
+    )
+    cid = str(
+        re.search(r"container_id.*:\s.(?P<container_id>\w*)", issue_secret_output).group(
+            "container_id"
+        )
+    )
+    return cid, aws_access_key_id, aws_secret_access_key
 
 
 @reporter.step_deco("Delete bucket with all objects")

From 182bd6ab367cb86342d578ab5d029818cbbf8d1d Mon Sep 17 00:00:00 2001
From: Andrey Berezin
Date: Mon, 26 Jun 2023 16:45:34 +0300
Subject: [PATCH 099/363] Add loader and scenario runner interfaces, add support for local scenario

Signed-off-by: Andrey Berezin
---
 .../cli/frostfs_authmate/authmate.py          |   4 +-
 src/frostfs_testlib/load/__init__.py          |  13 +
 src/frostfs_testlib/load/interfaces.py        |  53 +++
 src/frostfs_testlib/load/k6.py                |  91 ++--
 src/frostfs_testlib/load/load_config.py       |  17 +-
 src/frostfs_testlib/load/load_metrics.py      |  12 +
 src/frostfs_testlib/load/load_report.py       |   1 +
 src/frostfs_testlib/load/load_steps.py        | 191 ---------
 src/frostfs_testlib/load/loaders.py           |  60 +++
 src/frostfs_testlib/load/runners.py           | 398 ++++++++++++++++++
 .../processes/remote_process.py               |  73 +++-
 .../reporter/allure_handler.py                |   2 +-
 src/frostfs_testlib/resources/common.py       |   2 +
 .../shell/command_inspectors.py               |  18 +-
 src/frostfs_testlib/shell/interfaces.py       |   5 +-
 src/frostfs_testlib/shell/local_shell.py      |   6 +-
 src/frostfs_testlib/shell/ssh_shell.py        |   6 +-
 .../controllers/background_load_controller.py | 149 ++-----
 .../controllers/cluster_state_controller.py   |  69 ++-
 19 files changed, 786 insertions(+), 384 deletions(-)
 create mode 100644 src/frostfs_testlib/load/__init__.py
 create mode 100644 src/frostfs_testlib/load/interfaces.py
 delete mode 100644 src/frostfs_testlib/load/load_steps.py
 create mode 100644 src/frostfs_testlib/load/loaders.py
 create mode 100644 src/frostfs_testlib/load/runners.py

diff --git a/src/frostfs_testlib/cli/frostfs_authmate/authmate.py b/src/frostfs_testlib/cli/frostfs_authmate/authmate.py
index ba3a3b0..7912dae 100644
--- a/src/frostfs_testlib/cli/frostfs_authmate/authmate.py
+++ b/src/frostfs_testlib/cli/frostfs_authmate/authmate.py
@@ -6,8 +6,8 @@ from frostfs_testlib.shell import Shell
 
 
 class FrostfsAuthmate:
-    secret: Optional[FrostfsAuthmateSecret] = None
-    version: Optional[FrostfsAuthmateVersion] = None
+    secret: FrostfsAuthmateSecret
+    version: FrostfsAuthmateVersion
 
     def __init__(self, shell: Shell, frostfs_authmate_exec_path: str):
         self.secret = FrostfsAuthmateSecret(shell, frostfs_authmate_exec_path)
diff --git a/src/frostfs_testlib/load/__init__.py b/src/frostfs_testlib/load/__init__.py
new file mode 100644
index 0000000..e8ed75e
--- /dev/null
+++ 
b/src/frostfs_testlib/load/__init__.py @@ -0,0 +1,13 @@ +from frostfs_testlib.load.interfaces import Loader, ScenarioRunner +from frostfs_testlib.load.load_config import ( + EndpointSelectionStrategy, + K6ProcessAllocationStrategy, + LoadParams, + LoadScenario, + LoadType, + NodesSelectionStrategy, + Preset, +) +from frostfs_testlib.load.load_report import LoadReport +from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader +from frostfs_testlib.load.runners import DefaultRunner, LocalRunner diff --git a/src/frostfs_testlib/load/interfaces.py b/src/frostfs_testlib/load/interfaces.py new file mode 100644 index 0000000..fbbc20b --- /dev/null +++ b/src/frostfs_testlib/load/interfaces.py @@ -0,0 +1,53 @@ +from abc import ABC, abstractmethod + +from frostfs_testlib.load.load_config import LoadParams +from frostfs_testlib.shell.interfaces import Shell +from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo + + +class Loader(ABC): + @abstractmethod + def get_shell(self) -> Shell: + """Get shell for the loader""" + + @property + @abstractmethod + def ip(self): + """Get address of the loader""" + + +class ScenarioRunner(ABC): + @abstractmethod + def prepare( + self, + load_params: LoadParams, + nodes_under_load: list[ClusterNode], + k6_dir: str, + ): + """Preparation steps before running the load""" + + @abstractmethod + def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): + """Init K6 instances""" + + @abstractmethod + def start(self): + """Start K6 instances""" + + @abstractmethod + def stop(self): + """Stop K6 instances""" + + @property + @abstractmethod + def is_running(self) -> bool: + """Returns True if load is running at the moment""" + + @abstractmethod + def wait_until_finish(self): + """Wait until load is finished""" + + @abstractmethod + def get_results(self) -> dict: + """Get results from K6 run""" diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index 2fa2c00..ca3f696 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -1,10 +1,12 @@ import json import logging +import math import os from dataclasses import dataclass, fields from time import sleep from typing import Any +from frostfs_testlib.load.interfaces import Loader from frostfs_testlib.load.load_config import ( K6ProcessAllocationStrategy, LoadParams, @@ -13,7 +15,12 @@ from frostfs_testlib.load.load_config import ( ) from frostfs_testlib.processes.remote_process import RemoteProcess from frostfs_testlib.reporter import get_reporter -from frostfs_testlib.resources.load_params import K6_STOP_SIGNAL_TIMEOUT, LOAD_NODE_SSH_USER +from frostfs_testlib.resources.common import STORAGE_USER_NAME +from frostfs_testlib.resources.load_params import ( + K6_STOP_SIGNAL_TIMEOUT, + K6_TEARDOWN_PERIOD, + LOAD_NODE_SSH_USER, +) from frostfs_testlib.shell import Shell from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.test_control import wait_for_success @@ -42,7 +49,7 @@ class K6: endpoints: list[str], k6_dir: str, shell: Shell, - load_node: str, + loader: Loader, wallet: WalletInfo, ): if load_params.scenario is None: @@ -50,7 +57,7 @@ class K6: self.load_params: LoadParams = load_params self.endpoints = endpoints - self.load_node: str = load_node + self.loader: Loader = loader self.shell: Shell = shell self.wallet = wallet self.scenario: LoadScenario = load_params.scenario @@ -151,32 +158,56 @@ class K6: [f"-e {param}='{value}'" for 
param, value in env_vars.items() if value is not None] ) - @reporter.step_deco("Start K6 on initiator") def start(self) -> None: - command = ( - f"{self._k6_dir}/k6 run {self._generate_env_variables()} " - f"{self._k6_dir}/scenarios/{self.scenario.value}.js" - ) - self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir) + with reporter.step( + f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}" + ): + command = ( + f"{self._k6_dir}/k6 run {self._generate_env_variables()} " + f"{self._k6_dir}/scenarios/{self.scenario.value}.js" + ) + user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None + self._k6_process = RemoteProcess.create( + command, self.shell, self.load_params.working_dir, user + ) + + def wait_until_finished(self) -> None: + with reporter.step( + f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}" + ): + if self.load_params.scenario == LoadScenario.VERIFY: + timeout = self.load_params.verify_time or 0 + else: + timeout = self.load_params.load_time or 0 + + timeout += int(K6_TEARDOWN_PERIOD) + original_timeout = timeout + + min_wait_interval = 10 + wait_interval = min_wait_interval + if self._k6_process is None: + assert "No k6 instances were executed" + while timeout > 0: + if not self._k6_process.running(): + return + logger.info(f"K6 is running. Waiting {wait_interval} seconds...") + sleep(wait_interval) + timeout -= min(timeout, wait_interval) + wait_interval = max( + min(timeout, int(math.log2(timeout + 1)) * 15) - min_wait_interval, + min_wait_interval, + ) - @reporter.step_deco("Wait until K6 is finished") - def wait_until_finished(self, timeout: int = 0, k6_should_be_running: bool = False) -> None: - wait_interval = 10 - if self._k6_process is None: - assert "No k6 instances were executed" - if k6_should_be_running: - assert self._k6_process.running(), "k6 should be running." - while timeout > 0: if not self._k6_process.running(): return - logger.info(f"K6 is running. 
Waiting {wait_interval} seconds...") - sleep(wait_interval) - timeout -= wait_interval - self.stop() - raise TimeoutError(f"Expected K6 finished in {timeout} sec.") + + self.stop() + raise TimeoutError(f"Expected K6 to finish after {original_timeout} sec.") def get_results(self) -> Any: - with reporter.step(f"K6 results from {self.load_node}"): + with reporter.step( + f"Get load results from loader {self.loader.ip} on endpoints {self.endpoints}" + ): self.__log_output() if not self.summary_json: @@ -186,20 +217,20 @@ class K6: summary_json = json.loads(summary_text) allure_filenames = { - K6ProcessAllocationStrategy.PER_LOAD_NODE: f"{self.load_node}_{self.scenario.value}_summary.json", - K6ProcessAllocationStrategy.PER_ENDPOINT: f"{self.load_node}_{self.scenario.value}_{self.endpoints[0]}_summary.json", + K6ProcessAllocationStrategy.PER_LOAD_NODE: f"{self.loader.ip}_{self.scenario.value}_summary.json", + K6ProcessAllocationStrategy.PER_ENDPOINT: f"{self.loader.ip}_{self.scenario.value}_{self.endpoints[0]}_summary.json", } allure_filename = allure_filenames[self.load_params.k6_process_allocation_strategy] reporter.attach(summary_text, allure_filename) return summary_json - @reporter.step_deco("Stop K6") def stop(self) -> None: - if self.is_running: - self._k6_process.stop() + with reporter.step(f"Stop load from loader {self.loader.ip} on endpoints {self.endpoints}"): + if self.is_running: + self._k6_process.stop() - self._wait_until_process_end() + self._wait_until_process_end() @property def is_running(self) -> bool: @@ -207,7 +238,7 @@ class K6: return self._k6_process.running() return False - @reporter.step_deco("Wait until process end") + @reporter.step_deco("Wait until K6 process end") @wait_for_success( K6_STOP_SIGNAL_TIMEOUT, 15, False, False, "Can not stop K6 process within timeout" ) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 4e67321..c337d7c 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -17,6 +17,7 @@ class LoadScenario(Enum): S3_CAR = "s3_car" HTTP = "http" VERIFY = "verify" + LOCAL = "local" all_load_scenarios = [ @@ -25,13 +26,19 @@ all_load_scenarios = [ LoadScenario.HTTP, LoadScenario.S3_CAR, LoadScenario.gRPC_CAR, + LoadScenario.LOCAL, ] all_scenarios = all_load_scenarios.copy() + [LoadScenario.VERIFY] -constant_vus_scenarios = [LoadScenario.gRPC, LoadScenario.S3, LoadScenario.HTTP] +constant_vus_scenarios = [LoadScenario.gRPC, LoadScenario.S3, LoadScenario.HTTP, LoadScenario.LOCAL] constant_arrival_rate_scenarios = [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR] -grpc_preset_scenarios = [LoadScenario.gRPC, LoadScenario.HTTP, LoadScenario.gRPC_CAR] +grpc_preset_scenarios = [ + LoadScenario.gRPC, + LoadScenario.HTTP, + LoadScenario.gRPC_CAR, + LoadScenario.LOCAL, +] s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR] @@ -129,6 +136,8 @@ class LoadParams: working_dir: Optional[str] = None # Preset for the k6 run preset: Optional[Preset] = None + # K6 download url + k6_url: Optional[str] = None # ------- COMMON SCENARIO PARAMS ------- # Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value. @@ -207,6 +216,10 @@ class LoadParams: # Amount of Verification VU. 
verify_clients: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "CLIENTS", True) + # ------- LOCAL SCENARIO PARAMS ------- + # Config file location (filled automatically) + config_file: Optional[str] = metadata_field([LoadScenario.LOCAL], None, "CONFIG_FILE") + def set_id(self, load_id): self.load_id = load_id self.registry_file = os.path.join(self.working_dir, f"{load_id}_registry.bolt") diff --git a/src/frostfs_testlib/load/load_metrics.py b/src/frostfs_testlib/load/load_metrics.py index 50d7b38..0b4e28e 100644 --- a/src/frostfs_testlib/load/load_metrics.py +++ b/src/frostfs_testlib/load/load_metrics.py @@ -138,6 +138,17 @@ class S3Metrics(MetricsBase): _DELETE_ERRORS = "aws_obj_delete_fails" +class LocalMetrics(MetricsBase): + _WRITE_SUCCESS = "local_obj_put_total" + _WRITE_ERRORS = "local_obj_put_fails" + + _READ_SUCCESS = "local_obj_get_total" + _READ_ERRORS = "local_obj_get_fails" + + _DELETE_SUCCESS = "local_obj_delete_total" + _DELETE_ERRORS = "local_obj_delete_fails" + + class VerifyMetrics(MetricsBase): _WRITE_SUCCESS = "N/A" _WRITE_ERRORS = "N/A" @@ -157,6 +168,7 @@ def get_metrics_object(load_type: LoadScenario, summary: dict[str, Any]) -> Metr LoadScenario.S3: S3Metrics, LoadScenario.S3_CAR: S3Metrics, LoadScenario.VERIFY: VerifyMetrics, + LoadScenario.LOCAL: LocalMetrics, } return class_map[load_type](summary) diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py index 5f22515..7f912e4 100644 --- a/src/frostfs_testlib/load/load_report.py +++ b/src/frostfs_testlib/load/load_report.py @@ -103,6 +103,7 @@ class LoadReport: LoadScenario.HTTP: "closed model", LoadScenario.gRPC_CAR: "open model", LoadScenario.S3_CAR: "open model", + LoadScenario.LOCAL: "local fill", } return model_map[self.load_params.scenario] diff --git a/src/frostfs_testlib/load/load_steps.py b/src/frostfs_testlib/load/load_steps.py deleted file mode 100644 index b55ff22..0000000 --- a/src/frostfs_testlib/load/load_steps.py +++ /dev/null @@ -1,191 +0,0 @@ -import copy -import itertools -import math -import re -from dataclasses import fields - -from frostfs_testlib.cli import FrostfsAuthmate -from frostfs_testlib.load.k6 import K6 -from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams -from frostfs_testlib.reporter import get_reporter -from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC -from frostfs_testlib.resources.load_params import ( - BACKGROUND_LOAD_VUS_COUNT_DIVISOR, - LOAD_NODE_SSH_USER, -) -from frostfs_testlib.shell import CommandOptions, SSHShell -from frostfs_testlib.shell.interfaces import InteractiveInput, SshCredentials -from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo - -reporter = get_reporter() - -STOPPED_HOSTS = [] - - -@reporter.step_deco("Init s3 client on load nodes") -def init_s3_client( - load_nodes: list[str], - load_params: LoadParams, - k6_directory: str, - ssh_credentials: SshCredentials, - nodes_under_load: list[ClusterNode], - wallet: WalletInfo, -): - storage_node = nodes_under_load[0].service(StorageNode) - s3_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in nodes_under_load] - grpc_peer = storage_node.get_rpc_endpoint() - - for load_node in load_nodes: - ssh_client = _get_shell(ssh_credentials, load_node) - frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(ssh_client, FROSTFS_AUTHMATE_EXEC) - 
issue_secret_output = frostfs_authmate_exec.secret.issue(
-            wallet=wallet.path,
-            peer=grpc_peer,
-            bearer_rules=f"{k6_directory}/scenarios/files/rules.json",
-            gate_public_key=s3_public_keys,
-            container_placement_policy=load_params.preset.container_placement_policy,
-            container_policy=f"{k6_directory}/scenarios/files/policy.json",
-            wallet_password=wallet.password,
-        ).stdout
-        aws_access_key_id = str(
-            re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group(
-                "aws_access_key_id"
-            )
-        )
-        aws_secret_access_key = str(
-            re.search(
-                r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output
-            ).group("aws_secret_access_key")
-        )
-        # prompt_pattern doesn't work at the moment
-        configure_input = [
-            InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id),
-            InteractiveInput(
-                prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key
-            ),
-            InteractiveInput(prompt_pattern=r".*", input=""),
-            InteractiveInput(prompt_pattern=r".*", input=""),
-        ]
-        ssh_client.exec("aws configure", CommandOptions(interactive_inputs=configure_input))
-
-
-@reporter.step_deco("Prepare K6 instances and objects")
-def prepare_k6_instances(
-    load_nodes: list[str],
-    ssh_credentials: SshCredentials,
-    k6_dir: str,
-    load_params: LoadParams,
-    endpoints: list[str],
-    loaders_wallet: WalletInfo,
-) -> list[K6]:
-    k6_load_objects: list[K6] = []
-    nodes = itertools.cycle(load_nodes)
-
-    k6_distribution_count = {
-        K6ProcessAllocationStrategy.PER_LOAD_NODE: len(load_nodes),
-        K6ProcessAllocationStrategy.PER_ENDPOINT: len(endpoints),
-    }
-    endpoints_generators = {
-        K6ProcessAllocationStrategy.PER_LOAD_NODE: itertools.cycle([endpoints]),
-        K6ProcessAllocationStrategy.PER_ENDPOINT: itertools.cycle(
-            [[endpoint] for endpoint in endpoints]
-        ),
-    }
-    k6_processes_count = k6_distribution_count[load_params.k6_process_allocation_strategy]
-    endpoints_gen = endpoints_generators[load_params.k6_process_allocation_strategy]
-
-    distributed_load_params_list = _get_distributed_load_params_list(
-        load_params, k6_processes_count
-    )
-
-    for distributed_load_params in distributed_load_params_list:
-        load_node = next(nodes)
-        shell = _get_shell(ssh_credentials, load_node)
-        # Make working_dir directory
-        shell.exec(f"sudo mkdir -p {distributed_load_params.working_dir}")
-        shell.exec(f"sudo chown {LOAD_NODE_SSH_USER} {distributed_load_params.working_dir}")
-
-        k6_load_object = K6(
-            distributed_load_params,
-            next(endpoints_gen),
-            k6_dir,
-            shell,
-            load_node,
-            loaders_wallet,
-        )
-        k6_load_objects.append(k6_load_object)
-        if load_params.preset:
-            k6_load_object.preset()
-
-    return k6_load_objects
-
-
-def _get_shell(ssh_credentials: SshCredentials, load_node: str) -> SSHShell:
-    ssh_client = SSHShell(
-        host=load_node,
-        login=ssh_credentials.ssh_login,
-        password=ssh_credentials.ssh_password,
-        private_key_path=ssh_credentials.ssh_key_path,
-        private_key_passphrase=ssh_credentials.ssh_key_passphrase,
-    )
-
-    return ssh_client
-
-
-def _get_distributed_load_params_list(
-    original_load_params: LoadParams, workers_count: int
-) -> list[LoadParams]:
-    divisor = int(BACKGROUND_LOAD_VUS_COUNT_DIVISOR)
-    distributed_load_params: list[LoadParams] = []
-
-    for i in range(workers_count):
-        load_params = copy.deepcopy(original_load_params)
-        # Append #i here in case if multiple k6 processes goes into same load node
-        load_params.set_id(f"{load_params.load_id}_{i}")
-        distributed_load_params.append(load_params)
-
-    load_fields = fields(original_load_params)
-
-    for field in load_fields:
-        if (
- field.metadata - and original_load_params.scenario in field.metadata["applicable_scenarios"] - and field.metadata["distributed"] - and getattr(original_load_params, field.name) is not None - ): - original_value = getattr(original_load_params, field.name) - distribution = _get_distribution(math.ceil(original_value / divisor), workers_count) - for i in range(workers_count): - setattr(distributed_load_params[i], field.name, distribution[i]) - - return distributed_load_params - - -def _get_distribution(clients_count: int, workers_count: int) -> list[int]: - """ - This function will distribute evenly as possible X clients to Y workers. - For example if we have 150 readers (clients) and we want to spread it over 4 load nodes (workers) - this will return [38, 38, 37, 37]. - - Args: - clients_count: amount of things needs to be distributed. - workers_count: amount of workers. - - Returns: - list of distribution. - """ - if workers_count < 1: - raise Exception("Workers cannot be less then 1") - - # Amount of guaranteed payload on one worker - clients_per_worker = clients_count // workers_count - # Remainder of clients left to be distributed - remainder = clients_count - clients_per_worker * workers_count - - distribution = [ - clients_per_worker + 1 if i < remainder else clients_per_worker - for i in range(workers_count) - ] - return distribution diff --git a/src/frostfs_testlib/load/loaders.py b/src/frostfs_testlib/load/loaders.py new file mode 100644 index 0000000..9e92155 --- /dev/null +++ b/src/frostfs_testlib/load/loaders.py @@ -0,0 +1,60 @@ +from frostfs_testlib.load.interfaces import Loader +from frostfs_testlib.resources.load_params import ( + LOAD_NODE_SSH_PASSWORD, + LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE, + LOAD_NODE_SSH_PRIVATE_KEY_PATH, + LOAD_NODE_SSH_USER, +) +from frostfs_testlib.shell.interfaces import Shell, SshCredentials +from frostfs_testlib.shell.ssh_shell import SSHShell +from frostfs_testlib.storage.cluster import ClusterNode + + +class RemoteLoader(Loader): + def __init__(self, ssh_credentials: SshCredentials, ip: str) -> None: + self.ssh_credentials = ssh_credentials + self._ip = ip + + @property + def ip(self): + return self._ip + + def get_shell(self) -> Shell: + ssh_client = SSHShell( + host=self.ip, + login=self.ssh_credentials.ssh_login, + password=self.ssh_credentials.ssh_password, + private_key_path=self.ssh_credentials.ssh_key_path, + private_key_passphrase=self.ssh_credentials.ssh_key_passphrase, + ) + + return ssh_client + + @classmethod + def from_ip_list(cls, ip_list: list[str]) -> list[Loader]: + loaders: list[Loader] = [] + ssh_credentials = SshCredentials( + LOAD_NODE_SSH_USER, + LOAD_NODE_SSH_PASSWORD, + LOAD_NODE_SSH_PRIVATE_KEY_PATH, + LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE, + ) + + for ip in ip_list: + loaders.append(RemoteLoader(ssh_credentials, ip)) + + return loaders + + +class NodeLoader(Loader): + """When ClusterNode is the loader for itself (for Local scenario only).""" + + def __init__(self, cluster_node: ClusterNode) -> None: + self.cluster_node = cluster_node + + def get_shell(self) -> Shell: + return self.cluster_node.host.get_shell() + + @property + def ip(self): + return self.cluster_node.host_ip diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py new file mode 100644 index 0000000..6f9d046 --- /dev/null +++ b/src/frostfs_testlib/load/runners.py @@ -0,0 +1,398 @@ +import copy +import itertools +import math +import re +import time +from concurrent.futures import ThreadPoolExecutor +from dataclasses import 
fields
+from typing import Optional
+
+import yaml
+
+from frostfs_testlib.cli.frostfs_authmate.authmate import FrostfsAuthmate
+from frostfs_testlib.load.interfaces import Loader, ScenarioRunner
+from frostfs_testlib.load.k6 import K6
+from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadType
+from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader
+from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib.resources import optionals
+from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
+from frostfs_testlib.resources.common import STORAGE_USER_NAME
+from frostfs_testlib.resources.load_params import (
+    BACKGROUND_LOAD_VUS_COUNT_DIVISOR,
+    LOAD_NODE_SSH_USER,
+    LOAD_NODES,
+)
+from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput
+from frostfs_testlib.storage.cluster import ClusterNode
+from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
+from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode
+from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
+from frostfs_testlib.testing.test_control import run_optionally
+from frostfs_testlib.utils import datetime_utils
+from frostfs_testlib.utils.file_keeper import FileKeeper
+
+reporter = get_reporter()
+
+
+class DefaultRunner(ScenarioRunner):
+    k6_instances: list[K6]
+    loaders: list[Loader]
+    loaders_wallet: WalletInfo
+
+    def __init__(
+        self,
+        loaders_wallet: WalletInfo,
+        load_ip_list: Optional[list[str]] = None,
+    ) -> None:
+        if load_ip_list is None:
+            load_ip_list = LOAD_NODES
+        self.loaders = RemoteLoader.from_ip_list(load_ip_list)
+        self.loaders_wallet = loaders_wallet
+
+    @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
+    @reporter.step_deco("Prepare load instances")
+    def prepare(
+        self,
+        load_params: LoadParams,
+        nodes_under_load: list[ClusterNode],
+        k6_dir: str,
+    ):
+        if load_params.load_type != LoadType.S3:
+            return
+
+        with reporter.step("Init s3 client on loaders"):
+            storage_node = nodes_under_load[0].service(StorageNode)
+            s3_public_keys = [
+                node.service(S3Gate).get_wallet_public_key() for node in nodes_under_load
+            ]
+            grpc_peer = storage_node.get_rpc_endpoint()
+
+            for loader in self.loaders:
+                with reporter.step(f"Init s3 client on {loader.ip}"):
+                    shell = loader.get_shell()
+                    frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(
+                        shell, FROSTFS_AUTHMATE_EXEC
+                    )
+                    issue_secret_output = frostfs_authmate_exec.secret.issue(
+                        wallet=self.loaders_wallet.path,
+                        peer=grpc_peer,
+                        bearer_rules=f"{k6_dir}/scenarios/files/rules.json",
+                        gate_public_key=s3_public_keys,
+                        container_placement_policy=load_params.preset.container_placement_policy,
+                        container_policy=f"{k6_dir}/scenarios/files/policy.json",
+                        wallet_password=self.loaders_wallet.password,
+                    ).stdout
+                    aws_access_key_id = str(
+                        re.search(
+                            r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output
+                        ).group("aws_access_key_id")
+                    )
+                    aws_secret_access_key = str(
+                        re.search(
+                            r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)",
+                            issue_secret_output,
+                        ).group("aws_secret_access_key")
+                    )
+
+                    configure_input = [
+                        InteractiveInput(
+                            prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id
+                        ),
+                        InteractiveInput(
+                            prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key
+                        ),
+                        InteractiveInput(prompt_pattern=r".*", input=""),
+                        InteractiveInput(prompt_pattern=r".*", input=""),
+                    ]
+                    shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input))
+
+    def wait_until_finish(self):
+        for k6_instance in self.k6_instances:
+            k6_instance.wait_until_finished()
+
+    def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
+        self.k6_instances = []
+        cycled_loaders = itertools.cycle(self.loaders)
+
+        k6_distribution_count = {
+            K6ProcessAllocationStrategy.PER_LOAD_NODE: len(self.loaders),
+            K6ProcessAllocationStrategy.PER_ENDPOINT: len(endpoints),
+        }
+        endpoints_generators = {
+            K6ProcessAllocationStrategy.PER_LOAD_NODE: itertools.cycle([endpoints]),
+            K6ProcessAllocationStrategy.PER_ENDPOINT: itertools.cycle(
+                [[endpoint] for endpoint in endpoints]
+            ),
+        }
+        k6_processes_count = k6_distribution_count[load_params.k6_process_allocation_strategy]
+        endpoints_gen = endpoints_generators[load_params.k6_process_allocation_strategy]
+
+        distributed_load_params_list = self._get_distributed_load_params_list(
+            load_params, k6_processes_count
+        )
+
+        for distributed_load_params in distributed_load_params_list:
+            loader = next(cycled_loaders)
+            shell = loader.get_shell()
+            with reporter.step(
+                f"Init K6 instances on {loader.ip} for load id {distributed_load_params.load_id}"
+            ):
+                with reporter.step(f"Make working directory"):
+                    shell.exec(f"sudo mkdir -p {distributed_load_params.working_dir}")
+                    shell.exec(
+                        f"sudo chown {LOAD_NODE_SSH_USER} {distributed_load_params.working_dir}"
+                    )
+
+                k6_instance = K6(
+                    distributed_load_params,
+                    next(endpoints_gen),
+                    k6_dir,
+                    shell,
+                    loader,
+                    self.loaders_wallet,
+                )
+                self.k6_instances.append(k6_instance)
+                if load_params.preset:
+                    k6_instance.preset()
+
+    def _get_distributed_load_params_list(
+        self, original_load_params: LoadParams, workers_count: int
+    ) -> list[LoadParams]:
+        divisor = int(BACKGROUND_LOAD_VUS_COUNT_DIVISOR)
+        distributed_load_params: list[LoadParams] = []
+
+        for i in range(workers_count):
+            load_params = copy.deepcopy(original_load_params)
+            # Append #i here in case multiple k6 processes go to the same load node
+            load_params.set_id(f"{load_params.load_id}_{i}")
+            distributed_load_params.append(load_params)
+
+        load_fields = fields(original_load_params)
+
+        for field in load_fields:
+            if (
+                field.metadata
+                and original_load_params.scenario in field.metadata["applicable_scenarios"]
+                and field.metadata["distributed"]
+                and getattr(original_load_params, field.name) is not None
+            ):
+                original_value = getattr(original_load_params, field.name)
+                distribution = self._get_distribution(
+                    math.ceil(original_value / divisor), workers_count
+                )
+                for i in range(workers_count):
+                    setattr(distributed_load_params[i], field.name, distribution[i])
+
+        return distributed_load_params
+
+    def _get_distribution(self, clients_count: int, workers_count: int) -> list[int]:
+        """
+        This function will distribute X clients to Y workers as evenly as possible.
+        For example, if we have 150 readers (clients) and we want to spread them over 4 load nodes (workers),
+        this will return [38, 38, 37, 37].
+
+        Args:
+            clients_count: number of clients that need to be distributed.
+            workers_count: number of workers.
+
+        Returns:
+            list of distribution.
+        """
+        if workers_count < 1:
+            raise Exception("Workers cannot be less than 1")
+
+        # Amount of guaranteed payload on one worker
+        clients_per_worker = clients_count // workers_count
+        # Remainder of clients left to be distributed
+        remainder = clients_count - clients_per_worker * workers_count
+
+        distribution = [
+            clients_per_worker + 1 if i < remainder else clients_per_worker
+            for i in range(workers_count)
+        ]
+        return distribution
+
+    def start(self):
+        load_params = self.k6_instances[0].load_params
+
+        with ThreadPoolExecutor(max_workers=len(self.k6_instances)) as executor:
+            futures = [executor.submit(k6.start) for k6 in self.k6_instances]
+
+            # Check for exceptions
+            exceptions = [future.exception() for future in futures if future.exception()]
+            if exceptions:
+                raise RuntimeError(
+                    f"The following exceptions occurred during start of k6: {exceptions}"
+                )
+
+        wait_after_start_time = datetime_utils.parse_time(load_params.setup_timeout) + 5
+        with reporter.step(
+            f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on"
+        ):
+            time.sleep(wait_after_start_time)
+
+    def stop(self):
+        for k6_instance in self.k6_instances:
+            k6_instance.stop()
+
+    def get_results(self) -> dict:
+        results = {}
+        for k6_instance in self.k6_instances:
+            if k6_instance.load_params.k6_process_allocation_strategy is None:
+                raise RuntimeError("k6_process_allocation_strategy should not be none")
+
+            result = k6_instance.get_results()
+            keys_map = {
+                K6ProcessAllocationStrategy.PER_LOAD_NODE: k6_instance.loader.ip,
+                K6ProcessAllocationStrategy.PER_ENDPOINT: k6_instance.endpoints[0],
+            }
+            key = keys_map[k6_instance.load_params.k6_process_allocation_strategy]
+            results[key] = result
+
+        return results
+
+    @property
+    def is_running(self):
+        for k6_instance in self.k6_instances:
+            if not k6_instance.is_running:
+                return False
+
+        return True
+
+
+class LocalRunner(ScenarioRunner):
+    k6_instances: list[K6]
+    loaders: list[Loader]
+    cluster_state_controller: ClusterStateController
+    file_keeper: FileKeeper
+    wallet: WalletInfo
+
+    def __init__(
+        self,
+        cluster_state_controller: ClusterStateController,
+        file_keeper: FileKeeper,
+        nodes_under_load: list[ClusterNode],
+    ) -> None:
+        self.cluster_state_controller = cluster_state_controller
+        self.file_keeper = file_keeper
+        self.loaders = [NodeLoader(node) for node in nodes_under_load]
+
+    @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
+    @reporter.step_deco("Prepare load instances")
+    def prepare(
+        self,
+        load_params: LoadParams,
+        nodes_under_load: list[ClusterNode],
+        k6_dir: str,
+    ):
+        @reporter.step_deco("Prepare node {cluster_node}")
+        def prepare_node(cluster_node: ClusterNode):
+            shell = cluster_node.host.get_shell()
+
+            with reporter.step("Allow storage user to login into system"):
+                shell.exec(f"sudo chsh -s /bin/bash {STORAGE_USER_NAME}")
+
+            with reporter.step("Update limits.conf"):
+                limits_path = "/etc/security/limits.conf"
+                self.file_keeper.add(cluster_node.storage_node, limits_path)
+                content = f"{STORAGE_USER_NAME} hard nofile 65536\n{STORAGE_USER_NAME} soft nofile 65536\n"
+                shell.exec(f"echo '{content}' | sudo tee {limits_path}")
+
+            with reporter.step("Download K6"):
+                shell.exec(f"sudo rm -rf {k6_dir};sudo mkdir {k6_dir}")
+                shell.exec(f"sudo curl -so {k6_dir}/k6.tar.gz {load_params.k6_url}")
+                shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz -C {k6_dir}")
+                shell.exec(f"sudo chmod -R 777 {k6_dir}")
+
+            with reporter.step("Create empty_passwd"):
+                self.wallet = WalletInfo(
f"{k6_dir}/scenarios/files/wallet.json", "", "/tmp/empty_passwd.yml" + ) + content = yaml.dump({"password": ""}) + shell.exec(f'echo "{content}" | sudo tee {self.wallet.config_path}') + shell.exec(f"sudo chmod -R 777 {self.wallet.config_path}") + + with ThreadPoolExecutor(max_workers=len(nodes_under_load)) as executor: + result = executor.map(prepare_node, nodes_under_load) + + # Check for exceptions + for _ in result: + pass + + def wait_until_finish(self): + for k6_instance in self.k6_instances: + k6_instance.wait_until_finished() + + def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): + self.k6_instances = [] + for loader in self.loaders: + shell = loader.get_shell() + with reporter.step(f"Init K6 instances on {loader.ip}"): + with reporter.step(f"Make working directory"): + shell.exec(f"sudo mkdir -p {load_params.working_dir}") + # If we chmod /home/ folder we can no longer ssh to the node + # !! IMPORTANT !! + if ( + load_params.working_dir + and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}" + and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}/" + ): + shell.exec(f"sudo chmod -R 777 {load_params.working_dir}") + + k6_instance = K6( + load_params, + ["localhost:8080"], + k6_dir, + shell, + loader, + self.wallet, + ) + self.k6_instances.append(k6_instance) + if load_params.preset: + k6_instance.preset() + + def start(self): + load_params = self.k6_instances[0].load_params + + self.cluster_state_controller.stop_all_s3_gates() + self.cluster_state_controller.stop_all_storage_services() + + with ThreadPoolExecutor(max_workers=len(self.k6_instances)) as executor: + futures = [executor.submit(k6.start) for k6 in self.k6_instances] + + # Check for exceptions + exceptions = [future.exception() for future in futures if future.exception()] + if exceptions: + raise RuntimeError( + f"The following exceptions occured during start of k6: {exceptions}" + ) + + wait_after_start_time = datetime_utils.parse_time(load_params.setup_timeout) + 5 + with reporter.step( + f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on" + ): + time.sleep(wait_after_start_time) + + def stop(self): + for k6_instance in self.k6_instances: + k6_instance.stop() + + self.cluster_state_controller.start_stopped_storage_services() + self.cluster_state_controller.start_stopped_s3_gates() + + def get_results(self) -> dict: + results = {} + for k6_instance in self.k6_instances: + result = k6_instance.get_results() + results[k6_instance.loader.ip] = result + + return results + + @property + def is_running(self): + for k6_instance in self.k6_instances: + if not k6_instance.is_running: + return False + + return True diff --git a/src/frostfs_testlib/processes/remote_process.py b/src/frostfs_testlib/processes/remote_process.py index 7f49000..d92d77a 100644 --- a/src/frostfs_testlib/processes/remote_process.py +++ b/src/frostfs_testlib/processes/remote_process.py @@ -10,13 +10,16 @@ from tenacity.wait import wait_fixed from frostfs_testlib.reporter import get_reporter from frostfs_testlib.shell import Shell -from frostfs_testlib.shell.interfaces import CommandOptions +from frostfs_testlib.shell.command_inspectors import SuInspector +from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions reporter = get_reporter() class RemoteProcess: - def __init__(self, cmd: str, process_dir: str, shell: Shell): + def __init__( + self, cmd: str, process_dir: str, shell: Shell, cmd_inspector: Optional[CommandInspector] + ): 
         self.process_dir = process_dir
         self.cmd = cmd
         self.stdout_last_line_number = 0
@@ -26,10 +29,13 @@ class RemoteProcess:
         self.saved_stdout: Optional[str] = None
         self.saved_stderr: Optional[str] = None
         self.shell = shell
+        self.cmd_inspectors: list[CommandInspector] = [cmd_inspector] if cmd_inspector else []
 
     @classmethod
     @reporter.step_deco("Create remote process")
-    def create(cls, command: str, shell: Shell, working_dir: str = "/tmp") -> RemoteProcess:
+    def create(
+        cls, command: str, shell: Shell, working_dir: str = "/tmp", user: Optional[str] = None
+    ) -> RemoteProcess:
         """
         Create a process on a remote host.
 
@@ -39,6 +45,7 @@ class RemoteProcess:
             rc: contains script return code
             stderr: contains script errors
             stdout: contains script output
+            user: user on whose behalf the command will be executed
 
         Args:
             shell: Shell instance
@@ -48,8 +55,12 @@ class RemoteProcess:
         Returns:
             RemoteProcess instance for further examination
         """
+        cmd_inspector = SuInspector(user) if user else None
        remote_process = cls(
-            cmd=command, process_dir=os.path.join(working_dir, f"proc_{uuid.uuid4()}"), shell=shell
+            cmd=command,
+            process_dir=os.path.join(working_dir, f"proc_{uuid.uuid4()}"),
+            shell=shell,
+            cmd_inspector=cmd_inspector,
         )
         remote_process._create_process_dir()
         remote_process._generate_command_script(command)
@@ -73,7 +84,8 @@ class RemoteProcess:
             cur_stdout = self.saved_stdout
         else:
             terminal = self.shell.exec(
-                f"cat {self.process_dir}/stdout", options=CommandOptions(no_log=True)
+                f"cat {self.process_dir}/stdout",
+                options=CommandOptions(no_log=True, extra_inspectors=self.cmd_inspectors),
             )
             if self.proc_rc is not None:
                 self.saved_stdout = terminal.stdout
@@ -104,7 +116,8 @@ class RemoteProcess:
             cur_stderr = self.saved_stderr
         else:
             terminal = self.shell.exec(
-                f"cat {self.process_dir}/stderr", options=CommandOptions(no_log=True)
+                f"cat {self.process_dir}/stderr",
+                options=CommandOptions(no_log=True, extra_inspectors=self.cmd_inspectors),
             )
             if self.proc_rc is not None:
                 self.saved_stderr = terminal.stdout
@@ -123,7 +136,10 @@ class RemoteProcess:
         if self.proc_rc is not None:
             return self.proc_rc
 
-        terminal = self.shell.exec(f"cat {self.process_dir}/rc", CommandOptions(check=False))
+        terminal = self.shell.exec(
+            f"cat {self.process_dir}/rc",
+            CommandOptions(check=False, extra_inspectors=self.cmd_inspectors, no_log=True),
+        )
         if "No such file or directory" in terminal.stderr:
             return None
         elif terminal.stderr or terminal.return_code != 0:
@@ -138,7 +154,10 @@ class RemoteProcess:
 
     @reporter.step_deco("Send signal to process")
     def send_signal(self, signal: int) -> None:
-        kill_res = self.shell.exec(f"kill -{signal} {self.pid}", CommandOptions(check=False))
+        kill_res = self.shell.exec(
+            f"kill -{signal} {self.pid}",
+            CommandOptions(check=False, extra_inspectors=self.cmd_inspectors),
+        )
         if "No such process" in kill_res.stderr:
             return
         if kill_res.return_code:
@@ -158,27 +177,38 @@ class RemoteProcess:
     def clear(self) -> None:
         if self.process_dir == "/":
             raise AssertionError(f"Invalid path to delete: {self.process_dir}")
-        self.shell.exec(f"rm -rf {self.process_dir}")
+        self.shell.exec(
+            f"rm -rf {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)
+        )
 
     @reporter.step_deco("Start remote process")
     def _start_process(self) -> None:
         self.shell.exec(
             f"nohup {self.process_dir}/command.sh </dev/null >{self.process_dir}/stdout "
-            f"2>{self.process_dir}/stderr &"
+            f"2>{self.process_dir}/stderr &",
+            CommandOptions(extra_inspectors=self.cmd_inspectors),
         )
 
    @reporter.step_deco("Create process 
directory") def _create_process_dir(self) -> None: - self.shell.exec(f"mkdir {self.process_dir}") - self.shell.exec(f"chmod 777 {self.process_dir}") - terminal = self.shell.exec(f"realpath {self.process_dir}") + self.shell.exec( + f"mkdir -p {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors) + ) + self.shell.exec( + f"chmod 777 {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors) + ) + terminal = self.shell.exec( + f"realpath {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors) + ) self.process_dir = terminal.stdout.strip() @reporter.step_deco("Get pid") @retry(wait=wait_fixed(10), stop=stop_after_attempt(5), reraise=True) def _get_pid(self) -> str: - terminal = self.shell.exec(f"cat {self.process_dir}/pid") + terminal = self.shell.exec( + f"cat {self.process_dir}/pid", CommandOptions(extra_inspectors=self.cmd_inspectors) + ) assert terminal.stdout, f"invalid pid: {terminal.stdout}" return terminal.stdout.strip() @@ -196,6 +226,15 @@ class RemoteProcess: f"echo $? > {self.process_dir}/rc" ) - self.shell.exec(f'echo "{script}" > {self.process_dir}/command.sh') - self.shell.exec(f"cat {self.process_dir}/command.sh") - self.shell.exec(f"chmod +x {self.process_dir}/command.sh") + self.shell.exec( + f'echo "{script}" > {self.process_dir}/command.sh', + CommandOptions(extra_inspectors=self.cmd_inspectors), + ) + self.shell.exec( + f"cat {self.process_dir}/command.sh", + CommandOptions(extra_inspectors=self.cmd_inspectors), + ) + self.shell.exec( + f"chmod +x {self.process_dir}/command.sh", + CommandOptions(extra_inspectors=self.cmd_inspectors), + ) diff --git a/src/frostfs_testlib/reporter/allure_handler.py b/src/frostfs_testlib/reporter/allure_handler.py index 8e00b26..fef815d 100644 --- a/src/frostfs_testlib/reporter/allure_handler.py +++ b/src/frostfs_testlib/reporter/allure_handler.py @@ -13,7 +13,7 @@ class AllureHandler(ReporterHandler): """Handler that stores test artifacts in Allure report.""" def step(self, name: str) -> AbstractContextManager: - name = shorten(name, width=70, placeholder="...") + name = shorten(name, width=140, placeholder="...") return allure.step(name) def step_decorator(self, name: str) -> Callable: diff --git a/src/frostfs_testlib/resources/common.py b/src/frostfs_testlib/resources/common.py index dfbb3a1..131bf8a 100644 --- a/src/frostfs_testlib/resources/common.py +++ b/src/frostfs_testlib/resources/common.py @@ -10,6 +10,8 @@ COMPLEX_OBJECT_TAIL_SIZE = os.getenv("COMPLEX_OBJECT_TAIL_SIZE", "1000") SERVICE_MAX_STARTUP_TIME = os.getenv("SERVICE_MAX_STARTUP_TIME", "5m") +STORAGE_USER_NAME = "frostfs-storage" + MORPH_TIMEOUT = os.getenv("MORPH_BLOCK_TIME", "8s") MORPH_BLOCK_TIME = os.getenv("MORPH_BLOCK_TIME", "8s") FROSTFS_CONTRACT_CACHE_TIMEOUT = os.getenv("FROSTFS_CONTRACT_CACHE_TIMEOUT", "30s") diff --git a/src/frostfs_testlib/shell/command_inspectors.py b/src/frostfs_testlib/shell/command_inspectors.py index 8486f43..8fe2f34 100644 --- a/src/frostfs_testlib/shell/command_inspectors.py +++ b/src/frostfs_testlib/shell/command_inspectors.py @@ -7,7 +7,23 @@ class SudoInspector(CommandInspector): If command is already prepended with sudo, then has no effect. 
""" - def inspect(self, command: str) -> str: + def inspect(self, original_command: str, command: str) -> str: if not command.startswith("sudo"): return f"sudo {command}" return command + + +class SuInspector(CommandInspector): + """Allows to run command as another user via sudo su call + + If command is already prepended with sudo su, then has no effect. + """ + + def __init__(self, user: str) -> None: + self.user = user + + def inspect(self, original_command: str, command: str) -> str: + if not original_command.startswith("sudo su"): + cmd = original_command.replace('"', '\\"').replace("\$", "\\\\\\$") + return f'sudo su - {self.user} -c "{cmd}"' + return original_command diff --git a/src/frostfs_testlib/shell/interfaces.py b/src/frostfs_testlib/shell/interfaces.py index 219bc7c..a8d3325 100644 --- a/src/frostfs_testlib/shell/interfaces.py +++ b/src/frostfs_testlib/shell/interfaces.py @@ -22,11 +22,12 @@ class CommandInspector(ABC): """Interface of inspector that processes command text before execution.""" @abstractmethod - def inspect(self, command: str) -> str: + def inspect(self, original_command: str, command: str) -> str: """Transforms command text and returns modified command. Args: command: Command to transform with this inspector. + original_command: Untransformed command to transform with this inspector. Depending on type of the inspector it might be required to modify original command Returns: Transformed command text. @@ -47,6 +48,7 @@ class CommandOptions: check: Controls whether to check return code of the command. Set to False to ignore non-zero return codes. no_log: Do not print output to logger if True. + extra_inspectors: Exctra command inspectors to process command """ interactive_inputs: Optional[list[InteractiveInput]] = None @@ -54,6 +56,7 @@ class CommandOptions: timeout: Optional[int] = None check: bool = True no_log: bool = False + extra_inspectors: Optional[list[CommandInspector]] = None def __post_init__(self): if self.timeout is None: diff --git a/src/frostfs_testlib/shell/local_shell.py b/src/frostfs_testlib/shell/local_shell.py index 12f450a..56d19b2 100644 --- a/src/frostfs_testlib/shell/local_shell.py +++ b/src/frostfs_testlib/shell/local_shell.py @@ -24,8 +24,10 @@ class LocalShell(Shell): # If no options were provided, use default options options = options or CommandOptions() - for inspector in self.command_inspectors: - command = inspector.inspect(command) + original_command = command + extra_inspectors = options.extra_inspectors if options.extra_inspectors else [] + for inspector in [*self.command_inspectors, *extra_inspectors]: + command = inspector.inspect(original_command, command) logger.info(f"Executing command: {command}") if options.interactive_inputs: diff --git a/src/frostfs_testlib/shell/ssh_shell.py b/src/frostfs_testlib/shell/ssh_shell.py index 6ef3dfb..5771274 100644 --- a/src/frostfs_testlib/shell/ssh_shell.py +++ b/src/frostfs_testlib/shell/ssh_shell.py @@ -126,8 +126,10 @@ class SSHShell(Shell): def exec(self, command: str, options: Optional[CommandOptions] = None) -> CommandResult: options = options or CommandOptions() - for inspector in self.command_inspectors: - command = inspector.inspect(command) + original_command = command + extra_inspectors = options.extra_inspectors if options.extra_inspectors else [] + for inspector in [*self.command_inspectors, *extra_inspectors]: + command = inspector.inspect(original_command, command) if options.interactive_inputs: result = self._exec_interactive(command, options) diff --git 
a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index a2336be..6cedd0f 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -1,50 +1,37 @@ import copy -import time +from typing import Optional import frostfs_testlib.resources.optionals as optionals -from frostfs_testlib.load.k6 import K6 +from frostfs_testlib.load.interfaces import ScenarioRunner from frostfs_testlib.load.load_config import ( EndpointSelectionStrategy, - K6ProcessAllocationStrategy, LoadParams, LoadScenario, LoadType, ) from frostfs_testlib.load.load_report import LoadReport -from frostfs_testlib.load.load_steps import init_s3_client, prepare_k6_instances from frostfs_testlib.load.load_verifiers import LoadVerifier from frostfs_testlib.reporter import get_reporter -from frostfs_testlib.resources.load_params import ( - K6_TEARDOWN_PERIOD, - LOAD_NODE_SSH_PASSWORD, - LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE, - LOAD_NODE_SSH_PRIVATE_KEY_PATH, - LOAD_NODE_SSH_USER, - LOAD_NODES, -) -from frostfs_testlib.shell.interfaces import SshCredentials from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.test_control import run_optionally -from frostfs_testlib.utils import datetime_utils reporter = get_reporter() class BackgroundLoadController: - k6_instances: list[K6] k6_dir: str load_params: LoadParams original_load_params: LoadParams - load_nodes: list[str] verification_params: LoadParams nodes_under_load: list[ClusterNode] load_counter: int - ssh_credentials: SshCredentials loaders_wallet: WalletInfo load_summaries: dict endpoints: list[str] + runner: ScenarioRunner + started: bool def __init__( self, @@ -52,15 +39,16 @@ class BackgroundLoadController: load_params: LoadParams, loaders_wallet: WalletInfo, nodes_under_load: list[ClusterNode], + runner: ScenarioRunner, ) -> None: self.k6_dir = k6_dir self.original_load_params = load_params self.load_params = copy.deepcopy(self.original_load_params) self.nodes_under_load = nodes_under_load self.load_counter = 1 - self.load_nodes = LOAD_NODES self.loaders_wallet = loaders_wallet - + self.runner = runner + self.started = False if load_params.endpoint_selection_strategy is None: raise RuntimeError("endpoint_selection_strategy should not be None") @@ -68,13 +56,6 @@ class BackgroundLoadController: load_params.load_type, load_params.endpoint_selection_strategy ) - self.ssh_credentials = SshCredentials( - LOAD_NODE_SSH_USER, - LOAD_NODE_SSH_PASSWORD, - LOAD_NODE_SSH_PRIVATE_KEY_PATH, - LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE, - ) - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, []) def _get_endpoints( self, load_type: LoadType, endpoint_selection_strategy: EndpointSelectionStrategy @@ -116,69 +97,28 @@ class BackgroundLoadController: return all_endpoints[load_type][endpoint_selection_strategy] @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Prepare background load instances") + @reporter.step_deco("Prepare load instances") def prepare(self): - if self.load_params.load_type == LoadType.S3: - init_s3_client( - self.load_nodes, - self.load_params, - self.k6_dir, - self.ssh_credentials, - self.nodes_under_load, - self.loaders_wallet, - ) - - self._prepare(self.load_params) - - 
def _prepare(self, load_params: LoadParams): - self.k6_instances = prepare_k6_instances( - load_nodes=LOAD_NODES, - ssh_credentials=self.ssh_credentials, - k6_dir=self.k6_dir, - load_params=load_params, - endpoints=self.endpoints, - loaders_wallet=self.loaders_wallet, - ) + self.runner.prepare(self.load_params, self.nodes_under_load, self.k6_dir) + self.runner.init_k6_instances(self.load_params, self.endpoints, self.k6_dir) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Start background load") def start(self): - if self.load_params.preset is None: - raise RuntimeError("Preset should not be none at the moment of start") - - with reporter.step( - f"Start background load on nodes {self.nodes_under_load}: " - f"writers = {self.load_params.writers}, " - f"obj_size = {self.load_params.object_size}, " - f"load_time = {self.load_params.load_time}, " - f"prepare_json = {self.load_params.preset.pregen_json}, " - f"endpoints = {self.endpoints}" - ): - for k6_load_instance in self.k6_instances: - k6_load_instance.start() - - wait_after_start_time = datetime_utils.parse_time(self.load_params.setup_timeout) + 5 - with reporter.step( - f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on" - ): - time.sleep(wait_after_start_time) + with reporter.step(f"Start load on nodes {self.nodes_under_load}"): + self.runner.start() + self.started = True @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Stop background load") + @reporter.step_deco("Stop load") def stop(self): - for k6_load_instance in self.k6_instances: - k6_load_instance.stop() + self.runner.stop() @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, True) - def is_running(self): - for k6_load_instance in self.k6_instances: - if not k6_load_instance.is_running: - return False - - return True + def is_running(self) -> bool: + return self.runner.is_running @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Reset background load") + @reporter.step_deco("Reset load") def _reset_for_consequent_load(self): """This method is required if we want to run multiple loads during test run. 
Raise load counter by 1 and append it to load_id @@ -188,25 +128,25 @@ class BackgroundLoadController: self.load_params.set_id(f"{self.load_params.load_id}_{self.load_counter}") @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Startup background load") + @reporter.step_deco("Startup load") def startup(self): self.prepare() self.start() @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Stop and get results of background load") - def teardown(self, load_report: LoadReport = None): - if not self.k6_instances: + @reporter.step_deco("Stop and get results of load") + def teardown(self, load_report: Optional[LoadReport] = None): + if not self.started: return self.stop() - self.load_summaries = self.get_results() - self.k6_instances = [] + self.load_summaries = self._get_results() + self.started = False if load_report: load_report.add_summaries(self.load_summaries) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Verify results of background load") + @reporter.step_deco("Verify results of load") def verify(self): try: if self.load_params.verify: @@ -220,9 +160,10 @@ class BackgroundLoadController: working_dir=self.load_params.working_dir, endpoint_selection_strategy=self.load_params.endpoint_selection_strategy, k6_process_allocation_strategy=self.load_params.k6_process_allocation_strategy, + setup_timeout="1s", ) self._run_verify_scenario() - verification_summaries = self.get_results() + verification_summaries = self._get_results() self.verify_summaries(self.load_summaries, verification_summaries) finally: self._reset_for_consequent_load() @@ -239,38 +180,20 @@ class BackgroundLoadController: @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) def wait_until_finish(self): - if self.load_params.load_time is None: - raise RuntimeError("LoadTime should not be none") - - for k6_instance in self.k6_instances: - k6_instance.wait_until_finished(self.load_params.load_time + int(K6_TEARDOWN_PERIOD)) + self.runner.wait_until_finish() @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Run verify scenario for background load") + @reporter.step_deco("Run verify scenario") def _run_verify_scenario(self): if self.verification_params.verify_time is None: raise RuntimeError("verify_time should not be none") - self._prepare(self.verification_params) - with reporter.step("Run verify background load data"): - for k6_verify_instance in self.k6_instances: - k6_verify_instance.start() - k6_verify_instance.wait_until_finished(self.verification_params.verify_time) + self.runner.init_k6_instances(self.verification_params, self.endpoints, self.k6_dir) + with reporter.step("Run verify load data"): + self.runner.start() + self.runner.wait_until_finish() @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("K6 run results") - def get_results(self) -> dict: - results = {} - for k6_instance in self.k6_instances: - if k6_instance.load_params.k6_process_allocation_strategy is None: - raise RuntimeError("k6_process_allocation_strategy should not be none") - - result = k6_instance.get_results() - keys_map = { - K6ProcessAllocationStrategy.PER_LOAD_NODE: k6_instance.load_node, - K6ProcessAllocationStrategy.PER_ENDPOINT: k6_instance.endpoints[0], - } - key = keys_map[k6_instance.load_params.k6_process_allocation_strategy] - results[key] = result - - return results + @reporter.step_deco("Get load results") + def _get_results(self) -> dict: + return 
self.runner.get_results() diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 1084552..6126f9d 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -23,7 +23,7 @@ class ClusterStateController: self.stopped_nodes: list[ClusterNode] = [] self.detached_disks: dict[str, DiskController] = {} self.stopped_storage_nodes: list[ClusterNode] = [] - self.stopped_s3_gate: list[ClusterNode] = [] + self.stopped_s3_gates: list[ClusterNode] = [] self.cluster = cluster self.shell = shell self.suspended_services: dict[str, list[ClusterNode]] = {} @@ -60,6 +60,16 @@ class ClusterStateController: for node in nodes: self.stop_storage_service(node) + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Stop all S3 gates on cluster") + def stop_all_s3_gates(self, reversed_order: bool = False): + nodes = ( + reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes + ) + + for node in nodes: + self.stop_s3_gate(node) + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start host of node {node}") def start_node_host(self, node: ClusterNode): @@ -72,10 +82,18 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start stopped hosts") def start_stopped_hosts(self, reversed_order: bool = False): + if not self.stopped_nodes: + return + nodes = reversed(self.stopped_nodes) if reversed_order else self.stopped_nodes for node in nodes: with reporter.step(f"Start host {node.host.config.address}"): node.host.start_host() + if node in self.stopped_storage_nodes: + self.stopped_storage_nodes.remove(node) + + if node in self.stopped_s3_gates: + self.stopped_s3_gates.remove(node) self.stopped_nodes = [] wait_all_storage_nodes_returned(self.shell, self.cluster) @@ -115,44 +133,51 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start stopped storage services") def start_stopped_storage_services(self): - if self.stopped_storage_nodes: - # In case if we stopped couple services, for example (s01-s04): - # After starting only s01, it may require connections to s02-s04, which is still down, and fail to start. - # Also, if something goes wrong here, we might skip s02-s04 start at all, and cluster will be left in a bad state. - # So in order to make sure that services are at least attempted to be started, using threads here. - with ThreadPoolExecutor(max_workers=len(self.stopped_storage_nodes)) as executor: - start_result = executor.map(self.start_storage_service, self.stopped_storage_nodes) + if not self.stopped_storage_nodes: + return - # Looks tricky, but if exception is raised in any thread, it will be "eaten" by ThreadPoolExecutor, - # But will be thrown here. - # Not ideal solution, but okay for now - for _ in start_result: - pass + # In case if we stopped couple services, for example (s01-s04): + # After starting only s01, it may require connections to s02-s04, which is still down, and fail to start. + # Also, if something goes wrong here, we might skip s02-s04 start at all, and cluster will be left in a bad state. + # So in order to make sure that services are at least attempted to be started, using threads here. 
+ with ThreadPoolExecutor(max_workers=len(self.stopped_storage_nodes)) as executor: + start_result = executor.map(self.start_storage_service, self.stopped_storage_nodes) + + # Looks tricky, but if exception is raised in any thread, it will be "eaten" by ThreadPoolExecutor, + # But will be thrown here. + # Not ideal solution, but okay for now + for _ in start_result: + pass wait_all_storage_nodes_returned(self.shell, self.cluster) self.stopped_storage_nodes = [] - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop s3 gate on {node}") def stop_s3_gate(self, node: ClusterNode): node.s3_gate.stop_service() - self.stopped_s3_gate.append(node) + self.stopped_s3_gates.append(node) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start s3 gate on {node}") def start_s3_gate(self, node: ClusterNode): node.s3_gate.start_service() - self.stopped_s3_gate.remove(node) - + self.stopped_s3_gates.remove(node) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start stopped S3 gates") - def start_stopped_s3_gate(self): - # not sure if we need here to use threads like in start_stopped_storage_services - for s3_gate in self.stopped_s3_gate: - s3_gate.start_service() - self.stopped_s3_gate = [] + def start_stopped_s3_gates(self): + if not self.stopped_s3_gates: + return + + with ThreadPoolExecutor(max_workers=len(self.stopped_s3_gates)) as executor: + start_result = executor.map(self.start_s3_gate, self.stopped_s3_gates) + + # Looks tricky, but if exception is raised in any thread, it will be "eaten" by ThreadPoolExecutor, + # But will be thrown here. + # Not ideal solution, but okay for now + for _ in start_result: + pass @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Suspend {process_name} service in {node}") From 889e108be9d3fb923eb7d43bbd4c0cc0283b03a8 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 27 Jun 2023 11:47:42 +0300 Subject: [PATCH 100/363] Update epoch align check Signed-off-by: Andrey Berezin --- src/frostfs_testlib/steps/epoch.py | 18 ++++++++---------- .../controllers/cluster_state_controller.py | 11 ----------- 2 files changed, 8 insertions(+), 21 deletions(-) diff --git a/src/frostfs_testlib/steps/epoch.py b/src/frostfs_testlib/steps/epoch.py index 0d40f8d..a589569 100644 --- a/src/frostfs_testlib/steps/epoch.py +++ b/src/frostfs_testlib/steps/epoch.py @@ -54,16 +54,14 @@ def ensure_fresh_epoch( return epoch -@reporter.step_deco("Wait for epochs align in whole cluster") -@wait_for_success(60, 5) -def wait_for_epochs_align(shell: Shell, cluster: Cluster) -> None: - epochs = [] - for node in cluster.services(StorageNode): - epochs.append(get_epoch(shell, cluster, node)) - unique_epochs = list(set(epochs)) - assert ( - len(unique_epochs) == 1 - ), f"unaligned epochs found, {epochs}, count of unique epochs {len(unique_epochs)}" +@reporter.step_deco("Wait up to {timeout} seconds for nodes on cluster to align epochs") +def wait_for_epochs_align(shell: Shell, cluster: Cluster, timeout=60): + @wait_for_success(timeout, 5, None, True) + def check_epochs(): + epochs_by_node = get_epochs_from_nodes(shell, cluster) + assert len(set(epochs_by_node.values())) == 1, f"unaligned epochs found: {epochs_by_node}" + + check_epochs() @reporter.step_deco("Get Epoch") diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 6126f9d..c73a8f4 100644 --- 
a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -220,17 +220,6 @@ class ClusterStateController: wait_for_host_online(self.shell, node.storage_node) wait_for_node_online(node.storage_node) - @reporter.step_deco("Wait up to {timeout} seconds for nodes on cluster to align epochs") - def wait_for_epochs_align(self, timeout=60): - @wait_for_success(timeout, 5, None, True) - def check_epochs(): - epochs_by_node = epoch.get_epochs_from_nodes(self.shell, self.cluster) - assert ( - len(set(epochs_by_node.values())) == 1 - ), f"unaligned epochs found: {epochs_by_node}" - - check_epochs() - def _get_disk_controller( self, node: StorageNode, device: str, mountpoint: str ) -> DiskController: From ac28df2652a1cc875ab84c755191db30d4f1a242 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Mon, 26 Jun 2023 16:48:45 +0300 Subject: [PATCH 101/363] Removed --bearer_rules parameter from init s3 credentials due to changes in 1.3 --- src/frostfs_testlib/cli/frostfs_authmate/secret.py | 1 - src/frostfs_testlib/steps/s3/s3_helper.py | 2 -- 2 files changed, 3 deletions(-) diff --git a/src/frostfs_testlib/cli/frostfs_authmate/secret.py b/src/frostfs_testlib/cli/frostfs_authmate/secret.py index ba5b5f5..5f300bc 100644 --- a/src/frostfs_testlib/cli/frostfs_authmate/secret.py +++ b/src/frostfs_testlib/cli/frostfs_authmate/secret.py @@ -44,7 +44,6 @@ class FrostfsAuthmateSecret(CliCommand): wallet: str, wallet_password: str, peer: str, - bearer_rules: str, gate_public_key: Union[str, list[str]], address: Optional[str] = None, container_id: Optional[str] = None, diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py index d6c2095..ae27124 100644 --- a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3/s3_helper.py @@ -189,7 +189,6 @@ def init_s3_credentials( wallet: WalletInfo, shell: Shell, cluster: Cluster, - s3_bearer_rules_file: str, policy: Optional[dict] = None, s3gates: Optional[list[S3Gate]] = None, ): @@ -203,7 +202,6 @@ def init_s3_credentials( issue_secret_output = frostfs_authmate_exec.secret.issue( wallet=wallet.path, peer=cluster.default_rpc_endpoint, - bearer_rules=s3_bearer_rules_file, gate_public_key=gate_public_keys, wallet_password=wallet.password, container_policy=policy, From 05ac39248504927a246e2a1e4049469bf3eed9b6 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 27 Jun 2023 17:06:01 +0300 Subject: [PATCH 102/363] Remove deleted parameter Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/runners.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index 6f9d046..d8758f6 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -77,7 +77,6 @@ class DefaultRunner(ScenarioRunner): issue_secret_output = frostfs_authmate_exec.secret.issue( wallet=self.loaders_wallet.path, peer=grpc_peer, - bearer_rules=f"{k6_dir}/scenarios/files/rules.json", gate_public_key=s3_public_keys, container_placement_policy=load_params.preset.container_placement_policy, container_policy=f"{k6_dir}/scenarios/files/policy.json", From a14b082a4da57dd3346901951d95dae45912f2ed Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 4 Jul 2023 19:25:24 +0300 Subject: [PATCH 103/363] Make load things parallel Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/interfaces.py | 4 + src/frostfs_testlib/load/k6.py | 83 +++--- 
src/frostfs_testlib/load/load_report.py | 8 +- src/frostfs_testlib/load/runners.py | 254 +++++++++--------- .../controllers/background_load_controller.py | 12 +- src/frostfs_testlib/testing/__init__.py | 2 + src/frostfs_testlib/testing/parallel.py | 98 +++++++ src/frostfs_testlib/utils/__init__.py | 1 + 8 files changed, 284 insertions(+), 178 deletions(-) create mode 100644 src/frostfs_testlib/testing/__init__.py create mode 100644 src/frostfs_testlib/testing/parallel.py diff --git a/src/frostfs_testlib/load/interfaces.py b/src/frostfs_testlib/load/interfaces.py index fbbc20b..6f29868 100644 --- a/src/frostfs_testlib/load/interfaces.py +++ b/src/frostfs_testlib/load/interfaces.py @@ -39,6 +39,10 @@ class ScenarioRunner(ABC): def stop(self): """Stop K6 instances""" + @abstractmethod + def preset(self): + """Run preset for load""" + @property @abstractmethod def is_running(self) -> bool: diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index ca3f696..7ec3c21 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -72,58 +72,58 @@ class K6: def process_dir(self) -> str: return self._k6_process.process_dir - @reporter.step_deco("Preset containers and objects") def preset(self) -> str: - preset_grpc = f"{self._k6_dir}/scenarios/preset/preset_grpc.py" - preset_s3 = f"{self._k6_dir}/scenarios/preset/preset_s3.py" - preset_map = { - LoadType.gRPC: preset_grpc, - LoadType.S3: preset_s3, - LoadType.HTTP: preset_grpc, - } + with reporter.step(f"Run preset on loader {self.loader.ip} for endpoints {self.endpoints}"): + preset_grpc = f"{self._k6_dir}/scenarios/preset/preset_grpc.py" + preset_s3 = f"{self._k6_dir}/scenarios/preset/preset_s3.py" + preset_map = { + LoadType.gRPC: preset_grpc, + LoadType.S3: preset_s3, + LoadType.HTTP: preset_grpc, + } - base_args = { - preset_grpc: [ - preset_grpc, - f"--endpoint {self.endpoints[0]}", - f"--wallet {self.wallet.path} ", - f"--config {self.wallet.config_path} ", - ], - preset_s3: [ - preset_s3, - f"--endpoint {self.endpoints[0]}", - ], - } + base_args = { + preset_grpc: [ + preset_grpc, + f"--endpoint {','.join(self.endpoints)}", + f"--wallet {self.wallet.path} ", + f"--config {self.wallet.config_path} ", + ], + preset_s3: [ + preset_s3, + f"--endpoint {','.join(self.endpoints)}", + ], + } - preset_scenario = preset_map[self.load_params.load_type] - command_args = base_args[preset_scenario].copy() + preset_scenario = preset_map[self.load_params.load_type] + command_args = base_args[preset_scenario].copy() - command_args += [ - f"--{field.metadata['preset_argument']} '{getattr(self.load_params, field.name)}'" - for field in fields(self.load_params) - if field.metadata - and self.scenario in field.metadata["applicable_scenarios"] - and field.metadata["preset_argument"] - and getattr(self.load_params, field.name) is not None - ] - - if self.load_params.preset: command_args += [ - f"--{field.metadata['preset_argument']} '{getattr(self.load_params.preset, field.name)}'" - for field in fields(self.load_params.preset) + f"--{field.metadata['preset_argument']} '{getattr(self.load_params, field.name)}'" + for field in fields(self.load_params) if field.metadata and self.scenario in field.metadata["applicable_scenarios"] and field.metadata["preset_argument"] - and getattr(self.load_params.preset, field.name) is not None + and getattr(self.load_params, field.name) is not None ] - command = " ".join(command_args) - result = self.shell.exec(command) + if self.load_params.preset: + command_args += [ + 
f"--{field.metadata['preset_argument']} '{getattr(self.load_params.preset, field.name)}'" + for field in fields(self.load_params.preset) + if field.metadata + and self.scenario in field.metadata["applicable_scenarios"] + and field.metadata["preset_argument"] + and getattr(self.load_params.preset, field.name) is not None + ] - assert ( - result.return_code == EXIT_RESULT_CODE - ), f"Return code of preset is not zero: {result.stdout}" - return result.stdout.strip("\n") + command = " ".join(command_args) + result = self.shell.exec(command) + + assert ( + result.return_code == EXIT_RESULT_CODE + ), f"Return code of preset is not zero: {result.stdout}" + return result.stdout.strip("\n") @reporter.step_deco("Generate K6 command") def _generate_env_variables(self) -> str: @@ -232,7 +232,6 @@ class K6: self._wait_until_process_end() - @property def is_running(self) -> bool: if self._k6_process: return self._k6_process.running() diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py index 7f912e4..dcd81b4 100644 --- a/src/frostfs_testlib/load/load_report.py +++ b/src/frostfs_testlib/load/load_report.py @@ -43,8 +43,10 @@ class LoadReport: return html def _get_load_params_section_html(self) -> str: - params: str = yaml.safe_dump(self.load_test, sort_keys=False) - params = params.replace("\n", "
") + params: str = yaml.safe_dump( + [self.load_test], sort_keys=False, indent=2, explicit_start=True + ) + params = params.replace("\n", "
").replace(" ", " ") section_html = f"""

Scenario params

{params}
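For reference, the reworked renderer above produces HTML-safe text like this (a minimal standalone sketch; the one-entry load_test list is an assumed example shape, not data taken from the patch):

import yaml

load_test = [{"load_type": "s3", "writers": 50}]  # assumed example payload
params = yaml.safe_dump(load_test, sort_keys=False, indent=2, explicit_start=True)
# '---\n- load_type: s3\n  writers: 50\n'
html_safe = params.replace("\n", "<br>").replace(" ", "&nbsp;")
# '---<br>-&nbsp;load_type:&nbsp;s3<br>&nbsp;&nbsp;writers:&nbsp;50<br>'
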
@@ -139,7 +141,7 @@ class LoadReport: duration = self._seconds_to_formatted_duration(self.load_params.load_time) model = self._get_model_string() # write 8KB 15h49m 50op/sec 50th open model/closed model/min_iteration duration=1s - 1.636MB/s 199.57451/s - short_summary = f"{operation_type} {object_size}{object_size_unit} {duration} {requested_rate_str} {vus_str} {model} - {throughput:.2f}{unit} {total_rate:.2f}/s" + short_summary = f"{operation_type} {object_size}{object_size_unit} {duration} {requested_rate_str} {vus_str} {model} - {throughput:.2f}{unit}/s {total_rate:.2f}/s" html = f""" diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index d8758f6..d6cf2ae 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -28,15 +28,31 @@ from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode from frostfs_testlib.storage.dataclasses.wallet import WalletInfo -from frostfs_testlib.testing.test_control import run_optionally -from frostfs_testlib.utils import datetime_utils -from frostfs_testlib.utils.file_keeper import FileKeeper +from frostfs_testlib.testing import parallel, run_optionally +from frostfs_testlib.utils import FileKeeper, datetime_utils reporter = get_reporter() -class DefaultRunner(ScenarioRunner): +class RunnerBase(ScenarioRunner): k6_instances: list[K6] + + @reporter.step_deco("Run preset on loaders") + def preset(self): + parallel([k6.preset for k6 in self.k6_instances]) + + @reporter.step_deco("Wait until load finish") + def wait_until_finish(self): + parallel([k6.wait_until_finished for k6 in self.k6_instances]) + + @property + def is_running(self): + futures = parallel([k6.is_running for k6 in self.k6_instances]) + + return any([future.result() for future in futures]) + + +class DefaultRunner(RunnerBase): loaders: list[Loader] loaders_wallet: WalletInfo @@ -51,7 +67,7 @@ class DefaultRunner(ScenarioRunner): self.loaders_wallet = loaders_wallet @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Prepare load instances") + @reporter.step_deco("Preparation steps") def prepare( self, load_params: LoadParams, @@ -68,48 +84,52 @@ class DefaultRunner(ScenarioRunner): ] grpc_peer = storage_node.get_rpc_endpoint() - for loader in self.loaders: - with reporter.step(f"Init s3 client on {loader.ip}"): - shell = loader.get_shell() - frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate( - shell, FROSTFS_AUTHMATE_EXEC - ) - issue_secret_output = frostfs_authmate_exec.secret.issue( - wallet=self.loaders_wallet.path, - peer=grpc_peer, - gate_public_key=s3_public_keys, - container_placement_policy=load_params.preset.container_placement_policy, - container_policy=f"{k6_dir}/scenarios/files/policy.json", - wallet_password=self.loaders_wallet.password, - ).stdout - aws_access_key_id = str( - re.search( - r"access_key_id.*:\s.(?P\w*)", issue_secret_output - ).group("aws_access_key_id") - ) - aws_secret_access_key = str( - re.search( - r"secret_access_key.*:\s.(?P\w*)", - issue_secret_output, - ).group("aws_secret_access_key") - ) + parallel( + self._prepare_loader, self.loaders, load_params, grpc_peer, s3_public_keys, k6_dir + ) - configure_input = [ - InteractiveInput( - prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id - ), - InteractiveInput( - prompt_pattern=r"AWS Secret Access Key.*", 
input=aws_secret_access_key - ), - InteractiveInput(prompt_pattern=r".*", input=""), - InteractiveInput(prompt_pattern=r".*", input=""), - ] - shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input)) + def _prepare_loader( + self, + loader: Loader, + load_params: LoadParams, + grpc_peer: str, + s3_public_keys: list[str], + k6_dir: str, + ): + with reporter.step(f"Init s3 client on {loader.ip}"): + shell = loader.get_shell() + frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC) + issue_secret_output = frostfs_authmate_exec.secret.issue( + wallet=self.loaders_wallet.path, + peer=grpc_peer, + gate_public_key=s3_public_keys, + container_placement_policy=load_params.preset.container_placement_policy, + container_policy=f"{k6_dir}/scenarios/files/policy.json", + wallet_password=self.loaders_wallet.password, + ).stdout + aws_access_key_id = str( + re.search( + r"access_key_id.*:\s.(?P\w*)", issue_secret_output + ).group("aws_access_key_id") + ) + aws_secret_access_key = str( + re.search( + r"secret_access_key.*:\s.(?P\w*)", + issue_secret_output, + ).group("aws_secret_access_key") + ) - def wait_until_finish(self): - for k6_instance in self.k6_instances: - k6_instance.wait_until_finished() + configure_input = [ + InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id), + InteractiveInput( + prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key + ), + InteractiveInput(prompt_pattern=r".*", input=""), + InteractiveInput(prompt_pattern=r".*", input=""), + ] + shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input)) + @reporter.step_deco("Init k6 instances") def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): self.k6_instances = [] cycled_loaders = itertools.cycle(self.loaders) @@ -131,29 +151,32 @@ class DefaultRunner(ScenarioRunner): load_params, k6_processes_count ) - for distributed_load_params in distributed_load_params_list: - loader = next(cycled_loaders) - shell = loader.get_shell() - with reporter.step( - f"Init K6 instances on {loader.ip} for load id {distributed_load_params.load_id}" - ): - with reporter.step(f"Make working directory"): - shell.exec(f"sudo mkdir -p {distributed_load_params.working_dir}") - shell.exec( - f"sudo chown {LOAD_NODE_SSH_USER} {distributed_load_params.working_dir}" - ) + futures = parallel( + self._init_k6_instance, + distributed_load_params_list, + loader=cycled_loaders, + endpoints=endpoints_gen, + k6_dir=k6_dir, + ) + self.k6_instances = [future.result() for future in futures] - k6_instance = K6( - distributed_load_params, - next(endpoints_gen), - k6_dir, - shell, - loader, - self.loaders_wallet, - ) - self.k6_instances.append(k6_instance) - if load_params.preset: - k6_instance.preset() + def _init_k6_instance( + self, load_params_for_loader: LoadParams, loader: Loader, endpoints: list[str], k6_dir: str + ): + shell = loader.get_shell() + with reporter.step(f"Init K6 instance on {loader.ip} for endpoints {endpoints}"): + with reporter.step(f"Make working directory"): + shell.exec(f"sudo mkdir -p {load_params_for_loader.working_dir}") + shell.exec(f"sudo chown {LOAD_NODE_SSH_USER} {load_params_for_loader.working_dir}") + + return K6( + load_params_for_loader, + endpoints, + k6_dir, + shell, + loader, + self.loaders_wallet, + ) def _get_distributed_load_params_list( self, original_load_params: LoadParams, workers_count: int @@ -215,15 +238,7 @@ class DefaultRunner(ScenarioRunner): def start(self): 
load_params = self.k6_instances[0].load_params - with ThreadPoolExecutor(max_workers=len(self.k6_instances)) as executor: - futures = [executor.submit(k6.start) for k6 in self.k6_instances] - - # Check for exceptions - exceptions = [future.exception() for future in futures if future.exception()] - if exceptions: - raise RuntimeError( - f"The following exceptions occured during start of k6: {exceptions}" - ) + parallel([k6.start for k6 in self.k6_instances]) wait_after_start_time = datetime_utils.parse_time(load_params.setup_timeout) + 5 with reporter.step( @@ -251,17 +266,8 @@ class DefaultRunner(ScenarioRunner): return results - @property - def is_running(self): - for k6_instance in self.k6_instances: - if not k6_instance.is_running: - return False - return True - - -class LocalRunner(ScenarioRunner): - k6_instances: list[K6] +class LocalRunner(RunnerBase): loaders: list[Loader] cluster_state_controller: ClusterStateController file_keeper: FileKeeper @@ -278,7 +284,7 @@ class LocalRunner(ScenarioRunner): self.loaders = [NodeLoader(node) for node in nodes_under_load] @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Prepare load instances") + @reporter.step_deco("Preparation steps") def prepare( self, load_params: LoadParams, @@ -319,37 +325,39 @@ class LocalRunner(ScenarioRunner): for _ in result: pass - def wait_until_finish(self): - for k6_instance in self.k6_instances: - k6_instance.wait_until_finished() - + @reporter.step_deco("Init k6 instances") def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): self.k6_instances = [] - for loader in self.loaders: - shell = loader.get_shell() - with reporter.step(f"Init K6 instances on {loader.ip}"): - with reporter.step(f"Make working directory"): - shell.exec(f"sudo mkdir -p {load_params.working_dir}") - # If we chmod /home/ folder we can no longer ssh to the node - # !! IMPORTANT !! - if ( - load_params.working_dir - and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}" - and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}/" - ): - shell.exec(f"sudo chmod -R 777 {load_params.working_dir}") + futures = parallel( + self._init_k6_instance, + self.loaders, + load_params, + k6_dir, + ) + self.k6_instances = [future.result() for future in futures] - k6_instance = K6( - load_params, - ["localhost:8080"], - k6_dir, - shell, - loader, - self.wallet, - ) - self.k6_instances.append(k6_instance) - if load_params.preset: - k6_instance.preset() + def _init_k6_instance(self, loader: Loader, load_params: LoadParams, k6_dir: str): + shell = loader.get_shell() + with reporter.step(f"Init K6 instance on {loader.ip}"): + with reporter.step(f"Make working directory"): + shell.exec(f"sudo mkdir -p {load_params.working_dir}") + # If we chmod /home/ folder we can no longer ssh to the node + # !! IMPORTANT !! 
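+            # (sshd's default StrictModes setting refuses publickey logins when
+            # the user's home directory is group- or world-writable, so a
+            # recursive chmod 777 on /home/<user> would lock us out of the node.)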
+ if ( + load_params.working_dir + and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}" + and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}/" + ): + shell.exec(f"sudo chmod -R 777 {load_params.working_dir}") + + return K6( + load_params, + ["localhost:8080"], + k6_dir, + shell, + loader, + self.wallet, + ) def start(self): load_params = self.k6_instances[0].load_params @@ -357,15 +365,7 @@ class LocalRunner(ScenarioRunner): self.cluster_state_controller.stop_all_s3_gates() self.cluster_state_controller.stop_all_storage_services() - with ThreadPoolExecutor(max_workers=len(self.k6_instances)) as executor: - futures = [executor.submit(k6.start) for k6 in self.k6_instances] - - # Check for exceptions - exceptions = [future.exception() for future in futures if future.exception()] - if exceptions: - raise RuntimeError( - f"The following exceptions occured during start of k6: {exceptions}" - ) + parallel([k6.start for k6 in self.k6_instances]) wait_after_start_time = datetime_utils.parse_time(load_params.setup_timeout) + 5 with reporter.step( @@ -387,11 +387,3 @@ class LocalRunner(ScenarioRunner): results[k6_instance.loader.ip] = result return results - - @property - def is_running(self): - for k6_instance in self.k6_instances: - if not k6_instance.is_running: - return False - - return True diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index 6cedd0f..ac3a920 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -80,14 +80,17 @@ class BackgroundLoadController: LoadType.S3: { EndpointSelectionStrategy.ALL: list( set( - endpoint.replace("http://", "") + endpoint.replace("http://", "").replace("https://", "") for node_under_load in self.nodes_under_load for endpoint in node_under_load.service(S3Gate).get_all_endpoints() ) ), EndpointSelectionStrategy.FIRST: list( set( - node_under_load.service(S3Gate).get_endpoint().replace("http://", "") + node_under_load.service(S3Gate) + .get_endpoint() + .replace("http://", "") + .replace("https://", "") for node_under_load in self.nodes_under_load ) ), @@ -131,8 +134,13 @@ class BackgroundLoadController: @reporter.step_deco("Startup load") def startup(self): self.prepare() + self.preset() self.start() + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + def preset(self): + self.runner.preset() + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @reporter.step_deco("Stop and get results of load") def teardown(self, load_report: Optional[LoadReport] = None): diff --git a/src/frostfs_testlib/testing/__init__.py b/src/frostfs_testlib/testing/__init__.py new file mode 100644 index 0000000..3483972 --- /dev/null +++ b/src/frostfs_testlib/testing/__init__.py @@ -0,0 +1,2 @@ +from frostfs_testlib.testing.parallel import parallel +from frostfs_testlib.testing.test_control import expect_not_raises, run_optionally, wait_for_success diff --git a/src/frostfs_testlib/testing/parallel.py b/src/frostfs_testlib/testing/parallel.py new file mode 100644 index 0000000..7f4ee26 --- /dev/null +++ b/src/frostfs_testlib/testing/parallel.py @@ -0,0 +1,98 @@ +import itertools +from concurrent.futures import Future, ThreadPoolExecutor +from typing import Callable, Collection, Optional, Union + + +def parallel( + fn: Union[Callable, list[Callable]], + parallel_items: Optional[Collection] = None, + *args, + **kwargs, +) -> 
list[Future]: + """Parallel execution of selected function or list of function using ThreadPoolExecutor. + Also checks the exceptions of each thread. + + Args: + fn: function(s) to run. Can work in 2 modes: + 1. If you have dedicated function with some items to process in parallel, + like you do with executor.map(fn, parallel_items), pass this function as fn. + 2. If you need to process each item with it's own method, like you do + with executor.submit(fn, args, kwargs), pass list of methods here. + See examples in runners.py in this repo. + parallel_items: items to iterate on (should be None in case of 2nd mode). + args: any other args required in target function(s). + if any arg is itertool.cycle, it will be iterated before passing to new thread. + kwargs: any other kwargs required in target function(s) + if any kwarg is itertool.cycle, it will be iterated before passing to new thread. + + Returns: + list of futures. + """ + + if callable(fn): + if not parallel_items: + raise RuntimeError("Parallel items should not be none when fn is callable.") + futures = _run_by_items(fn, parallel_items, *args, **kwargs) + elif isinstance(fn, list): + futures = _run_by_fn_list(fn, *args, **kwargs) + else: + raise RuntimeError("Nothing to run. fn should be either callable or list of callables.") + + # Check for exceptions + exceptions = [future.exception() for future in futures if future.exception()] + if exceptions: + message = "\n".join([str(e) for e in exceptions]) + raise RuntimeError(f"The following exceptions occured during parallel run: {message}") + return futures + + +def _run_by_fn_list(fn_list: list[Callable], *args, **kwargs) -> list[Future]: + if not len(fn_list): + return [] + if not all([callable(f) for f in fn_list]): + raise RuntimeError("fn_list should contain only callables") + + futures: list[Future] = [] + + with ThreadPoolExecutor(max_workers=len(fn_list)) as executor: + for fn in fn_list: + task_args = _get_args(*args) + task_kwargs = _get_kwargs(**kwargs) + + futures.append(executor.submit(fn, *task_args, **task_kwargs)) + + return futures + + +def _run_by_items(fn: Callable, parallel_items: Collection, *args, **kwargs) -> list[Future]: + futures: list[Future] = [] + + with ThreadPoolExecutor(max_workers=len(parallel_items)) as executor: + for item in parallel_items: + task_args = _get_args(*args) + task_kwargs = _get_kwargs(**kwargs) + task_args.insert(0, item) + + futures.append(executor.submit(fn, *task_args, **task_kwargs)) + + return futures + + +def _get_kwargs(**kwargs): + actkwargs = {} + for key, arg in kwargs.items(): + if isinstance(arg, itertools.cycle): + actkwargs[key] = next(arg) + else: + actkwargs[key] = arg + return actkwargs + + +def _get_args(*args): + actargs = [] + for arg in args: + if isinstance(arg, itertools.cycle): + actargs.append(next(arg)) + else: + actargs.append(arg) + return actargs diff --git a/src/frostfs_testlib/utils/__init__.py b/src/frostfs_testlib/utils/__init__.py index fbc4a8f..01cf462 100644 --- a/src/frostfs_testlib/utils/__init__.py +++ b/src/frostfs_testlib/utils/__init__.py @@ -3,3 +3,4 @@ import frostfs_testlib.utils.datetime_utils import frostfs_testlib.utils.json_utils import frostfs_testlib.utils.string_utils import frostfs_testlib.utils.wallet_utils +from frostfs_testlib.utils.file_keeper import FileKeeper From f8409fa9f96e28385dcbb330bb09831ebef6e0f6 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Wed, 5 Jul 2023 09:21:25 +0300 Subject: [PATCH 104/363] Change name metric Inner ring Signed-off-by: Dmitriy Zayakin --- 
src/frostfs_testlib/storage/dataclasses/frostfs_services.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index 2b52c1f..23e3335 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -16,7 +16,7 @@ class InnerRing(NodeBase): """ def service_healthcheck(self) -> bool: - health_metric = "frostfs_node_ir_health" + health_metric = "frostfs_ir_ir_health" output = ( self.host.get_shell() .exec(f"curl -s localhost:6662 | grep {health_metric} | sed 1,2d") @@ -167,7 +167,7 @@ class StorageNode(NodeBase): def get_un_locode(self): return self._get_attribute(ConfigAttributes.UN_LOCODE) - + def get_data_directory(self) -> str: return self.host.get_data_directory(self.name) From 3050ccc9fa5597a398b6f81999de54f264d4d443 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Thu, 6 Jul 2023 14:20:23 +0300 Subject: [PATCH 105/363] Added -k parameter to curl to ignore self signed SSL certificate --- src/frostfs_testlib/steps/http/http_gate.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/steps/http/http_gate.py b/src/frostfs_testlib/steps/http/http_gate.py index 64bb5ce..e0ae8fa 100644 --- a/src/frostfs_testlib/steps/http/http_gate.py +++ b/src/frostfs_testlib/steps/http/http_gate.py @@ -215,13 +215,13 @@ def upload_via_http_gate_curl( # pre-clean _cmd_run("rm pipe -f") files = f"file=@pipe;filename={os.path.basename(filepath)}" - cmd = f"mkfifo pipe;cat {filepath} > pipe & curl --no-buffer -F '{files}' {attributes} {request}" + cmd = f"mkfifo pipe;cat {filepath} > pipe & curl -k --no-buffer -F '{files}' {attributes} {request}" output = _cmd_run(cmd, LONG_TIMEOUT) # clean up pipe _cmd_run("rm pipe") else: files = f"file=@{filepath};filename={os.path.basename(filepath)}" - cmd = f"curl -F '{files}' {attributes} {request}" + cmd = f"curl -k -F '{files}' {attributes} {request}" output = _cmd_run(cmd) if error_pattern: @@ -246,7 +246,7 @@ def get_via_http_curl(cid: str, oid: str, endpoint: str) -> str: request = f"{endpoint}/get/{cid}/{oid}" file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}") - cmd = f"curl {request} > {file_path}" + cmd = f"curl -k {request} > {file_path}" _cmd_run(cmd) return file_path From 14c85e0a9e8e288a0b4599843a7f803df9268ea2 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Fri, 7 Jul 2023 16:41:59 +0300 Subject: [PATCH 106/363] Added verify=False to requests calls to ignore self signed SSL certificate --- src/frostfs_testlib/steps/http/http_gate.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/frostfs_testlib/steps/http/http_gate.py b/src/frostfs_testlib/steps/http/http_gate.py index e0ae8fa..efc5258 100644 --- a/src/frostfs_testlib/steps/http/http_gate.py +++ b/src/frostfs_testlib/steps/http/http_gate.py @@ -49,7 +49,7 @@ def get_via_http_gate( else: request = f"{endpoint}{request_path}" - resp = requests.get(request, stream=True, timeout=timeout) + resp = requests.get(request, stream=True, timeout=timeout, verify=False) if not resp.ok: raise Exception( @@ -77,7 +77,7 @@ def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str, timeout: Optiona endpoint: http gate endpoint """ request = f"{endpoint}/zip/{cid}/{prefix}" - resp = requests.get(request, stream=True, timeout=timeout) + resp = requests.get(request, stream=True, 
timeout=timeout, verify=False) if not resp.ok: raise Exception( @@ -123,7 +123,7 @@ def get_via_http_gate_by_attribute( else: request = f"{endpoint}{request_path}" - resp = requests.get(request, stream=True, timeout=timeout) + resp = requests.get(request, stream=True, timeout=timeout, verify=False) if not resp.ok: raise Exception( @@ -156,7 +156,7 @@ def upload_via_http_gate( request = f"{endpoint}/upload/{cid}" files = {"upload_file": open(path, "rb")} body = {"filename": path} - resp = requests.post(request, files=files, data=body, headers=headers, timeout=timeout) + resp = requests.post(request, files=files, data=body, headers=headers, timeout=timeout, verify=False) if not resp.ok: raise Exception( From 917dc6f6d8cc22e4df046dd46a951d50dd3268d9 Mon Sep 17 00:00:00 2001 From: sstovbyra Date: Tue, 11 Jul 2023 18:22:54 +0300 Subject: [PATCH 107/363] add_wait_for_service_to_be_in_state --- src/frostfs_testlib/hosting/docker_host.py | 4 ++++ src/frostfs_testlib/hosting/interfaces.py | 13 +++++++++++++ 2 files changed, 17 insertions(+) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 5dcac9e..3934d9f 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -135,6 +135,10 @@ class DockerHost(Host): timeout=service_attributes.start_timeout, ) + def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None: + raise NotImplementedError("Not implemented for docker") + + def get_data_directory(self, service_name: str) -> str: service_attributes = self._get_service_attributes(service_name) return service_attributes.data_directory_path diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index 8d889da..cdd3379 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -266,3 +266,16 @@ class Host(ABC): True if message found in logs in the given time frame. False otherwise. """ + + + @abstractmethod + def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None: + """ + Waites for service to be in specified state. + + Args: + systemd_service_name: Service to wait state of. 
+ expected_state: State to wait for + timeout: Seconds to wait + + """ From 59b41579915c3750e3b98e7e859053a1c3bdee0b Mon Sep 17 00:00:00 2001 From: "d.anurin" Date: Wed, 12 Jul 2023 09:41:17 +0300 Subject: [PATCH 108/363] Added sudo parameter for getting shell with elevated rights or not --- src/frostfs_testlib/hosting/docker_host.py | 4 ++-- src/frostfs_testlib/hosting/interfaces.py | 5 ++++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 3934d9f..94ee2ff 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -61,10 +61,10 @@ class ServiceAttributes(ParsedAttributes): class DockerHost(Host): """Manages services hosted in Docker containers running on a local or remote machine.""" - def get_shell(self) -> Shell: + def get_shell(self, sudo: bool = True) -> Shell: host_attributes = HostAttributes.parse(self._config.attributes) command_inspectors = [] - if host_attributes.sudo_shell: + if sudo: command_inspectors.append(SudoInspector()) if not host_attributes.ssh_login: diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index cdd3379..b4f67fb 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -65,9 +65,12 @@ class Host(ABC): return cli_config @abstractmethod - def get_shell(self) -> Shell: + def get_shell(self, sudo: bool = True) -> Shell: """Returns shell to this host. + Args: + sudo: if True, run all commands in shell with elevated rights + Returns: Shell that executes commands on this host. """ From 62216293f8b4abcec73b92d6df3158745c90d7e3 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Fri, 14 Jul 2023 16:04:44 +0300 Subject: [PATCH 109/363] Updates for s3 k6 --- src/frostfs_testlib/load/k6.py | 60 +++-------------- src/frostfs_testlib/load/load_config.py | 67 ++++++++++++++++++- src/frostfs_testlib/load/runners.py | 4 +- .../controllers/background_load_controller.py | 7 +- 4 files changed, 80 insertions(+), 58 deletions(-) diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index 7ec3c21..cb3576e 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -2,9 +2,10 @@ import json import logging import math import os -from dataclasses import dataclass, fields +from dataclasses import dataclass from time import sleep from typing import Any +from urllib.parse import urlparse from frostfs_testlib.load.interfaces import Loader from frostfs_testlib.load.load_config import ( @@ -16,11 +17,7 @@ from frostfs_testlib.load.load_config import ( from frostfs_testlib.processes.remote_process import RemoteProcess from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.common import STORAGE_USER_NAME -from frostfs_testlib.resources.load_params import ( - K6_STOP_SIGNAL_TIMEOUT, - K6_TEARDOWN_PERIOD, - LOAD_NODE_SSH_USER, -) +from frostfs_testlib.resources.load_params import K6_STOP_SIGNAL_TIMEOUT, K6_TEARDOWN_PERIOD from frostfs_testlib.shell import Shell from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.test_control import wait_for_success @@ -60,10 +57,9 @@ class K6: self.loader: Loader = loader self.shell: Shell = shell self.wallet = wallet - self.scenario: LoadScenario = load_params.scenario self.summary_json: str = os.path.join( self.load_params.working_dir, - 
f"{self.load_params.load_id}_{self.scenario.value}_summary.json", + f"{self.load_params.load_id}_{self.load_params.scenario.value}_summary.json", ) self._k6_dir: str = k6_dir @@ -98,24 +94,7 @@ class K6: preset_scenario = preset_map[self.load_params.load_type] command_args = base_args[preset_scenario].copy() - command_args += [ - f"--{field.metadata['preset_argument']} '{getattr(self.load_params, field.name)}'" - for field in fields(self.load_params) - if field.metadata - and self.scenario in field.metadata["applicable_scenarios"] - and field.metadata["preset_argument"] - and getattr(self.load_params, field.name) is not None - ] - - if self.load_params.preset: - command_args += [ - f"--{field.metadata['preset_argument']} '{getattr(self.load_params.preset, field.name)}'" - for field in fields(self.load_params.preset) - if field.metadata - and self.scenario in field.metadata["applicable_scenarios"] - and field.metadata["preset_argument"] - and getattr(self.load_params.preset, field.name) is not None - ] + command_args += self.load_params.get_preset_arguments() command = " ".join(command_args) result = self.shell.exec(command) @@ -127,26 +106,7 @@ class K6: @reporter.step_deco("Generate K6 command") def _generate_env_variables(self) -> str: - env_vars = { - field.metadata["env_variable"]: getattr(self.load_params, field.name) - for field in fields(self.load_params) - if field.metadata - and self.scenario in field.metadata["applicable_scenarios"] - and field.metadata["env_variable"] - and getattr(self.load_params, field.name) is not None - } - - if self.load_params.preset: - env_vars.update( - { - field.metadata["env_variable"]: getattr(self.load_params.preset, field.name) - for field in fields(self.load_params.preset) - if field.metadata - and self.scenario in field.metadata["applicable_scenarios"] - and field.metadata["env_variable"] - and getattr(self.load_params.preset, field.name) is not None - } - ) + env_vars = self.load_params.get_env_vars() env_vars[f"{self.load_params.load_type.value.upper()}_ENDPOINTS"] = ",".join(self.endpoints) env_vars["SUMMARY_JSON"] = self.summary_json @@ -164,7 +124,7 @@ class K6: ): command = ( f"{self._k6_dir}/k6 run {self._generate_env_variables()} " - f"{self._k6_dir}/scenarios/{self.scenario.value}.js" + f"{self._k6_dir}/scenarios/{self.load_params.scenario.value}.js" ) user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None self._k6_process = RemoteProcess.create( @@ -215,10 +175,10 @@ class K6: summary_text = self.shell.exec(f"cat {self.summary_json}").stdout summary_json = json.loads(summary_text) - + endpoint = urlparse(self.endpoints[0]).netloc or self.endpoints[0] allure_filenames = { - K6ProcessAllocationStrategy.PER_LOAD_NODE: f"{self.loader.ip}_{self.scenario.value}_summary.json", - K6ProcessAllocationStrategy.PER_ENDPOINT: f"{self.loader.ip}_{self.scenario.value}_{self.endpoints[0]}_summary.json", + K6ProcessAllocationStrategy.PER_LOAD_NODE: f"{self.loader.ip}_{self.load_params.scenario.value}_summary.json", + K6ProcessAllocationStrategy.PER_ENDPOINT: f"{self.loader.ip}_{self.load_params.scenario.value}_{endpoint}_summary.json", } allure_filename = allure_filenames[self.load_params.k6_process_allocation_strategy] diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index c337d7c..357a129 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -1,7 +1,8 @@ import os -from dataclasses import dataclass, field +from dataclasses 
import dataclass, field, fields, is_dataclass from enum import Enum -from typing import Optional +from types import MappingProxyType +from typing import Any, Optional, get_args class LoadType(Enum): @@ -42,6 +43,12 @@ grpc_preset_scenarios = [ s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR] +@dataclass +class MetaField: + metadata: MappingProxyType + value: Any + + def metadata_field( applicable_scenarios: list[LoadScenario], preset_param: Optional[str] = None, @@ -138,6 +145,12 @@ class LoadParams: preset: Optional[Preset] = None # K6 download url k6_url: Optional[str] = None + # No ssl verification flag + no_verify_ssl: Optional[bool] = metadata_field( + [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.VERIFY, LoadScenario.HTTP], + "no-verify-ssl", + "NO_VERIFY_SSL", + ) # ------- COMMON SCENARIO PARAMS ------- # Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value. @@ -225,3 +238,53 @@ class LoadParams: self.registry_file = os.path.join(self.working_dir, f"{load_id}_registry.bolt") if self.preset: self.preset.pregen_json = os.path.join(self.working_dir, f"{load_id}_prepare.json") + + def get_env_vars(self): + env_vars = { + meta_field.metadata["env_variable"]: meta_field.value + for meta_field in self._get_meta_fields(self) + if self.scenario in meta_field.metadata["applicable_scenarios"] + and meta_field.metadata["env_variable"] + and meta_field.value + } + + return env_vars + + def get_preset_arguments(self): + command_args = [ + self._get_preset_argument(meta_field) + for meta_field in self._get_meta_fields(self) + if self.scenario in meta_field.metadata["applicable_scenarios"] + and meta_field.metadata["preset_argument"] + and meta_field.value + and self._get_preset_argument(meta_field) + ] + + return command_args + + @staticmethod + def _get_preset_argument(meta_field: MetaField) -> str: + if isinstance(meta_field.value, bool): + # For preset calls, bool values are passed with just -- if the value is True + return f"--{meta_field.metadata['preset_argument']}" if meta_field.value else "" + + return f"--{meta_field.metadata['preset_argument']} '{meta_field.value}'" + + @staticmethod + def _get_meta_fields(instance) -> list[MetaField]: + data_fields = fields(instance) + + fields_with_data = [ + MetaField(field.metadata, getattr(instance, field.name)) + for field in data_fields + if field.metadata and getattr(instance, field.name) + ] + + for field in data_fields: + actual_field_type = ( + get_args(field.type)[0] if len(get_args(field.type)) else get_args(field.type) + ) + if is_dataclass(actual_field_type) and getattr(instance, field.name): + fields_with_data += LoadParams._get_meta_fields(getattr(instance, field.name)) + + return fields_with_data or [] diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index d6cf2ae..428cd7d 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -6,6 +6,7 @@ import time from concurrent.futures import ThreadPoolExecutor from dataclasses import fields from typing import Optional +from urllib.parse import urlparse import yaml @@ -257,9 +258,10 @@ class DefaultRunner(RunnerBase): raise RuntimeError("k6_process_allocation_strategy should not be none") result = k6_instance.get_results() + endpoint = urlparse(k6_instance.endpoints[0]).netloc or k6_instance.endpoints[0] keys_map = { K6ProcessAllocationStrategy.PER_LOAD_NODE: k6_instance.loader.ip, - K6ProcessAllocationStrategy.PER_ENDPOINT: k6_instance.endpoints[0], + 
K6ProcessAllocationStrategy.PER_ENDPOINT: endpoint, } key = keys_map[k6_instance.load_params.k6_process_allocation_strategy] results[key] = result diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index ac3a920..58a7a6f 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -80,17 +80,14 @@ class BackgroundLoadController: LoadType.S3: { EndpointSelectionStrategy.ALL: list( set( - endpoint.replace("http://", "").replace("https://", "") + endpoint for node_under_load in self.nodes_under_load for endpoint in node_under_load.service(S3Gate).get_all_endpoints() ) ), EndpointSelectionStrategy.FIRST: list( set( - node_under_load.service(S3Gate) - .get_endpoint() - .replace("http://", "") - .replace("https://", "") + node_under_load.service(S3Gate).get_endpoint() for node_under_load in self.nodes_under_load ) ), From 4896abcec3959ef65c0e04515047510b0aeb951e Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 18 Jul 2023 20:38:37 +0300 Subject: [PATCH 110/363] Adding code validation targets Signed-off-by: Andrey Berezin --- .gitignore | 1 + CONTRIBUTING.md | 4 +- Makefile | 34 +++++++++++++--- pyproject.toml | 5 +++ requirements.txt | 1 + .../analytics/test_collector.py | 39 ++++++++++++------- .../analytics/test_exporter.py | 4 +- src/frostfs_testlib/hosting/docker_host.py | 18 +++++---- src/frostfs_testlib/utils/cli_utils.py | 4 +- tests/conftest.py | 5 +++ tests/helpers.py | 6 +-- 11 files changed, 80 insertions(+), 41 deletions(-) create mode 100644 tests/conftest.py diff --git a/.gitignore b/.gitignore index a7f7de0..e2967ea 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ # ignore IDE files .vscode .idea +venv.* # ignore temp files under any path .DS_Store diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5996820..fdcaec7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -63,9 +63,9 @@ $ git checkout -b feature/123-something_awesome ``` ### Test your changes -Before submitting any changes to the library, please, make sure that all unit tests are passing. To run the tests, please, use the following command: +Before submitting any changes to the library, please, make sure that linter and all unit tests are passing. To run the tests, please, use the following command: ```shell -$ python -m unittest discover --start-directory tests +$ make validation ``` To enable tests that interact with SSH server, please, setup SSH server and set the following environment variables before running the tests: diff --git a/Makefile b/Makefile index c746608..9dbd86c 100644 --- a/Makefile +++ b/Makefile @@ -3,6 +3,8 @@ PYTHON_VERSION := 3.10 VENV_DIR := venv.frostfs-testlib current_dir := $(shell pwd) +DIRECTORIES := $(sort $(dir $(wildcard ../frostfs-testlib-plugin-*/ ../*-testcases/))) +FROM_VENV := . 
${VENV_DIR}/bin/activate && venv: create requirements paths precommit @echo Ready @@ -14,14 +16,34 @@ precommit: paths: @echo Append paths for project @echo Virtual environment: ${VENV_DIR} - @sudo rm -rf ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth - @sudo touch ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth - @echo ${current_dir}/src/frostfs_testlib_frostfs_testlib | sudo tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth + @rm -rf ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth + @touch ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth + @echo ${current_dir}/src/frostfs_testlib_frostfs_testlib | tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth -create: - @echo Create virtual environment for +create: ${VENV_DIR} + +${VENV_DIR}: + @echo Create virtual environment ${VENV_DIR} virtualenv --python=python${PYTHON_VERSION} --prompt=frostfs-testlib ${VENV_DIR} requirements: @echo Isntalling pip requirements - . ${VENV_DIR}/bin/activate && pip install -Ur requirements.txt \ No newline at end of file + . ${VENV_DIR}/bin/activate && pip install -Ur requirements.txt + + +#### VALIDATION SECTION #### +lint: create requirements + ${FROM_VENV} pylint --disable R,C,W ./src + +unit_test: + @echo Starting unit tests + ${FROM_VENV} python -m pytest tests + +.PHONY: lint_dependent $(DIRECTORIES) +lint_dependent: $(DIRECTORIES) + +$(DIRECTORIES): + @echo checking dependent repo $@ + $(MAKE) validation -C $@ + +validation: lint unit_test lint_dependent \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 9140ee0..8fca533 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -64,3 +64,8 @@ push = false [tool.bumpver.file_patterns] "pyproject.toml" = ['current_version = "{version}"', 'version = "{version}"'] "src/frostfs_testlib/__init__.py" = ["{version}"] + +[tool.pytest.ini_options] +filterwarnings = [ + "ignore:Blowfish has been deprecated:cryptography.utils.CryptographyDeprecationWarning", +] \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 5b47640..1fdf844 100644 --- a/requirements.txt +++ b/requirements.txt @@ -17,6 +17,7 @@ black==22.8.0 bumpver==2022.1118 isort==5.12.0 pre-commit==2.20.0 +pylint==2.17.4 # Packaging dependencies build==0.8.0 diff --git a/src/frostfs_testlib/analytics/test_collector.py b/src/frostfs_testlib/analytics/test_collector.py index 0f5398e..56ee606 100644 --- a/src/frostfs_testlib/analytics/test_collector.py +++ b/src/frostfs_testlib/analytics/test_collector.py @@ -6,6 +6,7 @@ from docstring_parser.google import DEFAULT_SECTIONS, Section, SectionType DEFAULT_SECTIONS.append(Section("Steps", "steps", SectionType.MULTIPLE)) + class TestCase: """ Test case object implementation for use in collector and exporters @@ -106,7 +107,9 @@ class TestCaseCollector: # Read test_case suite and section name from test class if possible and get test function from class if test.cls: suite_name = test.cls.__dict__.get("__test_case_suite_name__", suite_name) - suite_section_name = test.cls.__dict__.get("__test_case_suite_section__", suite_section_name) + suite_section_name = test.cls.__dict__.get( + "__test_case_suite_section__", suite_section_name + ) test_function = test.cls.__dict__[test.originalname] else: # If no test class, read test function from module @@ -117,7 +120,9 @@ class TestCaseCollector: test_case_title = test_function.__dict__.get("__test_case_title__", None) test_case_priority = 
test_function.__dict__.get("__test_case_priority__", None) suite_name = test_function.__dict__.get("__test_case_suite_name__", suite_name) - suite_section_name = test_function.__dict__.get("__test_case_suite_section__", suite_section_name) + suite_section_name = test_function.__dict__.get( + "__test_case_suite_section__", suite_section_name + ) # Parce test_steps if they define in __doc__ doc_string = parse(test_function.__doc__, style=DocstringStyle.GOOGLE) @@ -125,7 +130,9 @@ class TestCaseCollector: if doc_string.short_description: test_case_description = doc_string.short_description if doc_string.long_description: - test_case_description = f"{doc_string.short_description}\r\n{doc_string.long_description}" + test_case_description = ( + f"{doc_string.short_description}\r\n{doc_string.long_description}" + ) if doc_string.meta: for meta in doc_string.meta: @@ -140,25 +147,27 @@ class TestCaseCollector: test_case_params = test_case_call_spec.id # Format title with params if test_case_title: - test_case_title = self.__format_string_with_params__(test_case_title,test_case_call_spec.params) + test_case_title = self.__format_string_with_params__( + test_case_title, test_case_call_spec.params + ) # Format steps with params if test_case_steps: for key, value in test_case_steps.items(): - value = self.__format_string_with_params__(value,test_case_call_spec.params) + value = self.__format_string_with_params__(value, test_case_call_spec.params) test_case_steps[key] = value # If there is set basic test case attributes create TestCase and return if test_case_id and test_case_title and suite_name and suite_name: test_case = TestCase( - id=test_case_id, - title=test_case_title, - description=test_case_description, - priority=test_case_priority, - steps=test_case_steps, - params=test_case_params, - suite_name=suite_name, - suite_section_name=suite_section_name, - ) + uuid_id=test_case_id, + title=test_case_title, + description=test_case_description, + priority=test_case_priority, + steps=test_case_steps, + params=test_case_params, + suite_name=suite_name, + suite_section_name=suite_section_name, + ) return test_case # Return None if there is no enough information for return test case return None @@ -187,4 +196,4 @@ class TestCaseCollector: test_case = self.__get_test_case_from_pytest_test__(test) if test_case: test_cases.append(test_case) - return test_cases \ No newline at end of file + return test_cases diff --git a/src/frostfs_testlib/analytics/test_exporter.py b/src/frostfs_testlib/analytics/test_exporter.py index 263995c..5a569c6 100644 --- a/src/frostfs_testlib/analytics/test_exporter.py +++ b/src/frostfs_testlib/analytics/test_exporter.py @@ -67,6 +67,6 @@ class TestExporter(ABC): steps = [{"content": value, "expected": " "} for key, value in test_case.steps.items()] if test_case_in_tms: - self.update_test_case(test_case, test_case_in_tms) + self.update_test_case(test_case, test_case_in_tms, test_suite, test_section) else: - self.create_test_case(test_case) + self.create_test_case(test_case, test_suite, test_section) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 94ee2ff..3addd92 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -135,13 +135,19 @@ class DockerHost(Host): timeout=service_attributes.start_timeout, ) - def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None: + def wait_for_service_to_be_in_state( + self, 
systemd_service_name: str, expected_state: str, timeout: int + ) -> None: raise NotImplementedError("Not implemented for docker") - def get_data_directory(self, service_name: str) -> str: service_attributes = self._get_service_attributes(service_name) - return service_attributes.data_directory_path + + client = self._get_docker_client() + volume_info = client.inspect_volume(service_attributes.volume_name) + volume_path = volume_info["Mountpoint"] + + return volume_path def delete_metabase(self, service_name: str) -> None: raise NotImplementedError("Not implemented for docker") @@ -159,11 +165,7 @@ class DockerHost(Host): raise NotImplementedError("Not implemented for docker") def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None: - service_attributes = self._get_service_attributes(service_name) - - client = self._get_docker_client() - volume_info = client.inspect_volume(service_attributes.volume_name) - volume_path = volume_info["Mountpoint"] + volume_path = self.get_data_directory(service_name) shell = self.get_shell() meta_clean_cmd = f"rm -rf {volume_path}/meta*/*" diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index 7ed1a27..d869714 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -68,9 +68,7 @@ def _cmd_run(cmd: str, timeout: int = 90) -> str: end_time = datetime.now() _attach_allure_log(cmd, cmd_output, return_code, start_time, end_time) logger.info( - f"Command: {cmd}\n" - f"Error:\nreturn code: {return_code}\n" - f"Output: {exc.output.decode('utf-8') if type(exc.output) is bytes else exc.output}" + f"Command: {cmd}\n" f"Error:\nreturn code: {return_code}\n" f"Output: {cmd_output}" ) raise diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..ea6d681 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,5 @@ +import os +import sys + +app_dir = os.path.join(os.getcwd(), "src") +sys.path.insert(0, app_dir) diff --git a/tests/helpers.py b/tests/helpers.py index 8391002..b7776fd 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -14,11 +14,7 @@ def format_error_details(error: Exception) -> str: Returns: String containing exception details. """ - detail_lines = traceback.format_exception( - etype=type(error), - value=error, - tb=error.__traceback__, - ) + detail_lines = traceback.format_exception(error) return "".join(detail_lines) From 15862e5901d431dc62d9f5f26fb65aca3e5e4df8 Mon Sep 17 00:00:00 2001 From: Vladimir Avdeev Date: Thu, 20 Jul 2023 03:41:21 +0300 Subject: [PATCH 111/363] Add optional parameter "copies_number" in "frostfs-cli object put" Signed-off-by: Vladimir Avdeev --- src/frostfs_testlib/cli/frostfs_cli/object.py | 2 ++ src/frostfs_testlib/steps/cli/object.py | 3 +++ 2 files changed, 5 insertions(+) diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py index 1c1d0ac..8915914 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/object.py +++ b/src/frostfs_testlib/cli/frostfs_cli/object.py @@ -224,6 +224,7 @@ class FrostfsCliObject(CliCommand): address: Optional[str] = None, attributes: Optional[dict] = None, bearer: Optional[str] = None, + copies_number: Optional[int] = None, disable_filename: bool = False, disable_timestamp: bool = False, expire_at: Optional[int] = None, @@ -241,6 +242,7 @@ class FrostfsCliObject(CliCommand): address: Address of wallet account. attributes: User attributes in form of Key1=Value1,Key2=Value2. 
bearer: File with signed JSON or binary encoded bearer token. + copies_number: Number of copies of the object to store within the RPC call. cid: Container ID. disable_filename: Do not set well-known filename attribute. disable_timestamp: Do not set well-known timestamp attribute. diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index 8be7982..d575522 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ -239,6 +239,7 @@ def put_object( shell: Shell, endpoint: str, bearer: Optional[str] = None, + copies_number: Optional[int] = None, attributes: Optional[dict] = None, xhdr: Optional[dict] = None, wallet_config: Optional[str] = None, @@ -256,6 +257,7 @@ def put_object( cid: ID of Container where we get the Object from shell: executor for cli command bearer: path to Bearer Token file, appends to `--bearer` key + copies_number: Number of copies of the object to store within the RPC call attributes: User attributes in form of Key1=Value1,Key2=Value2 endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key wallet_config: path to the wallet config @@ -276,6 +278,7 @@ def put_object( cid=cid, attributes=attributes, bearer=bearer, + copies_number=copies_number, expire_at=expire_at, no_progress=no_progress, xhdr=xhdr, From 8dcfae5cb2c4bea03c8ae16293feee5d97c3d6de Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Thu, 20 Jul 2023 12:45:19 +0300 Subject: [PATCH 112/363] Fix empty and zero values parsing Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/load_config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 357a129..e73eea7 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -245,7 +245,7 @@ class LoadParams: for meta_field in self._get_meta_fields(self) if self.scenario in meta_field.metadata["applicable_scenarios"] and meta_field.metadata["env_variable"] - and meta_field.value + and meta_field.value is not None } return env_vars @@ -256,7 +256,7 @@ class LoadParams: for meta_field in self._get_meta_fields(self) if self.scenario in meta_field.metadata["applicable_scenarios"] and meta_field.metadata["preset_argument"] - and meta_field.value + and meta_field.value is not None and self._get_preset_argument(meta_field) ] From 675183cd9a350b186f9b4a9b5448d1634142e240 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Fri, 21 Jul 2023 11:46:01 +0300 Subject: [PATCH 113/363] Fix empty and zero values parsing part 2 Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/load_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index e73eea7..73addf7 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -277,7 +277,7 @@ class LoadParams: fields_with_data = [ MetaField(field.metadata, getattr(instance, field.name)) for field in data_fields - if field.metadata and getattr(instance, field.name) + if field.metadata and getattr(instance, field.name) is not None ] for field in data_fields: From 49ccd47e814ead80ce9b642506f3e467581e8a4e Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Fri, 21 Jul 2023 15:28:10 +0300 Subject: [PATCH 114/363] =?UTF-8?q?Add=20=D1=81opies=5Fnumber=20argument?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit

Signed-off-by: Dmitriy Zayakin
---
 src/frostfs_testlib/steps/cli/object.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py
index d575522..9a63604 100644
--- a/src/frostfs_testlib/steps/cli/object.py
+++ b/src/frostfs_testlib/steps/cli/object.py
@@ -183,6 +183,7 @@ def put_object_to_random_node(
     shell: Shell,
     cluster: Cluster,
     bearer: Optional[str] = None,
+    copies_number: Optional[int] = None,
     attributes: Optional[dict] = None,
     xhdr: Optional[dict] = None,
     wallet_config: Optional[str] = None,
@@ -201,6 +202,7 @@ def put_object_to_random_node(
         shell: executor for cli command
         cluster: cluster under test
         bearer: path to Bearer Token file, appends to `--bearer` key
+        copies_number: Number of copies of the object to store within the RPC call
         attributes: User attributes in form of Key1=Value1,Key2=Value2
         wallet_config: path to the wallet config
@@ -221,6 +223,7 @@ def put_object_to_random_node(
         shell,
         endpoint,
         bearer,
+        copies_number,
         attributes,
         xhdr,
         wallet_config,

From 9c792c091e657acfefd99e598fc1420f9fbfc73d Mon Sep 17 00:00:00 2001
From: Andrey Berezin
Date: Thu, 20 Jul 2023 21:05:49 +0300
Subject: [PATCH 115/363] Add error_threshold parameter, add error check after load

Signed-off-by: Andrey Berezin
---
 Makefile | 11 ++-
 src/frostfs_testlib/load/load_config.py | 5 +
 src/frostfs_testlib/load/load_report.py | 1 +
 src/frostfs_testlib/load/load_verifiers.py | 94 +++++++++++++------
 .../controllers/background_load_controller.py | 54 ++++++-----
 5 files changed, 105 insertions(+), 60 deletions(-)

diff --git a/Makefile b/Makefile
index 9dbd86c..365e2fc 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,7 @@
 SHELL := /bin/bash
 PYTHON_VERSION := 3.10
-VENV_DIR := venv.frostfs-testlib
+VENV_NAME := frostfs-testlib
+VENV_DIR := venv.${VENV_NAME}
 current_dir := $(shell pwd)
 DIRECTORIES := $(sort $(dir $(wildcard ../frostfs-testlib-plugin-*/ ../*-testcases/)))
@@ -15,16 +16,16 @@ precommit:
 
 paths:
 	@echo Append paths for project
-	@echo Virtual environment: ${VENV_DIR}
+	@echo Virtual environment: ${current_dir}/${VENV_DIR}
 	@rm -rf ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
 	@touch ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
-	@echo ${current_dir}/src/frostfs_testlib_frostfs_testlib | tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
+	@echo ${current_dir}/src/frostfs_testlib | tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
 
 create: ${VENV_DIR}
 
 ${VENV_DIR}:
-	@echo Create virtual environment ${VENV_DIR}
-	virtualenv --python=python${PYTHON_VERSION} --prompt=frostfs-testlib ${VENV_DIR}
+	@echo Create virtual environment ${current_dir}/${VENV_DIR}
+	virtualenv --python=python${PYTHON_VERSION} --prompt=${VENV_NAME} ${VENV_DIR}
 
 requirements:
 	@echo Installing pip requirements
diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py
index 73addf7..9a7e49c 100644
--- a/src/frostfs_testlib/load/load_config.py
+++ b/src/frostfs_testlib/load/load_config.py
@@ -139,6 +139,11 @@ class LoadParams:
     verify: Optional[bool] = None
     # Just an id for the load so it can be distinguished between runs. Filled automatically.
load_id: Optional[str] = None + # Acceptable number of load errors in % + # 100 means 100% errors allowed + # 1.5 means 1.5% errors allowed + # 0 means no errors allowed + error_threshold: Optional[float] = None # Working directory working_dir: Optional[str] = None # Preset for the k6 run diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py index dcd81b4..fa71069 100644 --- a/src/frostfs_testlib/load/load_report.py +++ b/src/frostfs_testlib/load/load_report.py @@ -154,6 +154,7 @@ class LoadReport: {per_node_errors_html} {self._row("Total", f"{total_errors} ({total_errors/total_operations*100.0:.2f}%)")} + {self._row("Threshold", f"{self.load_params.error_threshold:.2f}%")}
Errors
        """
diff --git a/src/frostfs_testlib/load/load_verifiers.py b/src/frostfs_testlib/load/load_verifiers.py
index 1ff63ae..f2a3e7e 100644
--- a/src/frostfs_testlib/load/load_verifiers.py
+++ b/src/frostfs_testlib/load/load_verifiers.py
@@ -2,7 +2,9 @@ import logging
 
 from frostfs_testlib.load.load_config import LoadParams, LoadScenario
 from frostfs_testlib.load.load_metrics import get_metrics_object
+from frostfs_testlib.reporter import get_reporter
 
+reporter = get_reporter()
 logger = logging.getLogger("NeoLogger")
 
@@ -10,54 +12,88 @@ class LoadVerifier:
     def __init__(self, load_params: LoadParams) -> None:
         self.load_params = load_params
 
-    def verify_summaries(self, load_summary, verification_summary) -> None:
-        exceptions = []
+    def verify_load_results(self, load_summaries: dict[str, dict]):
+        write_operations = 0
+        write_errors = 0
 
-        if not verification_summary or not load_summary:
-            logger.info("Can't check load results due to missing summary")
+        read_operations = 0
+        read_errors = 0
 
-        load_metrics = get_metrics_object(self.load_params.scenario, load_summary)
+        delete_operations = 0
+        delete_errors = 0
 
         writers = self.load_params.writers or self.load_params.preallocated_writers or 0
         readers = self.load_params.readers or self.load_params.preallocated_readers or 0
         deleters = self.load_params.deleters or self.load_params.preallocated_deleters or 0
 
-        objects_count = load_metrics.write_success_iterations
-        fails_count = load_metrics.write_failed_iterations
+        for load_summary in load_summaries.values():
+            metrics = get_metrics_object(self.load_params.scenario, load_summary)
 
-        if writers > 0:
-            if objects_count < 1:
-                exceptions.append("Total put objects should be greater than 0")
-            if fails_count > 0:
-                exceptions.append(f"There were {fails_count} failed write operations")
+            if writers:
+                write_operations += metrics.write_total_iterations
+                write_errors += metrics.write_failed_iterations
 
-        if readers > 0:
-            read_count = load_metrics.read_success_iterations
-            read_fails_count = load_metrics.read_failed_iterations
-            if read_count < 1:
-                exceptions.append("Total read operations should be greater than 0")
-            if read_fails_count > 0:
-                exceptions.append(f"There were {read_fails_count} failed read operations")
+            if readers:
+                read_operations += metrics.read_total_iterations
+                read_errors += metrics.read_failed_iterations
+
+            if deleters:
+                delete_operations += metrics.delete_total_iterations
+                delete_errors += metrics.delete_failed_iterations
+
+        exceptions = []
+        if writers and not write_operations:
+            exceptions.append("No write operations were performed")
+        if readers and not read_operations:
+            exceptions.append("No read operations were performed")
+        if deleters and not delete_operations:
+            exceptions.append("No delete operations were performed")
+
+        if writers and write_errors / write_operations * 100 > self.load_params.error_threshold:
+            exceptions.append(
+                f"Write error rate is greater than threshold: {write_errors / write_operations * 100} > {self.load_params.error_threshold}"
+            )
+        if readers and read_errors / read_operations * 100 > self.load_params.error_threshold:
+            exceptions.append(
+                f"Read error rate is greater than threshold: {read_errors / read_operations * 100} > {self.load_params.error_threshold}"
+            )
+        if deleters and delete_errors / delete_operations * 100 > self.load_params.error_threshold:
+            exceptions.append(
+                f"Delete error rate is greater than threshold: {delete_errors / delete_operations * 100} > {self.load_params.error_threshold}"
+            )
+
+        assert not exceptions, "\n".join(exceptions)
+
+    def check_verify_results(self, load_summaries, verification_summaries) -> None:
+        for node_or_endpoint in load_summaries:
+            with reporter.step(f"Check verify scenario results for {node_or_endpoint}"):
+                self._check_verify_result(
+                    load_summaries[node_or_endpoint], verification_summaries[node_or_endpoint]
+                )
+
+    def _check_verify_result(self, load_summary, verification_summary) -> None:
+        exceptions = []
+
+        load_metrics = get_metrics_object(self.load_params.scenario, load_summary)
+
+        writers = self.load_params.writers or self.load_params.preallocated_writers or 0
+        deleters = self.load_params.deleters or self.load_params.preallocated_deleters or 0
+
+        delete_success = 0
 
         if deleters > 0:
-            delete_count = load_metrics.delete_success_iterations
-            delete_fails_count = load_metrics.delete_failed_iterations
-            if delete_count < 1:
-                exceptions.append("Total delete operations should be greater than 0")
-            if delete_fails_count > 0:
-                exceptions.append(f"There were {delete_fails_count} failed delete operations")
+            delete_success = load_metrics.delete_success_iterations
 
         if verification_summary:
             verify_metrics = get_metrics_object(LoadScenario.VERIFY, verification_summary)
             verified_objects = verify_metrics.read_success_iterations
             invalid_objects = verify_metrics.read_failed_iterations
+            total_left_objects = load_metrics.write_success_iterations - delete_success
 
-            if invalid_objects > 0:
-                exceptions.append(f"There were {invalid_objects} verification fails")
             # Due to interruptions, the number of verified objects may be less than the number written, by up to the writers count
-            if abs(objects_count - verified_objects) > writers:
+            if abs(total_left_objects - verified_objects) > writers:
                 exceptions.append(
-                    f"Verified objects mismatch. Total: {objects_count}, Verified: {verified_objects}. Writers: {writers}."
+                    f"Verified objects mismatch. Total: {total_left_objects}, Verified: {verified_objects}. Writers: {writers}."
) assert not exceptions, "\n".join(exceptions) diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index 58a7a6f..91cb1af 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -9,6 +9,7 @@ from frostfs_testlib.load.load_config import ( LoadScenario, LoadType, ) +from frostfs_testlib.load.load_metrics import get_metrics_object from frostfs_testlib.load.load_report import LoadReport from frostfs_testlib.load.load_verifiers import LoadVerifier from frostfs_testlib.reporter import get_reporter @@ -151,54 +152,55 @@ class BackgroundLoadController: load_report.add_summaries(self.load_summaries) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Verify results of load") + @reporter.step_deco("Run post-load verification") def verify(self): try: + self._verify_load_results() if self.load_params.verify: - self.verification_params = LoadParams( - verify_clients=self.load_params.verify_clients, - scenario=LoadScenario.VERIFY, - registry_file=self.load_params.registry_file, - verify_time=self.load_params.verify_time, - load_type=self.load_params.load_type, - load_id=self.load_params.load_id, - working_dir=self.load_params.working_dir, - endpoint_selection_strategy=self.load_params.endpoint_selection_strategy, - k6_process_allocation_strategy=self.load_params.k6_process_allocation_strategy, - setup_timeout="1s", - ) self._run_verify_scenario() - verification_summaries = self._get_results() - self.verify_summaries(self.load_summaries, verification_summaries) finally: self._reset_for_consequent_load() @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Verify summaries from k6") - def verify_summaries(self, load_summaries: dict, verification_summaries: dict): + @reporter.step_deco("Verify load results") + def _verify_load_results(self): verifier = LoadVerifier(self.load_params) - for node_or_endpoint in load_summaries: - with reporter.step(f"Verify load summaries for {node_or_endpoint}"): - verifier.verify_summaries( - load_summaries[node_or_endpoint], verification_summaries[node_or_endpoint] - ) + verifier.verify_load_results(self.load_summaries) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) def wait_until_finish(self): self.runner.wait_until_finish() @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Run verify scenario") + @reporter.step_deco("Verify loaded objects") def _run_verify_scenario(self): + self.verification_params = LoadParams( + verify_clients=self.load_params.verify_clients, + scenario=LoadScenario.VERIFY, + registry_file=self.load_params.registry_file, + verify_time=self.load_params.verify_time, + load_type=self.load_params.load_type, + load_id=self.load_params.load_id, + working_dir=self.load_params.working_dir, + endpoint_selection_strategy=self.load_params.endpoint_selection_strategy, + k6_process_allocation_strategy=self.load_params.k6_process_allocation_strategy, + setup_timeout="1s", + ) + if self.verification_params.verify_time is None: raise RuntimeError("verify_time should not be none") self.runner.init_k6_instances(self.verification_params, self.endpoints, self.k6_dir) - with reporter.step("Run verify load data"): + with reporter.step("Run verify scenario"): self.runner.start() self.runner.wait_until_finish() + with reporter.step("Check verify results"): + 
verification_summaries = self._get_results()
+            verifier = LoadVerifier(self.load_params)
+            verifier.check_verify_results(self.load_summaries, verification_summaries)
+
     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
-    @reporter.step_deco("Get load results")
     def _get_results(self) -> dict:
-        return self.runner.get_results()
+        with reporter.step(f"Get {self.load_params.scenario.value} scenario results"):
+            return self.runner.get_results()

From 38742badf2e6e565345cefa1fb48442046e58222 Mon Sep 17 00:00:00 2001
From: Andrey Berezin
Date: Fri, 21 Jul 2023 19:12:11 +0300
Subject: [PATCH 116/363] Add unit tests for load_config.py

Signed-off-by: Andrey Berezin
---
 src/frostfs_testlib/utils/__init__.py | 2 +
 tests/test_load_config.py | 541 ++++++++++++++++++++++++++
 2 files changed, 543 insertions(+)
 create mode 100644 tests/test_load_config.py

diff --git a/src/frostfs_testlib/utils/__init__.py b/src/frostfs_testlib/utils/__init__.py
index 01cf462..0ac903a 100644
--- a/src/frostfs_testlib/utils/__init__.py
+++ b/src/frostfs_testlib/utils/__init__.py
@@ -3,4 +3,6 @@ import frostfs_testlib.utils.datetime_utils
 import frostfs_testlib.utils.json_utils
 import frostfs_testlib.utils.string_utils
 import frostfs_testlib.utils.wallet_utils
+
+# TODO: Circular dependency FileKeeper -> NodeBase -> Utils -> FileKeeper -> NodeBase
 from frostfs_testlib.utils.file_keeper import FileKeeper
diff --git a/tests/test_load_config.py b/tests/test_load_config.py
new file mode 100644
index 0000000..a9b6de1
--- /dev/null
+++ b/tests/test_load_config.py
@@ -0,0 +1,541 @@
+from dataclasses import Field, dataclass, fields, is_dataclass
+from typing import Any, get_args
+
+import pytest
+
+from frostfs_testlib.load.load_config import LoadParams, LoadScenario, LoadType, Preset
+
+
+@dataclass
+class MetaTestField:
+    field: Field
+    field_type: type
+    instance: Any
+
+
+class TestLoadConfig:
+    @pytest.fixture
+    def set_empty(self, request: pytest.FixtureRequest):
+        # Workaround for verify
+        if "param" in request.__dict__ and request.param:
+            return request.param
+
+        return False
+
+    @pytest.fixture
+    def load_type(self, request: pytest.FixtureRequest):
+        # Workaround for verify
+        if "param" in request.__dict__ and request.param:
+            return request.param
+
+        return None
+
+    @pytest.fixture
+    def load_params(self, load_type: LoadType, set_empty: bool, request: pytest.FixtureRequest):
+        load_scenario = request.param
+        return self._get_filled_load_params(load_type, load_scenario, set_empty)
+
+    def test_load_params_only_load_type_required(self):
+        LoadParams(load_type=LoadType.S3)
+
+    def test_load_params_initially_have_all_values_none(self):
+        load_params = LoadParams(load_type=LoadType.S3)
+        self._check_all_values_none(load_params, ["load_type"])
+
+    def test_preset_initially_have_all_values_none(self):
+        preset = Preset()
+        self._check_all_values_none(preset)
+
+    def test_load_set_id_changes_fields(self):
+        load_params = LoadParams(load_type=LoadType.S3)
+        load_params.preset = Preset()
+        load_params.working_dir = "/tmp"
+        load_params.set_id("test_id")
+
+        assert load_params.registry_file == "/tmp/test_id_registry.bolt"
+        assert load_params.preset.pregen_json == "/tmp/test_id_prepare.json"
+        assert load_params.load_id == "test_id"
+
+        # No other values should be changed
+        self._check_all_values_none(
+            load_params, ["load_type", "working_dir", "load_id", "registry_file", "preset"]
+        )
+        self._check_all_values_none(load_params.preset, ["pregen_json"])
+
+    @pytest.mark.parametrize("load_params", [LoadScenario.gRPC],
indirect=True) + def test_argument_parsing_for_grpc_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '11'", + "--preload_obj '13'", + "--out 'pregen_json'", + "--workers '7'", + "--containers '16'", + "--policy 'container_placement_policy'", + ] + expected_env_vars = { + "DURATION": 9, + "WRITE_OBJ_SIZE": 11, + "REGISTRY_FILE": "registry_file", + "K6_MIN_ITERATION_DURATION": "min_iteration_duration", + "K6_SETUP_TIMEOUT": "setup_timeout", + "WRITERS": 7, + "READERS": 7, + "DELETERS": 8, + "PREGEN_JSON": "pregen_json", + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params", [LoadScenario.gRPC_CAR], indirect=True) + def test_argument_parsing_for_grpc_car_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '11'", + "--preload_obj '13'", + "--out 'pregen_json'", + "--workers '7'", + "--containers '16'", + "--policy 'container_placement_policy'", + ] + expected_env_vars = { + "DURATION": 9, + "WRITE_OBJ_SIZE": 11, + "REGISTRY_FILE": "registry_file", + "K6_MIN_ITERATION_DURATION": "min_iteration_duration", + "K6_SETUP_TIMEOUT": "setup_timeout", + "MAX_WRITERS": 11, + "MAX_READERS": 11, + "MAX_DELETERS": 12, + "PRE_ALLOC_DELETERS": 21, + "PRE_ALLOC_READERS": 20, + "PRE_ALLOC_WRITERS": 20, + "PREGEN_JSON": "pregen_json", + "TIME_UNIT": "time_unit", + "WRITE_RATE": 10, + "READ_RATE": 9, + "DELETE_RATE": 11, + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params", [LoadScenario.S3], indirect=True) + def test_argument_parsing_for_s3_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '11'", + "--preload_obj '13'", + "--no-verify-ssl", + "--out 'pregen_json'", + "--workers '7'", + "--buckets '13'", + "--location 's3_location'", + ] + expected_env_vars = { + "DURATION": 9, + "WRITE_OBJ_SIZE": 11, + "REGISTRY_FILE": "registry_file", + "K6_MIN_ITERATION_DURATION": "min_iteration_duration", + "K6_SETUP_TIMEOUT": "setup_timeout", + "WRITERS": 7, + "READERS": 7, + "DELETERS": 8, + "NO_VERIFY_SSL": True, + "PREGEN_JSON": "pregen_json", + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params", [LoadScenario.S3_CAR], indirect=True) + def test_argument_parsing_for_s3_car_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '11'", + "--preload_obj '13'", + "--no-verify-ssl", + "--out 'pregen_json'", + "--workers '7'", + "--buckets '13'", + "--location 's3_location'", + ] + expected_env_vars = { + "DURATION": 9, + "WRITE_OBJ_SIZE": 11, + "REGISTRY_FILE": "registry_file", + "K6_MIN_ITERATION_DURATION": "min_iteration_duration", + "K6_SETUP_TIMEOUT": "setup_timeout", + "NO_VERIFY_SSL": True, + "MAX_WRITERS": 11, + "MAX_READERS": 11, + "MAX_DELETERS": 12, + "PRE_ALLOC_DELETERS": 21, + "PRE_ALLOC_READERS": 20, + "PRE_ALLOC_WRITERS": 20, + "PREGEN_JSON": "pregen_json", + "TIME_UNIT": "time_unit", + "WRITE_RATE": 10, + "READ_RATE": 9, + "DELETE_RATE": 11, + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params", [LoadScenario.HTTP], indirect=True) + def test_argument_parsing_for_http_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--no-verify-ssl", + "--size 
'11'", + "--preload_obj '13'", + "--out 'pregen_json'", + "--workers '7'", + "--containers '16'", + "--policy 'container_placement_policy'", + ] + expected_env_vars = { + "DURATION": 9, + "WRITE_OBJ_SIZE": 11, + "NO_VERIFY_SSL": True, + "REGISTRY_FILE": "registry_file", + "K6_MIN_ITERATION_DURATION": "min_iteration_duration", + "K6_SETUP_TIMEOUT": "setup_timeout", + "WRITERS": 7, + "READERS": 7, + "DELETERS": 8, + "PREGEN_JSON": "pregen_json", + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params", [LoadScenario.LOCAL], indirect=True) + def test_argument_parsing_for_local_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '11'", + "--preload_obj '13'", + "--out 'pregen_json'", + "--workers '7'", + "--containers '16'", + "--policy 'container_placement_policy'", + ] + expected_env_vars = { + "CONFIG_FILE": "config_file", + "DURATION": 9, + "WRITE_OBJ_SIZE": 11, + "REGISTRY_FILE": "registry_file", + "K6_MIN_ITERATION_DURATION": "min_iteration_duration", + "K6_SETUP_TIMEOUT": "setup_timeout", + "WRITERS": 7, + "READERS": 7, + "DELETERS": 8, + "PREGEN_JSON": "pregen_json", + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize( + "load_params, load_type", [(LoadScenario.VERIFY, LoadType.S3)], indirect=True + ) + def test_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams): + expected_env_vars = { + "CLIENTS": 14, + "REGISTRY_FILE": "registry_file", + "K6_SETUP_TIMEOUT": "setup_timeout", + "NO_VERIFY_SSL": True, + "TIME_LIMIT": 11, + } + + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize( + "load_params, load_type", [(LoadScenario.VERIFY, LoadType.gRPC)], indirect=True + ) + def test_argument_parsing_for_grpc_verify_scenario(self, load_params: LoadParams): + expected_env_vars = { + "CLIENTS": 14, + "REGISTRY_FILE": "registry_file", + "K6_SETUP_TIMEOUT": "setup_timeout", + "NO_VERIFY_SSL": True, + "TIME_LIMIT": 11, + } + + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.gRPC, True)], indirect=True) + def test_empty_argument_parsing_for_grpc_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '0'", + "--preload_obj '0'", + "--out ''", + "--workers '0'", + "--containers '0'", + "--policy ''", + ] + expected_env_vars = { + "DURATION": 0, + "WRITE_OBJ_SIZE": 0, + "REGISTRY_FILE": "", + "K6_MIN_ITERATION_DURATION": "", + "K6_SETUP_TIMEOUT": "", + "WRITERS": 0, + "READERS": 0, + "DELETERS": 0, + "PREGEN_JSON": "", + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize( + "load_params, set_empty", [(LoadScenario.gRPC_CAR, True)], indirect=True + ) + def test_empty_argument_parsing_for_grpc_car_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '0'", + "--preload_obj '0'", + "--out ''", + "--workers '0'", + "--containers '0'", + "--policy ''", + ] + expected_env_vars = { + "DURATION": 0, + "WRITE_OBJ_SIZE": 0, + "REGISTRY_FILE": "", + "K6_MIN_ITERATION_DURATION": "", + "K6_SETUP_TIMEOUT": "", + "MAX_WRITERS": 0, + "MAX_READERS": 0, + "MAX_DELETERS": 0, + "PRE_ALLOC_DELETERS": 0, + "PRE_ALLOC_READERS": 0, + "PRE_ALLOC_WRITERS": 0, + "PREGEN_JSON": "", + "TIME_UNIT": "", + "WRITE_RATE": 0, + 
"READ_RATE": 0, + "DELETE_RATE": 0, + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.S3, True)], indirect=True) + def test_empty_argument_parsing_for_s3_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '0'", + "--preload_obj '0'", + "--out ''", + "--workers '0'", + "--buckets '0'", + "--location ''", + ] + expected_env_vars = { + "DURATION": 0, + "WRITE_OBJ_SIZE": 0, + "REGISTRY_FILE": "", + "K6_MIN_ITERATION_DURATION": "", + "K6_SETUP_TIMEOUT": "", + "WRITERS": 0, + "READERS": 0, + "DELETERS": 0, + "NO_VERIFY_SSL": False, + "PREGEN_JSON": "", + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.S3_CAR, True)], indirect=True) + def test_empty_argument_parsing_for_s3_car_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '0'", + "--preload_obj '0'", + "--out ''", + "--workers '0'", + "--buckets '0'", + "--location ''", + ] + expected_env_vars = { + "DURATION": 0, + "WRITE_OBJ_SIZE": 0, + "REGISTRY_FILE": "", + "K6_MIN_ITERATION_DURATION": "", + "K6_SETUP_TIMEOUT": "", + "NO_VERIFY_SSL": False, + "MAX_WRITERS": 0, + "MAX_READERS": 0, + "MAX_DELETERS": 0, + "PRE_ALLOC_DELETERS": 0, + "PRE_ALLOC_READERS": 0, + "PRE_ALLOC_WRITERS": 0, + "PREGEN_JSON": "", + "TIME_UNIT": "", + "WRITE_RATE": 0, + "READ_RATE": 0, + "DELETE_RATE": 0, + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.HTTP, True)], indirect=True) + def test_empty_argument_parsing_for_http_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '0'", + "--preload_obj '0'", + "--out ''", + "--workers '0'", + "--containers '0'", + "--policy ''", + ] + expected_env_vars = { + "DURATION": 0, + "WRITE_OBJ_SIZE": 0, + "NO_VERIFY_SSL": False, + "REGISTRY_FILE": "", + "K6_MIN_ITERATION_DURATION": "", + "K6_SETUP_TIMEOUT": "", + "WRITERS": 0, + "READERS": 0, + "DELETERS": 0, + "PREGEN_JSON": "", + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.LOCAL, True)], indirect=True) + def test_empty_argument_parsing_for_local_scenario(self, load_params: LoadParams): + expected_preset_args = [ + "--size '0'", + "--preload_obj '0'", + "--out ''", + "--workers '0'", + "--containers '0'", + "--policy ''", + ] + expected_env_vars = { + "CONFIG_FILE": "", + "DURATION": 0, + "WRITE_OBJ_SIZE": 0, + "REGISTRY_FILE": "", + "K6_MIN_ITERATION_DURATION": "", + "K6_SETUP_TIMEOUT": "", + "WRITERS": 0, + "READERS": 0, + "DELETERS": 0, + "PREGEN_JSON": "", + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize( + "load_params, load_type, set_empty", + [(LoadScenario.VERIFY, LoadType.S3, True)], + indirect=True, + ) + def test_empty_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams): + expected_env_vars = { + "CLIENTS": 0, + "REGISTRY_FILE": "", + "K6_SETUP_TIMEOUT": "", + "NO_VERIFY_SSL": False, + "TIME_LIMIT": 0, + } + + self._check_env_vars(load_params, expected_env_vars) + + @pytest.mark.parametrize( + 
"load_params, load_type, set_empty", + [(LoadScenario.VERIFY, LoadType.gRPC, True)], + indirect=True, + ) + def test_argument_parsing_for_grpc_verify_scenario(self, load_params: LoadParams): + expected_env_vars = { + "CLIENTS": 0, + "REGISTRY_FILE": "", + "K6_SETUP_TIMEOUT": "", + "NO_VERIFY_SSL": False, + "TIME_LIMIT": 0, + } + + self._check_env_vars(load_params, expected_env_vars) + + def _check_preset_params(self, load_params: LoadParams, expected_preset_args: list[str]): + preset_parameters = load_params.get_preset_arguments() + assert sorted(preset_parameters) == sorted(expected_preset_args) + + def _check_env_vars(self, load_params: LoadParams, expected_env_vars: dict[str, str]): + env_vars = load_params.get_env_vars() + assert env_vars == expected_env_vars + + def _check_all_values_none(self, dataclass, skip_fields=None): + if skip_fields is None: + skip_fields = [] + + dataclass_fields = [field for field in fields(dataclass) if field.name not in skip_fields] + for field in dataclass_fields: + value = getattr(dataclass, field.name) + assert value is None, f"{field.name} is not None" + + def _check_all_values_not_none(self, dataclass, skip_fields=None): + if skip_fields is None: + skip_fields = [] + + dataclass_fields = [field for field in fields(dataclass) if field.name not in skip_fields] + for field in dataclass_fields: + value = getattr(dataclass, field.name) + assert value is not None, f"{field.name} is not None" + + def _get_filled_load_params( + self, load_type: LoadType, load_scenario: LoadScenario, set_emtpy: bool = False + ) -> LoadParams: + load_type_map = { + LoadScenario.S3: LoadType.S3, + LoadScenario.S3_CAR: LoadType.S3, + LoadScenario.gRPC: LoadType.gRPC, + LoadScenario.gRPC_CAR: LoadType.gRPC, + LoadScenario.LOCAL: LoadType.gRPC, + LoadScenario.HTTP: LoadType.HTTP, + } + load_type = load_type_map[load_scenario] if not load_type else load_type + + load_params = LoadParams(load_type) + load_params.scenario = load_scenario + load_params.preset = Preset() + + meta_fields = self._get_meta_fields(load_params) + for field in meta_fields: + if ( + getattr(field.instance, field.field.name) is None + and load_params.scenario in field.field.metadata["applicable_scenarios"] + ): + value_to_set_map = { + int: 0 if set_emtpy else len(field.field.name), + str: "" if set_emtpy else field.field.name, + bool: False if set_emtpy else True, + } + value_to_set = value_to_set_map[field.field_type] + setattr(field.instance, field.field.name, value_to_set) + + return load_params + + def _get_actual_field_type(self, field: Field) -> type: + return get_args(field.type)[0] if len(get_args(field.type)) else get_args(field.type) + + def _get_meta_fields(self, instance): + data_fields = fields(instance) + fields_with_data = [ + MetaTestField(field, self._get_actual_field_type(field), instance) + for field in data_fields + if field.metadata + ] + + for field in data_fields: + actual_field_type = self._get_actual_field_type(field) + if is_dataclass(actual_field_type) and getattr(instance, field.name): + fields_with_data += self._get_meta_fields(getattr(instance, field.name)) + + return fields_with_data or [] From 2240be09d2091a45316fbe4f9f9325f88315db76 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 24 Jul 2023 19:34:21 +0300 Subject: [PATCH 117/363] Add repr and str for most classes used in parametrize Signed-off-by: Andrey Berezin --- .gitignore | 1 + Makefile | 2 +- pyproject.toml | 3 +- src/frostfs_testlib/load/load_config.py | 94 +++++++++++++------ 
src/frostfs_testlib/load/load_report.py | 18 +--- src/frostfs_testlib/s3/aws_cli_client.py | 2 + src/frostfs_testlib/s3/boto3_client.py | 2 + src/frostfs_testlib/s3/interfaces.py | 6 +- .../storage/dataclasses/node_base.py | 3 +- .../storage/dataclasses/object_size.py | 13 +++ src/frostfs_testlib/testing/readable.py | 27 ++++++ src/frostfs_testlib/utils/converting_utils.py | 13 +++ tests/test_dataclasses.py | 37 ++++++++ tests/test_load_config.py | 14 +++ 14 files changed, 187 insertions(+), 48 deletions(-) create mode 100644 src/frostfs_testlib/storage/dataclasses/object_size.py create mode 100644 src/frostfs_testlib/testing/readable.py create mode 100644 tests/test_dataclasses.py diff --git a/.gitignore b/.gitignore index e2967ea..4691fe4 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,4 @@ venv.* /dist /build *.egg-info +wallet_config.yml \ No newline at end of file diff --git a/Makefile b/Makefile index 365e2fc..644eab0 100644 --- a/Makefile +++ b/Makefile @@ -19,7 +19,7 @@ paths: @echo Virtual environment: ${current_dir}/${VENV_DIR} @rm -rf ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth @touch ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth - @echo ${current_dir}/src/frostfs_testlib | tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth + @echo ${current_dir}/src | tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth create: ${VENV_DIR} diff --git a/pyproject.toml b/pyproject.toml index 8fca533..f85b883 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -68,4 +68,5 @@ push = false [tool.pytest.ini_options] filterwarnings = [ "ignore:Blowfish has been deprecated:cryptography.utils.CryptographyDeprecationWarning", -] \ No newline at end of file +] +testpaths = ["tests"] \ No newline at end of file diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 9a7e49c..4e0b71f 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -4,6 +4,8 @@ from enum import Enum from types import MappingProxyType from typing import Any, Optional, get_args +from frostfs_testlib.utils.converting_utils import calc_unit + class LoadType(Enum): gRPC = "grpc" @@ -45,6 +47,7 @@ s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR] @dataclass class MetaField: + name: str metadata: MappingProxyType value: Any @@ -53,6 +56,7 @@ def metadata_field( applicable_scenarios: list[LoadScenario], preset_param: Optional[str] = None, scenario_variable: Optional[str] = None, + string_repr: Optional[bool] = True, distributed: Optional[bool] = False, ): return field( @@ -61,6 +65,7 @@ def metadata_field( "applicable_scenarios": applicable_scenarios, "preset_argument": preset_param, "env_variable": scenario_variable, + "string_repr": string_repr, "distributed": distributed, }, ) @@ -100,25 +105,27 @@ class K6ProcessAllocationStrategy(Enum): class Preset: # ------ COMMON ------ # Amount of objects which should be created - objects_count: Optional[int] = metadata_field(all_load_scenarios, "preload_obj", None) + objects_count: Optional[int] = metadata_field(all_load_scenarios, "preload_obj", None, False) # Preset json. Filled automatically. 
- pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON") + pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False) # Workers count for preset - workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None) + workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False) # ------ GRPC ------ # Amount of containers which should be created - containers_count: Optional[int] = metadata_field(grpc_preset_scenarios, "containers", None) + containers_count: Optional[int] = metadata_field( + grpc_preset_scenarios, "containers", None, False + ) # Container placement policy for containers for gRPC container_placement_policy: Optional[str] = metadata_field( - grpc_preset_scenarios, "policy", None + grpc_preset_scenarios, "policy", None, False ) # ------ S3 ------ # Amount of buckets which should be created - buckets_count: Optional[int] = metadata_field(s3_preset_scenarios, "buckets", None) + buckets_count: Optional[int] = metadata_field(s3_preset_scenarios, "buckets", None, False) # S3 region (AKA placement policy for S3 buckets) - s3_location: Optional[str] = metadata_field(s3_preset_scenarios, "location", None) + s3_location: Optional[str] = metadata_field(s3_preset_scenarios, "location", None, False) @dataclass @@ -155,88 +162,93 @@ class LoadParams: [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.VERIFY, LoadScenario.HTTP], "no-verify-ssl", "NO_VERIFY_SSL", + False, ) # ------- COMMON SCENARIO PARAMS ------- # Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value. - load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION") + load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION", False) # Object size in KB for load and preset. - object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE") + object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE", False) # Output registry K6 file. Filled automatically. - registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE") + registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE", False) # Specifies the minimum duration of every single execution (i.e. iteration). # Any iterations that are shorter than this value will cause that VU to # sleep for the remainder of the time until the specified minimum duration is reached. min_iteration_duration: Optional[str] = metadata_field( - all_load_scenarios, None, "K6_MIN_ITERATION_DURATION" + all_load_scenarios, None, "K6_MIN_ITERATION_DURATION", False ) # Specifies K6 setupTimeout time. Currently hardcoded in xk6 as 5 seconds for all scenarios # https://k6.io/docs/using-k6/k6-options/reference/#setup-timeout - setup_timeout: Optional[str] = metadata_field(all_scenarios, None, "K6_SETUP_TIMEOUT") + setup_timeout: Optional[str] = metadata_field(all_scenarios, None, "K6_SETUP_TIMEOUT", False) # ------- CONSTANT VUS SCENARIO PARAMS ------- # Amount of Writers VU. - writers: Optional[int] = metadata_field(constant_vus_scenarios, None, "WRITERS", True) + writers: Optional[int] = metadata_field(constant_vus_scenarios, None, "WRITERS", True, True) # Amount of Readers VU. - readers: Optional[int] = metadata_field(constant_vus_scenarios, None, "READERS", True) + readers: Optional[int] = metadata_field(constant_vus_scenarios, None, "READERS", True, True) # Amount of Deleters VU. 
-    deleters: Optional[int] = metadata_field(constant_vus_scenarios, None, "DELETERS", True)
+    deleters: Optional[int] = metadata_field(constant_vus_scenarios, None, "DELETERS", True, True)
 
     # ------- CONSTANT ARRIVAL RATE SCENARIO PARAMS -------
     # Number of iterations to start during each timeUnit period for write.
     write_rate: Optional[int] = metadata_field(
-        constant_arrival_rate_scenarios, None, "WRITE_RATE", True
+        constant_arrival_rate_scenarios, None, "WRITE_RATE", True, True
     )
 
     # Number of iterations to start during each timeUnit period for read.
     read_rate: Optional[int] = metadata_field(
-        constant_arrival_rate_scenarios, None, "READ_RATE", True
+        constant_arrival_rate_scenarios, None, "READ_RATE", True, True
     )
 
     # Number of iterations to start during each timeUnit period for delete.
     delete_rate: Optional[int] = metadata_field(
-        constant_arrival_rate_scenarios, None, "DELETE_RATE", True
+        constant_arrival_rate_scenarios, None, "DELETE_RATE", True, True
     )
 
     # Amount of preAllocatedVUs for write operations.
     preallocated_writers: Optional[int] = metadata_field(
-        constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True
+        constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True, True
     )
     # Amount of maxVUs for write operations.
     max_writers: Optional[int] = metadata_field(
-        constant_arrival_rate_scenarios, None, "MAX_WRITERS", True
+        constant_arrival_rate_scenarios, None, "MAX_WRITERS", False, True
     )
 
     # Amount of preAllocatedVUs for read operations.
     preallocated_readers: Optional[int] = metadata_field(
-        constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True
+        constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True, True
     )
     # Amount of maxVUs for read operations.
     max_readers: Optional[int] = metadata_field(
-        constant_arrival_rate_scenarios, None, "MAX_READERS", True
+        constant_arrival_rate_scenarios, None, "MAX_READERS", False, True
     )
 
     # Amount of preAllocatedVUs for delete operations.
     preallocated_deleters: Optional[int] = metadata_field(
-        constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True
+        constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True, True
     )
     # Amount of maxVUs for delete operations.
     max_deleters: Optional[int] = metadata_field(
-        constant_arrival_rate_scenarios, None, "MAX_DELETERS", True
+        constant_arrival_rate_scenarios, None, "MAX_DELETERS", False, True
     )
 
     # Period of time to apply the rate value.
-    time_unit: Optional[str] = metadata_field(constant_arrival_rate_scenarios, None, "TIME_UNIT")
+    time_unit: Optional[str] = metadata_field(
+        constant_arrival_rate_scenarios, None, "TIME_UNIT", False
+    )
 
     # ------- VERIFY SCENARIO PARAMS -------
     # Maximum verification time for k6 to verify objects. Default is BACKGROUND_LOAD_MAX_VERIFY_TIME (3600).
-    verify_time: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "TIME_LIMIT")
+    verify_time: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "TIME_LIMIT", False)
     # Amount of Verification VU. 
- verify_clients: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "CLIENTS", True) + verify_clients: Optional[int] = metadata_field( + [LoadScenario.VERIFY], None, "CLIENTS", True, False + ) # ------- LOCAL SCENARIO PARAMS ------- # Config file location (filled automatically) - config_file: Optional[str] = metadata_field([LoadScenario.LOCAL], None, "CONFIG_FILE") + config_file: Optional[str] = metadata_field([LoadScenario.LOCAL], None, "CONFIG_FILE", False) def set_id(self, load_id): self.load_id = load_id @@ -267,6 +279,15 @@ class LoadParams: return command_args + def _get_applicable_fields(self): + applicable_fields = [ + meta_field + for meta_field in self._get_meta_fields(self) + if self.scenario in meta_field.metadata["applicable_scenarios"] and meta_field.value + ] + + return applicable_fields + @staticmethod def _get_preset_argument(meta_field: MetaField) -> str: if isinstance(meta_field.value, bool): @@ -280,7 +301,7 @@ class LoadParams: data_fields = fields(instance) fields_with_data = [ - MetaField(field.metadata, getattr(instance, field.name)) + MetaField(field.name, field.metadata, getattr(instance, field.name)) for field in data_fields if field.metadata and getattr(instance, field.name) is not None ] @@ -293,3 +314,18 @@ class LoadParams: fields_with_data += LoadParams._get_meta_fields(getattr(instance, field.name)) return fields_with_data or [] + + def __str__(self) -> str: + size, unit = calc_unit(self.object_size, 1) + static_params = [f"{self.scenario.value} ({size:.4g} {unit})"] + dynamic_params = [ + f"{meta_field.name}={meta_field.value}" + for meta_field in self._get_applicable_fields() + if meta_field.metadata["string_repr"] + ] + params = ", ".join(static_params + dynamic_params) + + return f"load: {params}" + + def __repr__(self) -> str: + return self.__str__() diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py index fa71069..e1056b7 100644 --- a/src/frostfs_testlib/load/load_report.py +++ b/src/frostfs_testlib/load/load_report.py @@ -1,10 +1,11 @@ from datetime import datetime -from typing import Optional, Tuple +from typing import Optional import yaml from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario from frostfs_testlib.load.load_metrics import get_metrics_object +from frostfs_testlib.utils.converting_utils import calc_unit class LoadReport: @@ -62,17 +63,6 @@ class LoadReport: return html - def _calc_unit(self, value: float, skip_units: int = 0) -> Tuple[float, str]: - units = ["B", "KiB", "MiB", "GiB", "TiB"] - - for unit in units[skip_units:]: - if value < 1024: - return value, unit - - value = value / 1024.0 - - return value, unit - def _seconds_to_formatted_duration(self, seconds: int) -> str: """Converts N number of seconds to formatted output ignoring zeroes. 
Examples: @@ -122,7 +112,7 @@ class LoadReport: ): throughput_html = "" if throughput > 0: - throughput, unit = self._calc_unit(throughput) + throughput, unit = calc_unit(throughput) throughput_html = self._row("Throughput", f"{throughput:.2f} {unit}/sec") per_node_errors_html = "" @@ -137,7 +127,7 @@ class LoadReport: ): per_node_errors_html += self._row(f"At {node_key}", errors) - object_size, object_size_unit = self._calc_unit(self.load_params.object_size, 1) + object_size, object_size_unit = calc_unit(self.load_params.object_size, 1) duration = self._seconds_to_formatted_duration(self.load_params.load_time) model = self._get_model_string() # write 8KB 15h49m 50op/sec 50th open model/closed model/min_iteration duration=1s - 1.636MB/s 199.57451/s diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index a9aeb37..2e61679 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -24,6 +24,8 @@ LONG_TIMEOUT = 240 class AwsCliClient(S3ClientWrapper): + __repr_name__: str = "AWS CLI" + # Flags that we use for all S3 commands: disable SSL verification (as we use self-signed # certificate in devenv) and disable automatic pagination in CLI output common_flags = "--no-verify-ssl --no-paginate" diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index 6d6fc74..2251efe 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -44,6 +44,8 @@ def report_error(func): class Boto3ClientWrapper(S3ClientWrapper): + __repr_name__: str = "Boto3 client" + @reporter.step_deco("Configure S3 client (boto3)") @report_error def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None: diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index 3f31395..166abff 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -1,8 +1,10 @@ -from abc import ABC, abstractmethod +from abc import abstractmethod from datetime import datetime from enum import Enum from typing import Literal, Optional, Union +from frostfs_testlib.testing.readable import HumanReadableABC + def _make_objs_dict(key_names): objs_list = [] @@ -29,7 +31,7 @@ ACL_COPY = [ ] -class S3ClientWrapper(ABC): +class S3ClientWrapper(HumanReadableABC): @abstractmethod def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None: pass diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 150b963..9748bc2 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -7,11 +7,12 @@ import yaml from frostfs_testlib.hosting.config import ServiceConfig from frostfs_testlib.hosting.interfaces import Host from frostfs_testlib.storage.constants import ConfigAttributes +from frostfs_testlib.testing.readable import HumanReadableABC from frostfs_testlib.utils import wallet_utils @dataclass -class NodeBase(ABC): +class NodeBase(HumanReadableABC): """ Represents a node of some underlying service """ diff --git a/src/frostfs_testlib/storage/dataclasses/object_size.py b/src/frostfs_testlib/storage/dataclasses/object_size.py new file mode 100644 index 0000000..520bdc3 --- /dev/null +++ b/src/frostfs_testlib/storage/dataclasses/object_size.py @@ -0,0 +1,13 @@ +from dataclasses import dataclass + + +@dataclass +class ObjectSize: + name: 
str + value: int + + def __str__(self) -> str: + return f"{self.name} object size" + + def __repr__(self) -> str: + return self.__str__() diff --git a/src/frostfs_testlib/testing/readable.py b/src/frostfs_testlib/testing/readable.py new file mode 100644 index 0000000..66384b7 --- /dev/null +++ b/src/frostfs_testlib/testing/readable.py @@ -0,0 +1,27 @@ +from abc import ABCMeta + + +class HumanReadableABCMeta(ABCMeta): + def __str__(cls): + if "__repr_name__" in cls.__dict__: + return cls.__dict__["__repr_name__"] + return cls.__name__ + + def __repr__(cls): + if "__repr_name__" in cls.__dict__: + return cls.__dict__["__repr_name__"] + return cls.__name__ + + +class HumanReadableABC(metaclass=HumanReadableABCMeta): + @classmethod + def __str__(cls): + if "__repr_name__" in cls.__dict__: + return cls.__dict__["__repr_name__"] + return type(cls).__name__ + + @classmethod + def __repr__(cls): + if "__repr_name__" in cls.__dict__: + return cls.__dict__["__repr_name__"] + return type(cls).__name__ diff --git a/src/frostfs_testlib/utils/converting_utils.py b/src/frostfs_testlib/utils/converting_utils.py index 24b77ae..273d9b4 100644 --- a/src/frostfs_testlib/utils/converting_utils.py +++ b/src/frostfs_testlib/utils/converting_utils.py @@ -1,10 +1,23 @@ import base64 import binascii import json +from typing import Tuple import base58 +def calc_unit(value: float, skip_units: int = 0) -> Tuple[float, str]: + units = ["B", "KiB", "MiB", "GiB", "TiB"] + + for unit in units[skip_units:]: + if value < 1024: + return value, unit + + value = value / 1024.0 + + return value, unit + + def str_to_ascii_hex(input: str) -> str: b = binascii.hexlify(input.encode()) return str(b)[2:-1] diff --git a/tests/test_dataclasses.py b/tests/test_dataclasses.py new file mode 100644 index 0000000..11cda7a --- /dev/null +++ b/tests/test_dataclasses.py @@ -0,0 +1,37 @@ +from typing import Any + +import pytest + +from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper +from frostfs_testlib.storage.dataclasses.frostfs_services import ( + HTTPGate, + InnerRing, + MorphChain, + S3Gate, + StorageNode, +) +from frostfs_testlib.storage.dataclasses.object_size import ObjectSize + + +class TestDataclassesStr: + """Here we are testing important classes string representation.""" + + @pytest.mark.parametrize( + "obj, expected", + [ + (Boto3ClientWrapper, "Boto3 client"), + (AwsCliClient, "AWS CLI"), + (ObjectSize("simple", 1), "simple object size"), + (ObjectSize("simple", 10), "simple object size"), + (ObjectSize("complex", 5000), "complex object size"), + (ObjectSize("complex", 5555), "complex object size"), + (StorageNode, "StorageNode"), + (MorphChain, "MorphChain"), + (S3Gate, "S3Gate"), + (HTTPGate, "HTTPGate"), + (InnerRing, "InnerRing"), + ], + ) + def test_classes_string_representation(self, obj: Any, expected: str): + assert f"{obj}" == expected + assert repr(obj) == expected diff --git a/tests/test_load_config.py b/tests/test_load_config.py index a9b6de1..89a10ea 100644 --- a/tests/test_load_config.py +++ b/tests/test_load_config.py @@ -46,6 +46,20 @@ class TestLoadConfig: preset = Preset() self._check_all_values_none(preset) + @pytest.mark.parametrize("load_params", [LoadScenario.S3_CAR], indirect=True) + def test_string_representation_s3_car(self, load_params: LoadParams): + load_params.object_size = 524288 + expected = "load: s3_car (512 MiB), write_rate=10, read_rate=9, delete_rate=11, preallocated_writers=20, preallocated_readers=20, preallocated_deleters=21" + assert f"{load_params}" == expected + assert 
repr(load_params) == expected + + @pytest.mark.parametrize("load_params", [LoadScenario.gRPC], indirect=True) + def test_string_representation_grpc(self, load_params: LoadParams): + load_params.object_size = 512 + expected = "load: grpc (512 KiB), writers=7, readers=7, deleters=8" + assert f"{load_params}" == expected + assert repr(load_params) == expected + def test_load_set_id_changes_fields(self): load_params = LoadParams(load_type=LoadType.S3) load_params.preset = Preset() From b856e820080fba80ce7abf6fb46798b68b66145a Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Mon, 24 Jul 2023 10:21:11 +0300 Subject: [PATCH 118/363] Added http hostname as a header to all http calls --- src/frostfs_testlib/steps/http/http_gate.py | 39 ++++++++++++------- src/frostfs_testlib/storage/cluster.py | 4 ++ src/frostfs_testlib/storage/constants.py | 2 + .../storage/dataclasses/frostfs_services.py | 6 +++ 4 files changed, 38 insertions(+), 13 deletions(-) diff --git a/src/frostfs_testlib/steps/http/http_gate.py b/src/frostfs_testlib/steps/http/http_gate.py index efc5258..8080689 100644 --- a/src/frostfs_testlib/steps/http/http_gate.py +++ b/src/frostfs_testlib/steps/http/http_gate.py @@ -32,6 +32,7 @@ def get_via_http_gate( cid: str, oid: str, endpoint: str, + http_hostname: str, request_path: Optional[str] = None, timeout: Optional[int] = 300, ): @@ -40,6 +41,7 @@ def get_via_http_gate( cid: container id to get object from oid: object ID endpoint: http gate endpoint + http_hostname: http host name on the node request_path: (optional) http request, if ommited - use default [{endpoint}/get/{cid}/{oid}] """ @@ -49,13 +51,14 @@ def get_via_http_gate( else: request = f"{endpoint}{request_path}" - resp = requests.get(request, stream=True, timeout=timeout, verify=False) + resp = requests.get(request, headers={"Host": http_hostname}, stream=True, timeout=timeout, verify=False) if not resp.ok: raise Exception( f"""Failed to get object via HTTP gate: request: {resp.request.path_url}, response: {resp.text}, + headers: {resp.headers}, status code: {resp.status_code} {resp.reason}""" ) @@ -69,12 +72,13 @@ def get_via_http_gate( @reporter.step_deco("Get via Zip HTTP Gate") -def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str, timeout: Optional[int] = 300): +def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str, http_hostname: str, timeout: Optional[int] = 300): """ This function gets given object from HTTP gate cid: container id to get object from prefix: common prefix endpoint: http gate endpoint + http_hostname: http host name on the node """ request = f"{endpoint}/zip/{cid}/{prefix}" resp = requests.get(request, stream=True, timeout=timeout, verify=False) @@ -84,6 +88,7 @@ def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str, timeout: Optiona f"""Failed to get object via HTTP gate: request: {resp.request.path_url}, response: {resp.text}, + headers: {resp.headers}, status code: {resp.status_code} {resp.reason}""" ) @@ -105,6 +110,7 @@ def get_via_http_gate_by_attribute( cid: str, attribute: dict, endpoint: str, + http_hostname: str, request_path: Optional[str] = None, timeout: Optional[int] = 300, ): @@ -113,6 +119,7 @@ def get_via_http_gate_by_attribute( cid: CID to get object from attribute: attribute {name: attribute} value pair endpoint: http gate endpoint + http_hostname: http host name on the node request_path: (optional) http request path, if ommited - use default [{endpoint}/get_by_attribute/{Key}/{Value}] """ attr_name = list(attribute.keys())[0] @@ -123,13 +130,14 @@ 
def get_via_http_gate_by_attribute( else: request = f"{endpoint}{request_path}" - resp = requests.get(request, stream=True, timeout=timeout, verify=False) + resp = requests.get(request, stream=True, timeout=timeout, verify=False, headers={"Host": http_hostname}) if not resp.ok: raise Exception( f"""Failed to get object via HTTP gate: request: {resp.request.path_url}, response: {resp.text}, + headers: {resp.headers}, status code: {resp.status_code} {resp.reason}""" ) @@ -142,6 +150,7 @@ def get_via_http_gate_by_attribute( return file_path +# TODO: pass http_hostname as a header @reporter.step_deco("Upload via HTTP Gate") def upload_via_http_gate( cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300 @@ -188,6 +197,7 @@ def is_object_large(filepath: str) -> bool: return False +# TODO: pass http_hostname as a header @reporter.step_deco("Upload via HTTP Gate using Curl") def upload_via_http_gate_curl( cid: str, @@ -236,17 +246,18 @@ def upload_via_http_gate_curl( @reporter.step_deco("Get via HTTP Gate using Curl") -def get_via_http_curl(cid: str, oid: str, endpoint: str) -> str: +def get_via_http_curl(cid: str, oid: str, endpoint: str, http_hostname: str) -> str: """ This function gets given object from HTTP gate using curl utility. cid: CID to get object from oid: object OID endpoint: http gate endpoint + http_hostname: http host name of the node """ request = f"{endpoint}/get/{cid}/{oid}" file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}") - cmd = f"curl -k {request} > {file_path}" + cmd = f"curl -k -H \"Host: {http_hostname}\" {request} > {file_path}" _cmd_run(cmd) return file_path @@ -260,10 +271,10 @@ def _attach_allure_step(request: str, status_code: int, req_type="GET"): @reporter.step_deco("Try to get object and expect error") def try_to_get_object_and_expect_error( - cid: str, oid: str, error_pattern: str, endpoint: str + cid: str, oid: str, error_pattern: str, endpoint: str, http_hostname: str, ) -> None: try: - get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint) + get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname) raise AssertionError(f"Expected error on getting object with cid: {cid}") except Exception as err: match = error_pattern.casefold() in str(err).casefold() @@ -272,11 +283,11 @@ def try_to_get_object_and_expect_error( @reporter.step_deco("Verify object can be get using HTTP header attribute") def get_object_by_attr_and_verify_hashes( - oid: str, file_name: str, cid: str, attrs: dict, endpoint: str + oid: str, file_name: str, cid: str, attrs: dict, endpoint: str, http_hostname: str, ) -> None: - got_file_path_http = get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint) + got_file_path_http = get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname) got_file_path_http_attr = get_via_http_gate_by_attribute( - cid=cid, attribute=attrs, endpoint=endpoint + cid=cid, attribute=attrs, endpoint=endpoint, http_hostname=http_hostname ) assert_hashes_are_equal(file_name, got_file_path_http, got_file_path_http_attr) @@ -289,6 +300,7 @@ def verify_object_hash( shell: Shell, nodes: list[StorageNode], endpoint: str, + http_hostname: str, object_getter=None, ) -> None: @@ -314,7 +326,7 @@ def verify_object_hash( shell=shell, endpoint=random_node.get_rpc_endpoint(), ) - got_file_path_http = object_getter(cid=cid, oid=oid, endpoint=endpoint) + got_file_path_http = object_getter(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname) 
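    # A minimal standalone sketch of the Host-header pattern the helpers above now
    # share (endpoint and hostname values below are illustrative, not from this
    # patch): the gate is still addressed by its endpoint, while virtual-host
    # routing is selected via an explicit "Host" header.
    #
    #   import requests
    #
    #   endpoint = "http://10.10.10.10:8080"    # assumed HTTP gate address
    #   http_hostname = "http.frostfs.devenv"   # assumed hostname from node config
    #   resp = requests.get(
    #       f"{endpoint}/get/{cid}/{oid}",
    #       headers={"Host": http_hostname},     # route to the right virtual host
    #       stream=True,
    #       timeout=300,
    #       verify=False,                        # devenv uses a self-signed cert
    #   )
    #   resp.raise_for_status()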
assert_hashes_are_equal(file_name, got_file_path, got_file_path_http) @@ -352,14 +364,15 @@ def try_to_get_object_via_passed_request_and_expect_error( error_pattern: str, endpoint: str, http_request_path: str, + http_hostname: str, attrs: Optional[dict] = None, ) -> None: try: if attrs is None: - get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, request_path=http_request_path) + get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, request_path=http_request_path, http_hostname=http_hostname) else: get_via_http_gate_by_attribute( - cid=cid, attribute=attrs, endpoint=endpoint, request_path=http_request_path + cid=cid, attribute=attrs, endpoint=endpoint, request_path=http_request_path, http_hostname=http_hostname ) raise AssertionError(f"Expected error on getting object with cid: {cid}") except Exception as err: diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index 91487c9..0e24ebb 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -130,6 +130,8 @@ class Cluster: default_rpc_endpoint: str default_s3_gate_endpoint: str default_http_gate_endpoint: str + default_http_hostname: str + default_s3_hostname: str def __init__(self, hosting: Hosting) -> None: self._hosting = hosting @@ -138,6 +140,8 @@ class Cluster: self.default_rpc_endpoint = self.services(StorageNode)[0].get_rpc_endpoint() self.default_s3_gate_endpoint = self.services(S3Gate)[0].get_endpoint() self.default_http_gate_endpoint = self.services(HTTPGate)[0].get_endpoint() + self.default_http_hostname = self.services(StorageNode)[0].get_http_hostname() + self.default_s3_hostname = self.services(StorageNode)[0].get_s3_hostname() @property def hosts(self) -> list[Host]: diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 95ea3f2..2f9d8a8 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -11,6 +11,8 @@ class ConfigAttributes: ENDPOINT_INTERNAL = "endpoint_internal0" CONTROL_ENDPOINT = "control_endpoint" UN_LOCODE = "un_locode" + HTTP_HOSTNAME = "http_hostname" + S3_HOSTNAME = "s3_hostname" class _FrostfsServicesNames: diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index 23e3335..944837a 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -170,6 +170,12 @@ class StorageNode(NodeBase): def get_data_directory(self) -> str: return self.host.get_data_directory(self.name) + + def get_http_hostname(self) -> str: + return self._get_attribute(ConfigAttributes.HTTP_HOSTNAME) + + def get_s3_hostname(self) -> str: + return self._get_attribute(ConfigAttributes.S3_HOSTNAME) def delete_blobovnicza(self): self.host.delete_blobovnicza(self.name) From 612e0887631d8d4221135d5b0bb97a13baac6bae Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Thu, 27 Jul 2023 10:49:41 +0300 Subject: [PATCH 119/363] Fix string representation for load params with empty fields Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/load_config.py | 10 +++++-- .../controllers/background_load_controller.py | 13 ++++++--- tests/test_load_config.py | 28 +++++++++++++++++-- 3 files changed, 43 insertions(+), 8 deletions(-) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 4e0b71f..ec5d3fd 100644 --- a/src/frostfs_testlib/load/load_config.py +++ 
b/src/frostfs_testlib/load/load_config.py @@ -316,8 +316,14 @@ class LoadParams: return fields_with_data or [] def __str__(self) -> str: - size, unit = calc_unit(self.object_size, 1) - static_params = [f"{self.scenario.value} ({size:.4g} {unit})"] + load_type_str = self.scenario.value if self.scenario else self.load_type.value + # TODO: migrate load_params defaults to testlib + if self.object_size is not None: + size, unit = calc_unit(self.object_size, 1) + static_params = [f"{load_type_str} ({size:.4g} {unit})"] + else: + static_params = [f"{load_type_str}"] + dynamic_params = [ f"{meta_field.name}={meta_field.value}" for meta_field in self._get_applicable_fields() diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index 91cb1af..aa17f4e 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -53,10 +53,6 @@ class BackgroundLoadController: if load_params.endpoint_selection_strategy is None: raise RuntimeError("endpoint_selection_strategy should not be None") - self.endpoints = self._get_endpoints( - load_params.load_type, load_params.endpoint_selection_strategy - ) - @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, []) def _get_endpoints( self, load_type: LoadType, endpoint_selection_strategy: EndpointSelectionStrategy @@ -100,6 +96,9 @@ class BackgroundLoadController: @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @reporter.step_deco("Prepare load instances") def prepare(self): + self.endpoints = self._get_endpoints( + self.load_params.load_type, self.load_params.endpoint_selection_strategy + ) self.runner.prepare(self.load_params, self.nodes_under_load, self.k6_dir) self.runner.init_k6_instances(self.load_params, self.endpoints, self.k6_dir) @@ -204,3 +203,9 @@ class BackgroundLoadController: def _get_results(self) -> dict: with reporter.step(f"Get {self.load_params.scenario.value} scenario results"): return self.runner.get_results() + + def __str__(self) -> str: + return self.load_params.__str__() + + def __repr__(self) -> str: + return repr(self.load_params) diff --git a/tests/test_load_config.py b/tests/test_load_config.py index 89a10ea..a84a188 100644 --- a/tests/test_load_config.py +++ b/tests/test_load_config.py @@ -3,7 +3,17 @@ from typing import Any, get_args import pytest -from frostfs_testlib.load.load_config import LoadParams, LoadScenario, LoadType, Preset +from frostfs_testlib.load.load_config import ( + EndpointSelectionStrategy, + LoadParams, + LoadScenario, + LoadType, + Preset, +) +from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.storage.controllers.background_load_controller import BackgroundLoadController +from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode +from frostfs_testlib.storage.dataclasses.node_base import NodeBase @dataclass @@ -36,7 +46,10 @@ class TestLoadConfig: return self._get_filled_load_params(load_type, load_scenario, set_empty) def test_load_params_only_load_type_required(self): - LoadParams(load_type=LoadType.S3) + load_params = LoadParams(load_type=LoadType.S3) + expected = "load: s3" + assert repr(load_params) == expected + assert f"{load_params}" == expected def test_load_params_initially_have_all_values_none(self): load_params = LoadParams(load_type=LoadType.S3) @@ -60,6 +73,17 @@ class TestLoadConfig: assert f"{load_params}" == expected assert repr(load_params) 
== expected + @pytest.mark.parametrize("load_params", [LoadScenario.gRPC], indirect=True) + def test_load_controller_string_representation(self, load_params: LoadParams): + load_params.endpoint_selection_strategy = EndpointSelectionStrategy.ALL + load_params.object_size = 512 + background_load_controller = BackgroundLoadController( + "tmp", load_params, "wallet", None, None + ) + expected = "load: grpc (512 KiB), writers=7, readers=7, deleters=8" + assert f"{background_load_controller}" == expected + assert repr(background_load_controller) == expected + def test_load_set_id_changes_fields(self): load_params = LoadParams(load_type=LoadType.S3) load_params.preset = Preset() From d6e08c477b8fedeef29ebb72e02f03569c0cd531 Mon Sep 17 00:00:00 2001 From: "m.malygina" Date: Wed, 26 Jul 2023 13:35:35 +0300 Subject: [PATCH 120/363] fix divizion by zero, when total operations is zero --- src/frostfs_testlib/load/load_report.py | 14 ++++++++------ src/frostfs_testlib/load/load_verifiers.py | 6 +++--- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py index e1056b7..a2cecf6 100644 --- a/src/frostfs_testlib/load/load_report.py +++ b/src/frostfs_testlib/load/load_report.py @@ -100,7 +100,7 @@ class LoadReport: return model_map[self.load_params.scenario] - def _get_oprations_sub_section_html( + def _get_operations_sub_section_html( self, operation_type: str, total_operations: int, @@ -132,7 +132,9 @@ class LoadReport: model = self._get_model_string() # write 8KB 15h49m 50op/sec 50th open model/closed model/min_iteration duration=1s - 1.636MB/s 199.57451/s short_summary = f"{operation_type} {object_size}{object_size_unit} {duration} {requested_rate_str} {vus_str} {model} - {throughput:.2f}{unit}/s {total_rate:.2f}/s" - + errors_percent = 0 + if total_operations: + errors_percent = total_errors/total_operations*100.0 html = f""" @@ -143,7 +145,7 @@ class LoadReport: {per_node_errors_html} - {self._row("Total", f"{total_errors} ({total_errors/total_operations*100.0:.2f}%)")} + {self._row("Total", f"{total_errors} ({errors_percent:.2f}%)")} {self._row("Threshold", f"{self.load_params.error_threshold:.2f}%")}
            <tr><th colspan="2" bgcolor="gainsboro">{short_summary}</th></tr>
            <tr><th colspan="2" bgcolor="gainsboro">Errors</th></tr>
        </tbody></table><br><hr/>
""" @@ -228,7 +230,7 @@ class LoadReport: delete_errors[node_key] = metrics.delete_failed_iterations if write_section_required: - html += self._get_oprations_sub_section_html( + html += self._get_operations_sub_section_html( "Write", write_operations, requested_write_rate_str, @@ -239,7 +241,7 @@ class LoadReport: ) if read_section_required: - html += self._get_oprations_sub_section_html( + html += self._get_operations_sub_section_html( "Read", read_operations, requested_read_rate_str, @@ -250,7 +252,7 @@ class LoadReport: ) if delete_section_required: - html += self._get_oprations_sub_section_html( + html += self._get_operations_sub_section_html( "Delete", delete_operations, requested_delete_rate_str, diff --git a/src/frostfs_testlib/load/load_verifiers.py b/src/frostfs_testlib/load/load_verifiers.py index f2a3e7e..80c3962 100644 --- a/src/frostfs_testlib/load/load_verifiers.py +++ b/src/frostfs_testlib/load/load_verifiers.py @@ -49,15 +49,15 @@ class LoadVerifier: if deleters and not delete_operations: exceptions.append(f"No any delete operation was performed") - if writers and write_errors / write_operations * 100 > self.load_params.error_threshold: + if write_operations and writers and write_errors / write_operations * 100 > self.load_params.error_threshold: exceptions.append( f"Write error rate is greater than threshold: {write_errors / write_operations * 100} > {self.load_params.error_threshold}" ) - if readers and read_errors / read_operations * 100 > self.load_params.error_threshold: + if read_operations and readers and read_errors / read_operations * 100 > self.load_params.error_threshold: exceptions.append( f"Read error rate is greater than threshold: {read_errors / read_operations * 100} > {self.load_params.error_threshold}" ) - if deleters and delete_errors / delete_operations * 100 > self.load_params.error_threshold: + if delete_operations and deleters and delete_errors / delete_operations * 100 > self.load_params.error_threshold: exceptions.append( f"Delete error rate is greater than threshold: {delete_errors / delete_operations * 100} > {self.load_params.error_threshold}" ) From 716a780a13102d6b87841407a01a977532ad606c Mon Sep 17 00:00:00 2001 From: anikeev-yadro Date: Thu, 27 Jul 2023 14:29:22 +0300 Subject: [PATCH 121/363] Add epoch align after tick Signed-off-by: anikeev-yadro --- src/frostfs_testlib/steps/node_management.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/frostfs_testlib/steps/node_management.py b/src/frostfs_testlib/steps/node_management.py index aec9b8a..4b46b62 100644 --- a/src/frostfs_testlib/steps/node_management.py +++ b/src/frostfs_testlib/steps/node_management.py @@ -16,6 +16,7 @@ from frostfs_testlib.resources.cli import ( from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.shell import Shell from frostfs_testlib.steps.epoch import tick_epoch +from frostfs_testlib.steps.epoch import wait_for_epochs_align from frostfs_testlib.storage.cluster import Cluster, StorageNode from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate from frostfs_testlib.utils import datetime_utils @@ -189,6 +190,7 @@ def exclude_node_from_network_map( time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) tick_epoch(shell, cluster) + wait_for_epochs_align(shell, cluster) snapshot = get_netmap_snapshot(node=alive_node, shell=shell) assert ( From 807235af95f019265493056b0009207966eee20a Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 31 Jul 2023 14:08:12 +0300 Subject: [PATCH 122/363] Fix multiple services 
start (copy array for upstream functions) Signed-off-by: Andrey Berezin --- .../controllers/cluster_state_controller.py | 27 +++++-------------- 1 file changed, 7 insertions(+), 20 deletions(-) diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index c73a8f4..3a2b509 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -1,13 +1,13 @@ +import copy import time -from concurrent.futures import ThreadPoolExecutor import frostfs_testlib.resources.optionals as optionals from frostfs_testlib.reporter import get_reporter from frostfs_testlib.shell import CommandOptions, Shell -from frostfs_testlib.steps import epoch from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode from frostfs_testlib.storage.controllers.disk_controller import DiskController -from frostfs_testlib.testing.test_control import run_optionally, wait_for_success +from frostfs_testlib.testing import parallel +from frostfs_testlib.testing.test_control import run_optionally from frostfs_testlib.utils.failover_utils import ( wait_all_storage_nodes_returned, wait_for_host_offline, @@ -139,15 +139,8 @@ class ClusterStateController: # In case if we stopped couple services, for example (s01-s04): # After starting only s01, it may require connections to s02-s04, which is still down, and fail to start. # Also, if something goes wrong here, we might skip s02-s04 start at all, and cluster will be left in a bad state. - # So in order to make sure that services are at least attempted to be started, using threads here. - with ThreadPoolExecutor(max_workers=len(self.stopped_storage_nodes)) as executor: - start_result = executor.map(self.start_storage_service, self.stopped_storage_nodes) - - # Looks tricky, but if exception is raised in any thread, it will be "eaten" by ThreadPoolExecutor, - # But will be thrown here. - # Not ideal solution, but okay for now - for _ in start_result: - pass + # So in order to make sure that services are at least attempted to be started, using parallel runs here. + parallel(self.start_storage_service, copy.copy(self.stopped_storage_nodes)) wait_all_storage_nodes_returned(self.shell, self.cluster) self.stopped_storage_nodes = [] @@ -170,14 +163,8 @@ class ClusterStateController: if not self.stopped_s3_gates: return - with ThreadPoolExecutor(max_workers=len(self.stopped_s3_gates)) as executor: - start_result = executor.map(self.start_s3_gate, self.stopped_s3_gates) - - # Looks tricky, but if exception is raised in any thread, it will be "eaten" by ThreadPoolExecutor, - # But will be thrown here. 
- # Not ideal solution, but okay for now - for _ in start_result: - pass + parallel(self.start_s3_gate, copy.copy(self.stopped_s3_gates)) + self.stopped_s3_gates = [] @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Suspend {process_name} service in {node}") From e4878f4d1e4b1aaa1df2006674d5437c9ace90c2 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 2 Aug 2023 21:38:27 +0300 Subject: [PATCH 123/363] Add readable enums Signed-off-by: Andrey Berezin --- src/frostfs_testlib/s3/interfaces.py | 5 ++--- src/frostfs_testlib/steps/session_token.py | 5 +++-- src/frostfs_testlib/storage/dataclasses/acl.py | 11 ++++++----- src/frostfs_testlib/testing/readable.py | 9 +++++++++ tests/test_dataclasses.py | 2 ++ 5 files changed, 22 insertions(+), 10 deletions(-) diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index 166abff..8d82f71 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -1,9 +1,8 @@ from abc import abstractmethod from datetime import datetime -from enum import Enum from typing import Literal, Optional, Union -from frostfs_testlib.testing.readable import HumanReadableABC +from frostfs_testlib.testing.readable import HumanReadableABC, HumanReadableEnum def _make_objs_dict(key_names): @@ -15,7 +14,7 @@ def _make_objs_dict(key_names): return objs_dict -class VersioningStatus(Enum): +class VersioningStatus(HumanReadableEnum): ENABLED = "Enabled" SUSPENDED = "Suspended" diff --git a/src/frostfs_testlib/steps/session_token.py b/src/frostfs_testlib/steps/session_token.py index 14e25f1..b82d0e2 100644 --- a/src/frostfs_testlib/steps/session_token.py +++ b/src/frostfs_testlib/steps/session_token.py @@ -14,6 +14,7 @@ from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo from frostfs_testlib.storage.dataclasses.wallet import WalletInfo +from frostfs_testlib.testing.readable import HumanReadableEnum from frostfs_testlib.utils import json_utils, wallet_utils reporter = get_reporter() @@ -26,7 +27,7 @@ WRONG_VERB = "wrong verb of the session" INVALID_SIGNATURE = "invalid signature of the session data" -class ObjectVerb(Enum): +class ObjectVerb(HumanReadableEnum): PUT = "PUT" DELETE = "DELETE" GET = "GET" @@ -36,7 +37,7 @@ class ObjectVerb(Enum): SEARCH = "SEARCH" -class ContainerVerb(Enum): +class ContainerVerb(HumanReadableEnum): CREATE = "PUT" DELETE = "DELETE" SETEACL = "SETEACL" diff --git a/src/frostfs_testlib/storage/dataclasses/acl.py b/src/frostfs_testlib/storage/dataclasses/acl.py index cceb4d8..1330618 100644 --- a/src/frostfs_testlib/storage/dataclasses/acl.py +++ b/src/frostfs_testlib/storage/dataclasses/acl.py @@ -3,6 +3,7 @@ from dataclasses import dataclass from enum import Enum from typing import Any, Dict, List, Optional, Union +from frostfs_testlib.testing.readable import HumanReadableEnum from frostfs_testlib.utils import wallet_utils logger = logging.getLogger("NeoLogger") @@ -10,7 +11,7 @@ EACL_LIFETIME = 100500 FROSTFS_CONTRACT_CACHE_TIMEOUT = 30 -class EACLOperation(Enum): +class EACLOperation(HumanReadableEnum): PUT = "put" GET = "get" HEAD = "head" @@ -20,24 +21,24 @@ class EACLOperation(Enum): DELETE = "delete" -class EACLAccess(Enum): +class EACLAccess(HumanReadableEnum): ALLOW = "allow" DENY = "deny" -class EACLRole(Enum): +class EACLRole(HumanReadableEnum): OTHERS = "others" USER = "user" SYSTEM = "system" 
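# Illustrative REPL sketch (not part of the patch) of what the
# Enum -> HumanReadableEnum switch changes for the classes above: __str__ and
# __repr__ now return the bare member name, so log lines and allure step titles
# read naturally.
#
#   >>> str(EACLRole.OTHERS)        # plain Enum would render "EACLRole.OTHERS"
#   'OTHERS'
#   >>> f"Deny {EACLRole.OTHERS} {EACLOperation.PUT}"
#   'Deny OTHERS PUT'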
-class EACLHeaderType(Enum): +class EACLHeaderType(HumanReadableEnum): REQUEST = "req" # Filter request headers OBJECT = "obj" # Filter object headers SERVICE = "SERVICE" # Filter service headers. These are not processed by FrostFS nodes and exist for service use only -class EACLMatchType(Enum): +class EACLMatchType(HumanReadableEnum): STRING_EQUAL = "=" # Return true if strings are equal STRING_NOT_EQUAL = "!=" # Return true if strings are different diff --git a/src/frostfs_testlib/testing/readable.py b/src/frostfs_testlib/testing/readable.py index 66384b7..80f1169 100644 --- a/src/frostfs_testlib/testing/readable.py +++ b/src/frostfs_testlib/testing/readable.py @@ -1,4 +1,13 @@ from abc import ABCMeta +from enum import Enum + + +class HumanReadableEnum(Enum): + def __str__(self): + return self._name_ + + def __repr__(self): + return self._name_ class HumanReadableABCMeta(ABCMeta): diff --git a/tests/test_dataclasses.py b/tests/test_dataclasses.py index 11cda7a..f1cc51e 100644 --- a/tests/test_dataclasses.py +++ b/tests/test_dataclasses.py @@ -3,6 +3,7 @@ from typing import Any import pytest from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper +from frostfs_testlib.storage.dataclasses.acl import EACLRole from frostfs_testlib.storage.dataclasses.frostfs_services import ( HTTPGate, InnerRing, @@ -30,6 +31,7 @@ class TestDataclassesStr: (S3Gate, "S3Gate"), (HTTPGate, "HTTPGate"), (InnerRing, "InnerRing"), + (EACLRole.OTHERS, "OTHERS"), ], ) def test_classes_string_representation(self, obj: Any, expected: str): From d28f3cdc286603e043de882040b61c33935cde77 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Fri, 4 Aug 2023 14:19:49 +0300 Subject: [PATCH 124/363] Add UNDEFINED versionins status Signed-off-by: Andrey Berezin --- src/frostfs_testlib/s3/interfaces.py | 1 + src/frostfs_testlib/steps/s3/s3_helper.py | 3 +++ 2 files changed, 4 insertions(+) diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index 8d82f71..2b6be7d 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -15,6 +15,7 @@ def _make_objs_dict(key_names): class VersioningStatus(HumanReadableEnum): + UNDEFINED = None ENABLED = "Enabled" SUSPENDED = "Suspended" diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py index ae27124..4b900eb 100644 --- a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3/s3_helper.py @@ -67,6 +67,9 @@ def try_to_get_objects_and_expect_error( @reporter.step_deco("Set versioning status to '{status}' for bucket '{bucket}'") def set_bucket_versioning(s3_client: S3ClientWrapper, bucket: str, status: VersioningStatus): + if status == VersioningStatus.UNDEFINED: + return + s3_client.get_bucket_versioning_status(bucket) s3_client.put_bucket_versioning(bucket, status=status) bucket_status = s3_client.get_bucket_versioning_status(bucket) From 02c079eda3145f49bddc59846fe46624d6628230 Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Fri, 4 Aug 2023 15:32:09 +0300 Subject: [PATCH 125/363] [OBJECT-3949] delete mainchain ready --- src/frostfs_testlib/steps/payment_neogo.py | 85 ++----------------- src/frostfs_testlib/storage/__init__.py | 3 - src/frostfs_testlib/storage/constants.py | 1 - .../storage/dataclasses/frostfs_services.py | 24 ------ 4 files changed, 9 insertions(+), 104 deletions(-) diff --git a/src/frostfs_testlib/steps/payment_neogo.py b/src/frostfs_testlib/steps/payment_neogo.py index 6a64a5a..7fe0b4d 100644 --- 
a/src/frostfs_testlib/steps/payment_neogo.py +++ b/src/frostfs_testlib/steps/payment_neogo.py @@ -13,7 +13,7 @@ from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import NEOGO_EXECUTABLE from frostfs_testlib.resources.common import FROSTFS_CONTRACT, GAS_HASH, MORPH_BLOCK_TIME from frostfs_testlib.shell import Shell -from frostfs_testlib.storage.dataclasses.frostfs_services import MainChain, MorphChain +from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain from frostfs_testlib.utils import converting_utils, datetime_utils, wallet_utils reporter = get_reporter() @@ -21,10 +21,8 @@ logger = logging.getLogger("NeoLogger") EMPTY_PASSWORD = "" TX_PERSIST_TIMEOUT = 15 # seconds -ASSET_POWER_MAINCHAIN = 10**8 ASSET_POWER_SIDECHAIN = 10**12 - def get_nns_contract_hash(morph_chain: MorphChain) -> str: return morph_chain.rpc_client.get_contract_state(1)["hash"] @@ -41,33 +39,7 @@ def get_contract_hash(morph_chain: MorphChain, resolve_name: str, shell: Shell) stack_data = json.loads(out.stdout.replace("\n", ""))["stack"][0]["value"] return bytes.decode(base64.b64decode(stack_data[0]["value"])) - -@reporter.step_deco("Withdraw Mainnet Gas") -def withdraw_mainnet_gas(shell: Shell, main_chain: MainChain, wlt: str, amount: int): - address = wallet_utils.get_last_address_from_wallet(wlt, EMPTY_PASSWORD) - scripthash = neo3_utils.address_to_script_hash(address) - - neogo = NeoGo(shell=shell, neo_go_exec_path=NEOGO_EXECUTABLE) - out = neogo.contract.invokefunction( - wallet=wlt, - address=address, - rpc_endpoint=main_chain.get_endpoint(), - scripthash=FROSTFS_CONTRACT, - method="withdraw", - arguments=f"{scripthash} int:{amount}", - multisig_hash=f"{scripthash}:Global", - wallet_password="", - ) - - m = re.match(r"^Sent invocation transaction (\w{64})$", out.stdout) - if m is None: - raise Exception("Can not get Tx.") - tx = m.group(1) - if not transaction_accepted(main_chain, tx): - raise AssertionError(f"TX {tx} hasn't been processed") - - -def transaction_accepted(main_chain: MainChain, tx_id: str): +def transaction_accepted(morph_chain: MorphChain, tx_id: str): """ This function returns True in case of accepted TX. Args: @@ -79,8 +51,8 @@ def transaction_accepted(main_chain: MainChain, tx_id: str): try: for _ in range(0, TX_PERSIST_TIMEOUT): time.sleep(1) - neogo = NeoGo(shell=main_chain.host.get_shell(), neo_go_exec_path=NEOGO_EXECUTABLE) - resp = neogo.query.tx(tx_hash=tx_id, rpc_endpoint=main_chain.get_endpoint()) + neogo = NeoGo(shell=morph_chain.host.get_shell(), neo_go_exec_path=NEOGO_EXECUTABLE) + resp = neogo.query.tx(tx_hash=tx_id, rpc_endpoint=morph_chain.get_endpoint()) if resp is not None: logger.info(f"TX is accepted in block: {resp}") return True, resp @@ -110,12 +82,11 @@ def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_ logger.error(f"failed to get wallet balance: {out}") raise out - @reporter.step_deco("Transfer Gas") def transfer_gas( shell: Shell, amount: int, - main_chain: MainChain, + morph_chain: MorphChain, wallet_from_path: Optional[str] = None, wallet_from_password: Optional[str] = None, address_from: Optional[str] = None, @@ -138,11 +109,11 @@ def transfer_gas( address_to: The address of the wallet to transfer assets to. amount: Amount of gas to transfer. 
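    Example (an illustrative sketch, not part of this patch; assumes the
    morph chain's default wallet holds enough GAS and that recipient_address
    is a valid NEO address string):
        transfer_gas(shell, amount=1000, morph_chain=morph_chain,
                     address_to=recipient_address)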
""" - wallet_from_path = wallet_from_path or main_chain.get_wallet_path() + wallet_from_path = wallet_from_path or morph_chain.get_wallet_path() wallet_from_password = ( wallet_from_password if wallet_from_password is not None - else main_chain.get_wallet_password() + else morph_chain.get_wallet_password() ) address_from = address_from or wallet_utils.get_last_address_from_wallet( wallet_from_path, wallet_from_password @@ -153,7 +124,7 @@ def transfer_gas( neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE) out = neogo.nep17.transfer( - rpc_endpoint=main_chain.get_endpoint(), + rpc_endpoint=morph_chain.get_endpoint(), wallet=wallet_from_path, wallet_password=wallet_from_password, amount=amount, @@ -165,49 +136,11 @@ def transfer_gas( txid = out.stdout.strip().split("\n")[-1] if len(txid) != 64: raise Exception("Got no TXID after run the command") - if not transaction_accepted(main_chain, txid): + if not transaction_accepted(morph_chain, txid): raise AssertionError(f"TX {txid} hasn't been processed") time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) -@reporter.step_deco("FrostFS Deposit") -def deposit_gas( - shell: Shell, - main_chain: MainChain, - amount: int, - wallet_from_path: str, - wallet_from_password: str, -): - """ - Transferring GAS from given wallet to FrostFS contract address. - """ - # get FrostFS contract address - deposit_addr = converting_utils.contract_hash_to_address(FROSTFS_CONTRACT) - logger.info(f"FrostFS contract address: {deposit_addr}") - address_from = wallet_utils.get_last_address_from_wallet( - wallet_path=wallet_from_path, wallet_password=wallet_from_password - ) - transfer_gas( - shell=shell, - main_chain=main_chain, - amount=amount, - wallet_from_path=wallet_from_path, - wallet_from_password=wallet_from_password, - address_to=deposit_addr, - address_from=address_from, - ) - - -@reporter.step_deco("Get Mainnet Balance") -def get_mainnet_balance(main_chain: MainChain, address: str): - resp = main_chain.rpc_client.get_nep17_balances(address=address) - logger.info(f"Got getnep17balances response: {resp}") - for balance in resp["balance"]: - if balance["assethash"] == GAS_HASH: - return float(balance["amount"]) / ASSET_POWER_MAINCHAIN - return float(0) - - @reporter.step_deco("Get Sidechain Balance") def get_sidechain_balance(morph_chain: MorphChain, address: str): resp = morph_chain.rpc_client.get_nep17_balances(address=address) diff --git a/src/frostfs_testlib/storage/__init__.py b/src/frostfs_testlib/storage/__init__.py index 531964c..3562d25 100644 --- a/src/frostfs_testlib/storage/__init__.py +++ b/src/frostfs_testlib/storage/__init__.py @@ -2,7 +2,6 @@ from frostfs_testlib.storage.constants import _FrostfsServicesNames from frostfs_testlib.storage.dataclasses.frostfs_services import ( HTTPGate, InnerRing, - MainChain, MorphChain, S3Gate, StorageNode, @@ -17,8 +16,6 @@ __class_registry.register_service(_FrostfsServicesNames.INNER_RING, InnerRing) __class_registry.register_service(_FrostfsServicesNames.MORPH_CHAIN, MorphChain) __class_registry.register_service(_FrostfsServicesNames.S3_GATE, S3Gate) __class_registry.register_service(_FrostfsServicesNames.HTTP_GATE, HTTPGate) -# # TODO: Remove this since we are no longer have main chain -__class_registry.register_service(_FrostfsServicesNames.MAIN_CHAIN, MainChain) def get_service_registry() -> ServiceRegistry: diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 2f9d8a8..6deedfb 100644 --- a/src/frostfs_testlib/storage/constants.py +++ 
b/src/frostfs_testlib/storage/constants.py @@ -21,4 +21,3 @@ class _FrostfsServicesNames: HTTP_GATE = "http-gate" MORPH_CHAIN = "morph-chain" INNER_RING = "ir" - MAIN_CHAIN = "main-chain" diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index 944837a..ccb30d5 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -110,30 +110,6 @@ class MorphChain(NodeBase): def label(self) -> str: return f"{self.name}: {self.get_endpoint()}" - -class MainChain(NodeBase): - """ - Class represents main-chain consensus node in a cluster - - Consensus node is not always the same as physical host: - It can be service running in a container or on physical host (or physical node, if you will): - For testing perspective, it's not relevant how it is actually running, - since frostfs network will still treat it as "node" - """ - - rpc_client: RPCClient - - def construct(self): - self.rpc_client = RPCClient(self.get_endpoint()) - - def get_endpoint(self) -> str: - return self._get_attribute(ConfigAttributes.ENDPOINT_INTERNAL) - - @property - def label(self) -> str: - return f"{self.name}: {self.get_endpoint()}" - - class StorageNode(NodeBase): """ Class represents storage node in a storage cluster From b1c21e0e5b6ae3d1d3c0ad86d611a2673df0c0d6 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Tue, 15 Aug 2023 16:48:28 +0300 Subject: [PATCH 126/363] Add Iptables helper Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/cli/frostfs_cli/object.py | 42 +++++++++++ src/frostfs_testlib/hosting/config.py | 1 + src/frostfs_testlib/steps/cli/object.py | 62 ++++++++++++++- src/frostfs_testlib/steps/iptables.py | 42 +++++++++++ .../controllers/cluster_state_controller.py | 75 ++++++++++++++++++- .../dataclasses/storage_object_info.py | 18 +++++ src/frostfs_testlib/utils/cli_utils.py | 51 ++++++++++++- 7 files changed, 288 insertions(+), 3 deletions(-) create mode 100644 src/frostfs_testlib/steps/iptables.py diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py index 8915914..476af68 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/object.py +++ b/src/frostfs_testlib/cli/frostfs_cli/object.py @@ -351,3 +351,45 @@ class FrostfsCliObject(CliCommand): "object search", **{param: value for param, value in locals().items() if param not in ["self"]}, ) + + def nodes( + self, + rpc_endpoint: str, + wallet: str, + cid: str, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional = None, + oid: Optional[str] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """ + Search object nodes. + + Args: + address: Address of wallet account. + bearer: File with signed JSON or binary encoded bearer token. + cid: Container ID. + generate_key: Generate new private key. + oid: Object ID. + trace: Generate trace ID and print it. + root: Search for user objects. + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + verify_presence_all: Verify the actual presence of the object on all netmap nodes. + ttl: TTL value in request meta header (default 2). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Dict with request X-Headers. + timeout: Timeout for the operation (default 15s). 
+ + Returns: + Command's result. + """ + return self._execute( + "object nodes", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/frostfs_testlib/hosting/config.py b/src/frostfs_testlib/hosting/config.py index dd8b4b9..6679470 100644 --- a/src/frostfs_testlib/hosting/config.py +++ b/src/frostfs_testlib/hosting/config.py @@ -64,6 +64,7 @@ class HostConfig: services: list[ServiceConfig] = field(default_factory=list) clis: list[CLIConfig] = field(default_factory=list) attributes: dict[str, str] = field(default_factory=dict) + interfaces: dict[str, str] = field(default_factory=dict) def __post_init__(self) -> None: self.services = [ServiceConfig(**service) for service in self.services or []] diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index 9a63604..9c7c694 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ -11,8 +11,9 @@ from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell -from frostfs_testlib.storage.cluster import Cluster +from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.utils import json_utils +from frostfs_testlib.utils.cli_utils import parse_cmd_table, parse_netmap_output logger = logging.getLogger("NeoLogger") reporter = get_reporter() @@ -731,3 +732,62 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict: latest_block[0].replace(":", ""): int(latest_block[1]), validated_state[0].replace(":", ""): int(validated_state[1]), } + + +@reporter.step_deco("Search object nodes") +def get_object_nodes( + cluster: Cluster, + wallet: str, + cid: str, + oid: str, + shell: Shell, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + wallet_config: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +) -> list[ClusterNode]: + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) + + result_object_nodes = cli.object.nodes( + rpc_endpoint=endpoint, + wallet=wallet, + cid=cid, + oid=oid, + bearer=bearer, + ttl=1 if is_direct else None, + xhdr=xhdr, + timeout=timeout, + verify_presence_all=verify_presence_all, + ) + + parsing_output = parse_cmd_table(result_object_nodes.stdout, "|") + list_object_nodes = [ + node + for node in parsing_output + if node["should_contain_object"] == "true" and node["actually_contains_object"] == "true" + ] + + netmap_nodes_list = parse_netmap_output( + cli.netmap.snapshot( + rpc_endpoint=endpoint, + wallet=wallet, + ).stdout + ) + netmap_nodes = [ + netmap_node + for object_node in list_object_nodes + for netmap_node in netmap_nodes_list + if object_node["node_id"] == netmap_node.node_id + ] + + result = [ + cluster_node + for netmap_node in netmap_nodes + for cluster_node in cluster.cluster_nodes + if netmap_node.node == cluster_node.host_ip + ] + + return result diff --git a/src/frostfs_testlib/steps/iptables.py b/src/frostfs_testlib/steps/iptables.py new file mode 100644 index 0000000..db0bb22 --- /dev/null +++ b/src/frostfs_testlib/steps/iptables.py @@ -0,0 +1,42 @@ +from frostfs_testlib.shell import Shell +from frostfs_testlib.storage.cluster import ClusterNode + + +class IpTablesHelper: + @staticmethod + def 
drop_input_traffic_to_port(node: ClusterNode, ports: list[str]) -> None: + shell = node.host.get_shell() + for port in ports: + shell.exec(f"iptables -A INPUT -p tcp --dport {port} -j DROP") + + @staticmethod + def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[str]) -> None: + shell = node.host.get_shell() + for ip in block_ip: + shell.exec(f"iptables -A INPUT -s {ip} -j DROP") + + @staticmethod + def restore_input_traffic_to_port(node: ClusterNode) -> None: + shell = node.host.get_shell() + ports = ( + shell.exec("iptables -L --numeric | grep DROP | awk '{print $7}'") + .stdout.strip() + .split("\n") + ) + if ports[0] == "": + return + for port in ports: + shell.exec(f"iptables -D INPUT -p tcp --dport {port.split(':')[-1]} -j DROP") + + @staticmethod + def restore_input_traffic_to_node(node: ClusterNode) -> None: + shell = node.host.get_shell() + unlock_ip = ( + shell.exec("iptables -L --numeric | grep DROP | awk '{print $4}'") + .stdout.strip() + .split("\n") + ) + if unlock_ip[0] == "": + return + for ip in unlock_ip: + shell.exec(f"iptables -D INPUT -s {ip} -j DROP") diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 3a2b509..2d439d9 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -1,13 +1,16 @@ import copy +import itertools import time import frostfs_testlib.resources.optionals as optionals from frostfs_testlib.reporter import get_reporter from frostfs_testlib.shell import CommandOptions, Shell +from frostfs_testlib.steps import epoch +from frostfs_testlib.steps.iptables import IpTablesHelper from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode from frostfs_testlib.storage.controllers.disk_controller import DiskController from frostfs_testlib.testing import parallel -from frostfs_testlib.testing.test_control import run_optionally +from frostfs_testlib.testing.test_control import run_optionally, wait_for_success from frostfs_testlib.utils.failover_utils import ( wait_all_storage_nodes_returned, wait_for_host_offline, @@ -24,6 +27,7 @@ class ClusterStateController: self.detached_disks: dict[str, DiskController] = {} self.stopped_storage_nodes: list[ClusterNode] = [] self.stopped_s3_gates: list[ClusterNode] = [] + self.dropped_traffic: list[ClusterNode] = [] self.cluster = cluster self.shell = shell self.suspended_services: dict[str, list[ClusterNode]] = {} @@ -191,6 +195,62 @@ class ClusterStateController: [node.host.wait_success_resume_process(process_name) for node in list_nodes] self.suspended_services = {} + @reporter.step_deco("Drop traffic to {node}, with ports - {ports}, nodes - {block_nodes}") + def drop_traffic( + self, + mode: str, + node: ClusterNode, + wakeup_timeout: int, + ports: list[str] = None, + block_nodes: list[ClusterNode] = None, + ) -> None: + allowed_modes = ["ports", "nodes"] + assert mode in allowed_modes + + match mode: + case "ports": + IpTablesHelper.drop_input_traffic_to_port(node, ports) + case "nodes": + list_ip = self._parse_intefaces(block_nodes) + IpTablesHelper.drop_input_traffic_to_node(node, list_ip) + time.sleep(wakeup_timeout) + self.dropped_traffic.append(node) + + @reporter.step_deco("Ping traffic") + def ping_traffic( + self, + node: ClusterNode, + nodes_list: list[ClusterNode], + expect_result: int, + ) -> bool: + shell = node.host.get_shell() + options = CommandOptions(check=False) + ips = 
self._parse_intefaces(nodes_list) + for ip in ips: + code = shell.exec(f"ping {ip} -c 1", options).return_code + if code != expect_result: + return False + return True + + @reporter.step_deco("Start traffic to {node}") + def restore_traffic( + self, + mode: str, + node: ClusterNode, + ) -> None: + allowed_modes = ["ports", "nodes"] + assert mode in allowed_modes + + match mode: + case "ports": + IpTablesHelper.restore_input_traffic_to_port(node=node) + case "nodes": + IpTablesHelper.restore_input_traffic_to_node(node=node) + + @reporter.step_deco("Restore blocked nodes") + def restore_all_traffic(self): + parallel(self._restore_traffic_to_node, self.dropped_traffic) + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Hard reboot host {node} via magic SysRq option") def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True): @@ -217,3 +277,16 @@ class ClusterStateController: disk_controller = DiskController(node, device, mountpoint) return disk_controller + + def _restore_traffic_to_node(self, node): + IpTablesHelper.restore_input_traffic_to_port(node) + IpTablesHelper.restore_input_traffic_to_node(node) + + def _parse_intefaces(self, nodes: list[ClusterNode]): + interfaces = [] + for node in nodes: + dict_interfaces = node.host.config.interfaces + for type, ip in dict_interfaces.items(): + if "mgmt" not in type: + interfaces.append(ip) + return interfaces diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py index dd46740..7747ea8 100644 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -23,3 +23,21 @@ class StorageObjectInfo(ObjectRef): attributes: Optional[list[dict[str, str]]] = None tombstone: Optional[str] = None locks: Optional[list[LockObjectInfo]] = None + + +@dataclass +class NodeNetmapInfo: + node_id: str + node_status: str + node_data_ip: str + continent: str + country: str + country_code: str + external_address: str + location: str + node: str + price: int + sub_div: str + sub_div_code: int + un_locode: str + role: str diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index d869714..5bd4695 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -5,18 +5,21 @@ """ Helper functions to use with `frostfs-cli`, `neo-go` and other CLIs. """ +import csv import json import logging import subprocess import sys from contextlib import suppress from datetime import datetime +from io import StringIO from textwrap import shorten -from typing import TypedDict, Union +from typing import Dict, List, TypedDict, Union import pexpect from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetmapInfo reporter = get_reporter() logger = logging.getLogger("NeoLogger") @@ -131,3 +134,49 @@ def log_command_execution(cmd: str, output: Union[str, TypedDict]) -> None: command_attachment = f"COMMAND: '{cmd}'\n" f"OUTPUT:\n {output}\n" with reporter.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'): reporter.attach(command_attachment, "Command execution") + + +def parse_netmap_output(output: str) -> list[NodeNetmapInfo]: + """ + The cli command will return something like. 
+ + Epoch: 240 + Node 1: 01234 ONLINE /ip4/10.10.10.10/tcp/8080 + Continent: Europe + Country: Russia + CountryCode: RU + ExternalAddr: /ip4/10.10.11.18/tcp/8080 + Location: Moskva + Node: 10.10.10.12 + Price: 5 + SubDiv: Moskva + SubDivCode: MOW + UN-LOCODE: RU MOW + role: alphabet + + The code will parse each line and return each node as dataclass. + """ + netmap_list = output.split("Node ")[1:] + dataclass_list = [] + for node in netmap_list: + node = node.replace("\t", "").split("\n") + node = *node[0].split(" ")[1:-1], *[row.split(": ")[-1] for row in node[1:-1]] + dataclass_list.append(NodeNetmapInfo(*node)) + + return dataclass_list + + +def parse_cmd_table(output: str, delimiter="|") -> list[dict[str, str]]: + parsing_output = [] + reader = csv.reader(StringIO(output.strip()), delimiter=delimiter) + iter_reader = iter(reader) + header_row = next(iter_reader) + for row in iter_reader: + table = {} + for i in range(len(row)): + header = header_row[i].strip().lower().replace(" ", "_") + value = row[i].strip().lower() + if header: + table[header] = value + parsing_output.append(table) + return parsing_output From 7112bf9c88e89ca61d36be885aa3c99d08cfde38 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Thu, 17 Aug 2023 12:54:05 +0300 Subject: [PATCH 127/363] Change NodeNetmapInfo class Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/storage/dataclasses/storage_object_info.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py index 7747ea8..ea3c510 100644 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -30,6 +30,7 @@ class NodeNetmapInfo: node_id: str node_status: str node_data_ip: str + cluster_name: str continent: str country: str country_code: str From 70595965068404d3eac3c4a5e9feb58ab06e7e7a Mon Sep 17 00:00:00 2001 From: "m.malygina" Date: Mon, 21 Aug 2023 14:55:27 +0300 Subject: [PATCH 128/363] Support prepare locally flag Signed-off-by: m.malygina --- src/frostfs_testlib/load/load_config.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index ec5d3fd..3a7e0b4 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -178,6 +178,10 @@ class LoadParams: min_iteration_duration: Optional[str] = metadata_field( all_load_scenarios, None, "K6_MIN_ITERATION_DURATION", False ) + # Prepare/cut objects locally on client before sending + prepare_locally: Optional[bool] = metadata_field( + [LoadScenario.gRPC, LoadScenario.gRPC_CAR], None, "PREPARE_LOCALLY", False + ) # Specifies K6 setupTimeout time. 
Currently hardcoded in xk6 as 5 seconds for all scenarios # https://k6.io/docs/using-k6/k6-options/reference/#setup-timeout setup_timeout: Optional[str] = metadata_field(all_scenarios, None, "K6_SETUP_TIMEOUT", False) From aa277fdd6af9c0468ef3ea198b8b494b7bc0e855 Mon Sep 17 00:00:00 2001 From: anikeev-yadro Date: Tue, 29 Aug 2023 16:55:25 +0300 Subject: [PATCH 129/363] Increase default load time Signed-off-by: anikeev-yadro --- src/frostfs_testlib/resources/load_params.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/resources/load_params.py b/src/frostfs_testlib/resources/load_params.py index 6699207..2ced33d 100644 --- a/src/frostfs_testlib/resources/load_params.py +++ b/src/frostfs_testlib/resources/load_params.py @@ -11,7 +11,7 @@ BACKGROUND_WRITERS_COUNT = os.getenv("BACKGROUND_WRITERS_COUNT", 0) BACKGROUND_READERS_COUNT = os.getenv("BACKGROUND_READERS_COUNT", 0) BACKGROUND_DELETERS_COUNT = os.getenv("BACKGROUND_DELETERS_COUNT", 0) BACKGROUND_VERIFIERS_COUNT = os.getenv("BACKGROUND_VERIFIERS_COUNT", 0) -BACKGROUND_LOAD_DEFAULT_TIME = os.getenv("BACKGROUND_LOAD_DEFAULT_TIME", 600) +BACKGROUND_LOAD_DEFAULT_TIME = os.getenv("BACKGROUND_LOAD_DEFAULT_TIME", 1200) BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE = os.getenv("BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE", 32) BACKGROUND_LOAD_SETUP_TIMEOUT = os.getenv("BACKGROUND_LOAD_SETUP_TIMEOUT", "5s") From 449c18bb1a4be35dc444dfad2af67e279435c7f0 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 30 Aug 2023 15:28:12 +0300 Subject: [PATCH 130/363] Adding options to work with any service type Signed-off-by: Andrey Berezin --- .../controllers/cluster_state_controller.py | 51 +++++++++++++++++-- .../storage/dataclasses/node_base.py | 14 +++-- 2 files changed, 58 insertions(+), 7 deletions(-) diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 2d439d9..0148c0d 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -1,16 +1,15 @@ import copy -import itertools import time import frostfs_testlib.resources.optionals as optionals from frostfs_testlib.reporter import get_reporter from frostfs_testlib.shell import CommandOptions, Shell -from frostfs_testlib.steps import epoch from frostfs_testlib.steps.iptables import IpTablesHelper from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode from frostfs_testlib.storage.controllers.disk_controller import DiskController +from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass from frostfs_testlib.testing import parallel -from frostfs_testlib.testing.test_control import run_optionally, wait_for_success +from frostfs_testlib.testing.test_control import run_optionally from frostfs_testlib.utils.failover_utils import ( wait_all_storage_nodes_returned, wait_for_host_offline, @@ -28,6 +27,7 @@ class ClusterStateController: self.stopped_storage_nodes: list[ClusterNode] = [] self.stopped_s3_gates: list[ClusterNode] = [] self.dropped_traffic: list[ClusterNode] = [] + self.stopped_services: set[NodeBase] = set() self.cluster = cluster self.shell = shell self.suspended_services: dict[str, list[ClusterNode]] = {} @@ -128,6 +128,51 @@ class ClusterStateController: node.storage_node.stop_service() self.stopped_storage_nodes.append(node) + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Stop all {service_type} 
services") + def stop_services_of_type(self, service_type: type[ServiceClass]): + services = self.cluster.services(service_type) + self.stopped_services.update(services) + parallel([service.stop_service for service in services]) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Start all {service_type} services") + def start_services_of_type(self, service_type: type[ServiceClass]): + services = self.cluster.services(service_type) + parallel([service.start_service for service in services]) + + if service_type == StorageNode: + wait_all_storage_nodes_returned(self.shell, self.cluster) + + self.stopped_services = self.stopped_services - set(services) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Start all stopped services") + def start_all_stopped_services(self): + parallel([service.start_service for service in self.stopped_services]) + + for service in self.stopped_services: + if isinstance(service, StorageNode): + wait_all_storage_nodes_returned(self.shell, self.cluster) + break + + self.stopped_services.clear() + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Stop {service_type} service on {node}") + def stop_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass]): + service = node.service(service_type) + service.stop_service() + self.stopped_services.add(service) + + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Start {service_type} service on {node}") + def start_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass]): + service = node.service(service_type) + service.start_service() + if service in self.stopped_services: + self.stopped_services.remove(service) + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start storage service on {node}") def start_storage_service(self, node: ClusterNode): diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 9748bc2..3b1964c 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -1,4 +1,4 @@ -from abc import ABC, abstractmethod +from abc import abstractmethod from dataclasses import dataclass from typing import Optional, Tuple, TypedDict, TypeVar @@ -6,10 +6,13 @@ import yaml from frostfs_testlib.hosting.config import ServiceConfig from frostfs_testlib.hosting.interfaces import Host +from frostfs_testlib.reporter import get_reporter from frostfs_testlib.storage.constants import ConfigAttributes from frostfs_testlib.testing.readable import HumanReadableABC from frostfs_testlib.utils import wallet_utils +reporter = get_reporter() + @dataclass class NodeBase(HumanReadableABC): @@ -54,17 +57,20 @@ class NodeBase(HumanReadableABC): return self._process_name def start_service(self): - self.host.start_service(self.name) + with reporter.step(f"Start {self.name} service on {self.host.config.address}"): + self.host.start_service(self.name) @abstractmethod def service_healthcheck(self) -> bool: """Service healthcheck.""" def stop_service(self): - self.host.stop_service(self.name) + with reporter.step(f"Stop {self.name} service on {self.host.config.address}"): + self.host.stop_service(self.name) def restart_service(self): - self.host.restart_service(self.name) + with reporter.step(f"Restart {self.name} service on {self.host.config.address}"): + self.host.restart_service(self.name) def get_wallet_password(self) -> str: return 
self._get_attribute(ConfigAttributes.WALLET_PASSWORD) From e14896400f767ecac8fa381ee3fe7f9f34725d68 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 6 Sep 2023 16:51:18 +0300 Subject: [PATCH 131/363] Add post-init for load params Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/load_config.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 3a7e0b4..9023f87 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -271,6 +271,16 @@ class LoadParams: return env_vars + def __post_init__(self): + default_scenario_map = { + LoadType.gRPC: LoadScenario.gRPC, + LoadType.HTTP: LoadScenario.HTTP, + LoadType.S3: LoadScenario.S3, + } + + if self.scenario is None: + self.scenario = default_scenario_map[self.load_type] + def get_preset_arguments(self): command_args = [ self._get_preset_argument(meta_field) @@ -324,7 +334,7 @@ class LoadParams: # TODO: migrate load_params defaults to testlib if self.object_size is not None: size, unit = calc_unit(self.object_size, 1) - static_params = [f"{load_type_str} ({size:.4g} {unit})"] + static_params = [f"{load_type_str} {size:.4g} {unit}"] else: static_params = [f"{load_type_str}"] @@ -335,7 +345,7 @@ class LoadParams: ] params = ", ".join(static_params + dynamic_params) - return f"load: {params}" + return params def __repr__(self) -> str: return self.__str__() From f2d34dbf2e44a37e5595fb6b27da0770c2367178 Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Wed, 6 Sep 2023 20:47:30 +0300 Subject: [PATCH 132/363] add latency report --- src/frostfs_testlib/load/load_metrics.py | 26 +++++++++++++++++++++++- src/frostfs_testlib/load/load_report.py | 23 ++++++++++++++++++++- 2 files changed, 47 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/load/load_metrics.py b/src/frostfs_testlib/load/load_metrics.py index 0b4e28e..6b44de0 100644 --- a/src/frostfs_testlib/load/load_metrics.py +++ b/src/frostfs_testlib/load/load_metrics.py @@ -8,12 +8,15 @@ class MetricsBase(ABC): _WRITE_SUCCESS = "" _WRITE_ERRORS = "" _WRITE_THROUGHPUT = "data_sent" + _WRITE_LATENCY = "" _READ_SUCCESS = "" _READ_ERRORS = "" + _READ_LATENCY = "" _READ_THROUGHPUT = "data_received" _DELETE_SUCCESS = "" + _DELETE_LATENCY = "" _DELETE_ERRORS = "" def __init__(self, summary) -> None: @@ -27,6 +30,10 @@ class MetricsBase(ABC): @property def write_success_iterations(self) -> int: return self._get_metric(self._WRITE_SUCCESS) + + @property + def write_latency(self) -> dict: + return self._get_metric(self._WRITE_LATENCY) @property def write_rate(self) -> float: @@ -47,6 +54,10 @@ class MetricsBase(ABC): @property def read_success_iterations(self) -> int: return self._get_metric(self._READ_SUCCESS) + + @property + def read_latency(self) -> dict: + return self._get_metric(self._READ_LATENCY) @property def read_rate(self) -> int: @@ -67,6 +78,10 @@ class MetricsBase(ABC): @property def delete_success_iterations(self) -> int: return self._get_metric(self._DELETE_SUCCESS) + + @property + def delete_latency(self) -> dict: + return self._get_metric(self._DELETE_LATENCY) @property def delete_failed_iterations(self) -> int: @@ -77,7 +92,7 @@ class MetricsBase(ABC): return self._get_metric_rate(self._DELETE_SUCCESS) def _get_metric(self, metric: str) -> int: - metrics_method_map = {"counter": self._get_counter_metric, "gauge": self._get_gauge_metric} + metrics_method_map = {"counter": self._get_counter_metric, 
"gauge": self._get_gauge_metric, "trend" : self._get_trend_metrics} if metric not in self.metrics: return 0 @@ -114,28 +129,37 @@ class MetricsBase(ABC): def _get_gauge_metric(self, metric: str) -> int: return metric["values"]["value"] + + def _get_trend_metrics(self, metric: str) -> int: + return metric["values"] class GrpcMetrics(MetricsBase): _WRITE_SUCCESS = "frostfs_obj_put_total" _WRITE_ERRORS = "frostfs_obj_put_fails" + _WRITE_LATENCY = "frostfs_obj_put_duration" _READ_SUCCESS = "frostfs_obj_get_total" _READ_ERRORS = "frostfs_obj_get_fails" + _READ_LATENCY = "frostfs_obj_get_duration" _DELETE_SUCCESS = "frostfs_obj_delete_total" _DELETE_ERRORS = "frostfs_obj_delete_fails" + _DELETE_LATENCY = "frostfs_obj_delete_duration" class S3Metrics(MetricsBase): _WRITE_SUCCESS = "aws_obj_put_total" _WRITE_ERRORS = "aws_obj_put_fails" + _WRITE_LATENCY = "aws_obj_put_duration" _READ_SUCCESS = "aws_obj_get_total" _READ_ERRORS = "aws_obj_get_fails" + _READ_LATENCY = "aws_obj_get_duration" _DELETE_SUCCESS = "aws_obj_delete_total" _DELETE_ERRORS = "aws_obj_delete_fails" + _DELETE_LATENCY = "aws_obj_delete_duration" class LocalMetrics(MetricsBase): diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py index a2cecf6..26ab542 100644 --- a/src/frostfs_testlib/load/load_report.py +++ b/src/frostfs_testlib/load/load_report.py @@ -2,6 +2,7 @@ from datetime import datetime from typing import Optional import yaml +import os from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario from frostfs_testlib.load.load_metrics import get_metrics_object @@ -109,6 +110,7 @@ class LoadReport: total_rate: float, throughput: float, errors: dict[str, int], + latency: dict[str, dict], ): throughput_html = "" if throughput > 0: @@ -127,6 +129,15 @@ class LoadReport: ): per_node_errors_html += self._row(f"At {node_key}", errors) + latency_html = "" + if latency: + for node_key, param_dict in latency.items(): + latency_values = "" + for param_name, param_val in param_dict.items(): + latency_values += f"{param_name}={param_val:.2f}ms " + + latency_html += self._row(f"Put latency {node_key.split(':')[0]}", latency_values) + object_size, object_size_unit = calc_unit(self.load_params.object_size, 1) duration = self._seconds_to_formatted_duration(self.load_params.load_time) model = self._get_model_string() @@ -135,6 +146,7 @@ class LoadReport: errors_percent = 0 if total_operations: errors_percent = total_errors/total_operations*100.0 + html = f""" @@ -142,7 +154,7 @@ class LoadReport: {self._row("Total operations", total_operations)} {self._row("OP/sec", f"{total_rate:.2f}")} {throughput_html} - + {latency_html} {per_node_errors_html} {self._row("Total", f"{total_errors} ({errors_percent:.2f}%)")} @@ -160,6 +172,7 @@ class LoadReport: write_operations = 0 write_op_sec = 0 write_throughput = 0 + write_latency = {} write_errors = {} requested_write_rate = self.load_params.write_rate requested_write_rate_str = ( @@ -169,12 +182,14 @@ class LoadReport: read_operations = 0 read_op_sec = 0 read_throughput = 0 + read_latency = {} read_errors = {} requested_read_rate = self.load_params.read_rate requested_read_rate_str = f"{requested_read_rate}op/sec" if requested_read_rate else "" delete_operations = 0 delete_op_sec = 0 + delete_latency = {} delete_errors = {} requested_delete_rate = self.load_params.delete_rate requested_delete_rate_str = ( @@ -210,6 +225,7 @@ class LoadReport: if write_operations: write_section_required = True write_op_sec += 
metrics.write_rate + write_latency[node_key] = metrics.write_latency write_throughput += metrics.write_throughput if metrics.write_failed_iterations: write_errors[node_key] = metrics.write_failed_iterations @@ -219,6 +235,7 @@ class LoadReport: read_section_required = True read_op_sec += metrics.read_rate read_throughput += metrics.read_throughput + read_latency[node_key] = metrics.read_latency if metrics.read_failed_iterations: read_errors[node_key] = metrics.read_failed_iterations @@ -226,6 +243,7 @@ class LoadReport: if delete_operations: delete_section_required = True delete_op_sec += metrics.delete_rate + delete_latency[node_key] = metrics.delete_latency if metrics.delete_failed_iterations: delete_errors[node_key] = metrics.delete_failed_iterations @@ -238,6 +256,7 @@ class LoadReport: write_op_sec, write_throughput, write_errors, + write_latency, ) if read_section_required: @@ -249,6 +268,7 @@ class LoadReport: read_op_sec, read_throughput, read_errors, + read_latency, ) if delete_section_required: @@ -260,6 +280,7 @@ class LoadReport: delete_op_sec, 0, delete_errors, + delete_latency, ) return html From 19b8b96898cee72bff2166b5cf5765347e34fd64 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Thu, 7 Sep 2023 14:36:46 +0300 Subject: [PATCH 133/363] Use only name in ObjectSize repr and str Signed-off-by: Andrey Berezin --- src/frostfs_testlib/storage/dataclasses/object_size.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/storage/dataclasses/object_size.py b/src/frostfs_testlib/storage/dataclasses/object_size.py index 520bdc3..0429c78 100644 --- a/src/frostfs_testlib/storage/dataclasses/object_size.py +++ b/src/frostfs_testlib/storage/dataclasses/object_size.py @@ -7,7 +7,7 @@ class ObjectSize: value: int def __str__(self) -> str: - return f"{self.name} object size" + return self.name def __repr__(self) -> str: return self.__str__() From ecf8f0841a1ea01494572fedab41cfcf288c200f Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Mon, 11 Sep 2023 10:36:54 +0300 Subject: [PATCH 134/363] Change NodeNetmapInfo class Signed-off-by: Dmitriy Zayakin --- .../dataclasses/storage_object_info.py | 29 +++++++++---------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py index ea3c510..21a820f 100644 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -27,18 +27,17 @@ class StorageObjectInfo(ObjectRef): @dataclass class NodeNetmapInfo: - node_id: str - node_status: str - node_data_ip: str - cluster_name: str - continent: str - country: str - country_code: str - external_address: str - location: str - node: str - price: int - sub_div: str - sub_div_code: int - un_locode: str - role: str + node_id: str = None + node_status: str = None + node_data_ip: str = None + cluster_name: str = None + continent: str = None + country: str = None + country_code: str = None + external_address: str = None + location: str = None + node: str = None + sub_div: str = None + sub_div_code: int = None + un_locode: str = None + role: str = None From f7ef8cb8814a76e98145255e20be091c5acf3a69 Mon Sep 17 00:00:00 2001 From: anikeev-yadro Date: Fri, 15 Sep 2023 12:30:58 +0300 Subject: [PATCH 135/363] Another increase default load time Signed-off-by: anikeev-yadro --- src/frostfs_testlib/resources/load_params.py | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/src/frostfs_testlib/resources/load_params.py b/src/frostfs_testlib/resources/load_params.py index 2ced33d..bd99859 100644 --- a/src/frostfs_testlib/resources/load_params.py +++ b/src/frostfs_testlib/resources/load_params.py @@ -11,7 +11,7 @@ BACKGROUND_WRITERS_COUNT = os.getenv("BACKGROUND_WRITERS_COUNT", 0) BACKGROUND_READERS_COUNT = os.getenv("BACKGROUND_READERS_COUNT", 0) BACKGROUND_DELETERS_COUNT = os.getenv("BACKGROUND_DELETERS_COUNT", 0) BACKGROUND_VERIFIERS_COUNT = os.getenv("BACKGROUND_VERIFIERS_COUNT", 0) -BACKGROUND_LOAD_DEFAULT_TIME = os.getenv("BACKGROUND_LOAD_DEFAULT_TIME", 1200) +BACKGROUND_LOAD_DEFAULT_TIME = os.getenv("BACKGROUND_LOAD_DEFAULT_TIME", 1800) BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE = os.getenv("BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE", 32) BACKGROUND_LOAD_SETUP_TIMEOUT = os.getenv("BACKGROUND_LOAD_SETUP_TIMEOUT", "5s") From be9b3f585529d47a89762b8c8f8948ce90004408 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Tue, 12 Sep 2023 17:40:45 +0300 Subject: [PATCH 136/363] Update argument func init s3 Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/steps/s3/s3_helper.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py index 4b900eb..d746337 100644 --- a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3/s3_helper.py @@ -194,6 +194,7 @@ def init_s3_credentials( cluster: Cluster, policy: Optional[dict] = None, s3gates: Optional[list[S3Gate]] = None, + container_placement_policy: Optional[str] = None, ): gate_public_keys = [] bucket = str(uuid.uuid4()) @@ -209,6 +210,7 @@ def init_s3_credentials( wallet_password=wallet.password, container_policy=policy, container_friendly_name=bucket, + container_placement_policy=container_placement_policy, ).stdout aws_access_key_id = str( re.search(r"access_key_id.*:\s.(?P\w*)", issue_secret_output).group( From b039ee99401ccd760fdab7cfae3e87cef86825dd Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 18 Sep 2023 17:48:30 +0300 Subject: [PATCH 137/363] Dev Env should not use sudo by default --- src/frostfs_testlib/hosting/docker_host.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 3addd92..e2bc949 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -61,7 +61,7 @@ class ServiceAttributes(ParsedAttributes): class DockerHost(Host): """Manages services hosted in Docker containers running on a local or remote machine.""" - def get_shell(self, sudo: bool = True) -> Shell: + def get_shell(self, sudo: bool = False) -> Shell: host_attributes = HostAttributes.parse(self._config.attributes) command_inspectors = [] if sudo: From fc1f37347783bf326e5ec9653140a613cccb2383 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 19 Sep 2023 11:59:05 +0300 Subject: [PATCH 138/363] Adding interval between ssh connection attempts Signed-off-by: Andrey Berezin --- src/frostfs_testlib/shell/ssh_shell.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/frostfs_testlib/shell/ssh_shell.py b/src/frostfs_testlib/shell/ssh_shell.py index 5771274..435a494 100644 --- a/src/frostfs_testlib/shell/ssh_shell.py +++ b/src/frostfs_testlib/shell/ssh_shell.py @@ -91,8 +91,9 @@ class SSHShell(Shell): # to allow remote command to flush its output buffer DELAY_AFTER_EXIT = 0.2 - SSH_CONNECTION_ATTEMPTS: ClassVar[int] = 3 - 
CONNECTION_TIMEOUT = 90 + SSH_CONNECTION_ATTEMPTS: ClassVar[int] = 4 + SSH_ATTEMPTS_INTERVAL: ClassVar[int] = 10 + CONNECTION_TIMEOUT = 60 def __init__( self, @@ -251,7 +252,9 @@ class SSHShell(Shell): return (full_stdout.decode(errors="ignore"), full_stderr.decode(errors="ignore")) - def _create_connection(self, attempts: int = SSH_CONNECTION_ATTEMPTS) -> SSHClient: + def _create_connection( + self, attempts: int = SSH_CONNECTION_ATTEMPTS, interval: int = SSH_ATTEMPTS_INTERVAL + ) -> SSHClient: for attempt in range(attempts): connection = SSHClient() connection.set_missing_host_key_policy(AutoAddPolicy()) @@ -295,7 +298,10 @@ class SSHShell(Shell): connection.close() can_retry = attempt + 1 < attempts if can_retry: - logger.warn(f"Can't connect to host {self.host}, will retry. Error: {exc}") + logger.warn( + f"Can't connect to host {self.host}, will retry after {interval}s. Error: {exc}" + ) + sleep(interval) continue logger.exception(f"Can't connect to host {self.host}") raise HostIsNotAvailable(self.host) from exc From 602de43bffe5e22d6a20ce720b986acbc54bcf67 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 19 Sep 2023 18:14:32 +0300 Subject: [PATCH 139/363] Use all nodes for s3 creds --- src/frostfs_testlib/load/interfaces.py | 1 + src/frostfs_testlib/load/runners.py | 4 +++- .../storage/controllers/background_load_controller.py | 7 ++++++- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/load/interfaces.py b/src/frostfs_testlib/load/interfaces.py index 6f29868..98c9d62 100644 --- a/src/frostfs_testlib/load/interfaces.py +++ b/src/frostfs_testlib/load/interfaces.py @@ -22,6 +22,7 @@ class ScenarioRunner(ABC): def prepare( self, load_params: LoadParams, + cluster_nodes: list[ClusterNode], nodes_under_load: list[ClusterNode], k6_dir: str, ): diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index 428cd7d..a7fa787 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -72,6 +72,7 @@ class DefaultRunner(RunnerBase): def prepare( self, load_params: LoadParams, + cluster_nodes: list[ClusterNode], nodes_under_load: list[ClusterNode], k6_dir: str, ): @@ -81,7 +82,7 @@ class DefaultRunner(RunnerBase): with reporter.step("Init s3 client on loaders"): storage_node = nodes_under_load[0].service(StorageNode) s3_public_keys = [ - node.service(S3Gate).get_wallet_public_key() for node in nodes_under_load + node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes ] grpc_peer = storage_node.get_rpc_endpoint() @@ -290,6 +291,7 @@ class LocalRunner(RunnerBase): def prepare( self, load_params: LoadParams, + cluster_nodes: list[ClusterNode], nodes_under_load: list[ClusterNode], k6_dir: str, ): diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index aa17f4e..c309b65 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -26,6 +26,7 @@ class BackgroundLoadController: load_params: LoadParams original_load_params: LoadParams verification_params: LoadParams + cluster_nodes: list[ClusterNode] nodes_under_load: list[ClusterNode] load_counter: int loaders_wallet: WalletInfo @@ -39,12 +40,14 @@ class BackgroundLoadController: k6_dir: str, load_params: LoadParams, loaders_wallet: WalletInfo, + cluster_nodes: list[ClusterNode], nodes_under_load: list[ClusterNode], runner: 
ScenarioRunner, ) -> None: self.k6_dir = k6_dir self.original_load_params = load_params self.load_params = copy.deepcopy(self.original_load_params) + self.cluster_nodes = cluster_nodes self.nodes_under_load = nodes_under_load self.load_counter = 1 self.loaders_wallet = loaders_wallet @@ -99,7 +102,9 @@ class BackgroundLoadController: self.endpoints = self._get_endpoints( self.load_params.load_type, self.load_params.endpoint_selection_strategy ) - self.runner.prepare(self.load_params, self.nodes_under_load, self.k6_dir) + self.runner.prepare( + self.load_params, self.cluster_nodes, self.nodes_under_load, self.k6_dir + ) self.runner.init_k6_instances(self.load_params, self.endpoints, self.k6_dir) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) From eb37573df898dacb539a4d9227902925ae91711b Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 25 Sep 2023 16:26:45 +0300 Subject: [PATCH 140/363] [#88] Add read from switch Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/__init__.py | 1 + src/frostfs_testlib/load/load_config.py | 15 +++++++++++- tests/test_load_config.py | 32 ++++++++++++++++++------- 3 files changed, 39 insertions(+), 9 deletions(-) diff --git a/src/frostfs_testlib/load/__init__.py b/src/frostfs_testlib/load/__init__.py index e8ed75e..74b710f 100644 --- a/src/frostfs_testlib/load/__init__.py +++ b/src/frostfs_testlib/load/__init__.py @@ -7,6 +7,7 @@ from frostfs_testlib.load.load_config import ( LoadType, NodesSelectionStrategy, Preset, + ReadFrom, ) from frostfs_testlib.load.load_report import LoadReport from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 9023f87..97f5dd6 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -23,6 +23,12 @@ class LoadScenario(Enum): LOCAL = "local" +class ReadFrom(Enum): + REGISTRY = "registry" + PRESET = "preset" + MANUAL = "manual" + + all_load_scenarios = [ LoadScenario.gRPC, LoadScenario.S3, @@ -170,6 +176,8 @@ class LoadParams: load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION", False) # Object size in KB for load and preset. object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE", False) + # For read operations, controls from which set get objects to read + read_from: Optional[ReadFrom] = None # Output registry K6 file. Filled automatically. registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE", False) # Specifies the minimum duration of every single execution (i.e. iteration). 
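The ReadFrom switch added above is consumed by set_id() in the following hunk: only registry-based reads derive a .bolt registry file from the load id, while preset-based reads drop the registry file entirely. A minimal usage sketch of that behavior, assuming frostfs-testlib at this revision is importable; the paths and load ids are illustrative only:

    from frostfs_testlib.load.load_config import LoadParams, LoadType, Preset, ReadFrom

    params = LoadParams(load_type=LoadType.S3, read_from=ReadFrom.REGISTRY, working_dir="/tmp")
    params.preset = Preset()
    params.set_id("load_001")
    # Registry reads: the registry file name is derived from the load id.
    assert params.registry_file == "/tmp/load_001_registry.bolt"
    assert params.preset.pregen_json == "/tmp/load_001_prepare.json"

    # Preset reads: no registry file, so k6 reads the pre-generated objects instead.
    params.read_from = ReadFrom.PRESET
    params.set_id("load_002")
    assert params.registry_file is None
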
@@ -256,7 +264,12 @@ class LoadParams: def set_id(self, load_id): self.load_id = load_id - self.registry_file = os.path.join(self.working_dir, f"{load_id}_registry.bolt") + + if self.read_from == ReadFrom.REGISTRY: + self.registry_file = os.path.join(self.working_dir, f"{load_id}_registry.bolt") + if self.read_from == ReadFrom.PRESET: + self.registry_file = None + if self.preset: self.preset.pregen_json = os.path.join(self.working_dir, f"{load_id}_prepare.json") diff --git a/tests/test_load_config.py b/tests/test_load_config.py index a84a188..256a04b 100644 --- a/tests/test_load_config.py +++ b/tests/test_load_config.py @@ -9,7 +9,9 @@ from frostfs_testlib.load.load_config import ( LoadScenario, LoadType, Preset, + ReadFrom, ) +from frostfs_testlib.load.runners import DefaultRunner from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.controllers.background_load_controller import BackgroundLoadController from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode @@ -47,13 +49,13 @@ class TestLoadConfig: def test_load_params_only_load_type_required(self): load_params = LoadParams(load_type=LoadType.S3) - expected = "load: s3" + expected = "s3" assert repr(load_params) == expected assert f"{load_params}" == expected def test_load_params_initially_have_all_values_none(self): load_params = LoadParams(load_type=LoadType.S3) - self._check_all_values_none(load_params, ["load_type"]) + self._check_all_values_none(load_params, ["load_type", "scenario"]) def test_preset_initially_have_all_values_none(self): preset = Preset() @@ -62,14 +64,14 @@ class TestLoadConfig: @pytest.mark.parametrize("load_params", [LoadScenario.S3_CAR], indirect=True) def test_string_representation_s3_car(self, load_params: LoadParams): load_params.object_size = 524288 - expected = "load: s3_car (512 MiB), write_rate=10, read_rate=9, delete_rate=11, preallocated_writers=20, preallocated_readers=20, preallocated_deleters=21" + expected = "s3_car 512 MiB, write_rate=10, read_rate=9, delete_rate=11, preallocated_writers=20, preallocated_readers=20, preallocated_deleters=21" assert f"{load_params}" == expected assert repr(load_params) == expected @pytest.mark.parametrize("load_params", [LoadScenario.gRPC], indirect=True) def test_string_representation_grpc(self, load_params: LoadParams): load_params.object_size = 512 - expected = "load: grpc (512 KiB), writers=7, readers=7, deleters=8" + expected = "grpc 512 KiB, writers=7, readers=7, deleters=8" assert f"{load_params}" == expected assert repr(load_params) == expected @@ -78,15 +80,16 @@ class TestLoadConfig: load_params.endpoint_selection_strategy = EndpointSelectionStrategy.ALL load_params.object_size = 512 background_load_controller = BackgroundLoadController( - "tmp", load_params, "wallet", None, None + "tmp", load_params, "wallet", None, None, DefaultRunner(None) ) - expected = "load: grpc (512 KiB), writers=7, readers=7, deleters=8" + expected = "grpc 512 KiB, writers=7, readers=7, deleters=8" assert f"{background_load_controller}" == expected assert repr(background_load_controller) == expected def test_load_set_id_changes_fields(self): load_params = LoadParams(load_type=LoadType.S3) load_params.preset = Preset() + load_params.read_from = ReadFrom["REGISTRY"] load_params.working_dir = "/tmp" load_params.set_id("test_id") @@ -96,9 +99,18 @@ class TestLoadConfig: # No other values should be changed self._check_all_values_none( - load_params, ["load_type", "working_dir", "load_id", "registry_file", "preset"] + load_params, + [ 
+ "load_type", + "working_dir", + "load_id", + "registry_file", + "preset", + "scenario", + "read_from", + ], ) - self._check_all_values_none(load_params.preset, ["pregen_json"]) + self._check_all_values_none(load_params.preset, ["pregen_json", "scenario"]) @pytest.mark.parametrize("load_params", [LoadScenario.gRPC], indirect=True) def test_argument_parsing_for_grpc_scenario(self, load_params: LoadParams): @@ -120,6 +132,7 @@ class TestLoadConfig: "READERS": 7, "DELETERS": 8, "PREGEN_JSON": "pregen_json", + "PREPARE_LOCALLY": True, } self._check_preset_params(load_params, expected_preset_args) @@ -152,6 +165,7 @@ class TestLoadConfig: "WRITE_RATE": 10, "READ_RATE": 9, "DELETE_RATE": 11, + "PREPARE_LOCALLY": True, } self._check_preset_params(load_params, expected_preset_args) @@ -319,6 +333,7 @@ class TestLoadConfig: "READERS": 0, "DELETERS": 0, "PREGEN_JSON": "", + "PREPARE_LOCALLY": False, } self._check_preset_params(load_params, expected_preset_args) @@ -353,6 +368,7 @@ class TestLoadConfig: "WRITE_RATE": 0, "READ_RATE": 0, "DELETE_RATE": 0, + "PREPARE_LOCALLY": False, } self._check_preset_params(load_params, expected_preset_args) From 64f004d5a5f0298eaa0bc653f0e98172ffa57c87 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 27 Sep 2023 10:38:28 +0300 Subject: [PATCH 141/363] Add read-from to verify settings --- .../storage/controllers/background_load_controller.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index c309b65..38cdf0f 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -9,7 +9,6 @@ from frostfs_testlib.load.load_config import ( LoadScenario, LoadType, ) -from frostfs_testlib.load.load_metrics import get_metrics_object from frostfs_testlib.load.load_report import LoadReport from frostfs_testlib.load.load_verifiers import LoadVerifier from frostfs_testlib.reporter import get_reporter @@ -181,6 +180,7 @@ class BackgroundLoadController: self.verification_params = LoadParams( verify_clients=self.load_params.verify_clients, scenario=LoadScenario.VERIFY, + read_from=self.load_params.read_from, registry_file=self.load_params.registry_file, verify_time=self.load_params.verify_time, load_type=self.load_params.load_type, From 9feb8135e381b601fa9fca2104ddd09dad1daa0f Mon Sep 17 00:00:00 2001 From: "m.malygina" Date: Wed, 27 Sep 2023 16:32:29 +0300 Subject: [PATCH 142/363] local scenario Signed-off-by: m.malygina --- src/frostfs_testlib/load/load_metrics.py | 1 + src/frostfs_testlib/storage/constants.py | 1 + src/frostfs_testlib/storage/dataclasses/frostfs_services.py | 3 +++ 3 files changed, 5 insertions(+) diff --git a/src/frostfs_testlib/load/load_metrics.py b/src/frostfs_testlib/load/load_metrics.py index 6b44de0..5cec8ea 100644 --- a/src/frostfs_testlib/load/load_metrics.py +++ b/src/frostfs_testlib/load/load_metrics.py @@ -165,6 +165,7 @@ class S3Metrics(MetricsBase): class LocalMetrics(MetricsBase): _WRITE_SUCCESS = "local_obj_put_total" _WRITE_ERRORS = "local_obj_put_fails" + _WRITE_LATENCY = "local_obj_put_duration" _READ_SUCCESS = "local_obj_get_total" _READ_ERRORS = "local_obj_get_fails" diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 6deedfb..dbaac5a 100644 --- a/src/frostfs_testlib/storage/constants.py +++ 
b/src/frostfs_testlib/storage/constants.py @@ -4,6 +4,7 @@ class ConfigAttributes: WALLET_PATH = "wallet_path" WALLET_CONFIG = "wallet_config" CONFIG_PATH = "config_path" + SHARD_CONFIG_PATH = "shard_config_path" LOCAL_WALLET_PATH = "local_wallet_path" LOCAL_WALLET_CONFIG = "local_config_path" ENDPOINT_DATA_0 = "endpoint_data0" diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index ccb30d5..ac2885b 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -138,6 +138,9 @@ class StorageNode(NodeBase): ) return health_metric in output + def get_shard_config_path(self) -> str: + return self._get_attribute(ConfigAttributes.SHARD_CONFIG_PATH) + def get_control_endpoint(self) -> str: return self._get_attribute(ConfigAttributes.CONTROL_ENDPOINT) From 98ccd4c38259ef81ef4c5eeacc22bba0c4d761f4 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 3 Oct 2023 15:18:29 +0300 Subject: [PATCH 143/363] [#91] Failover enhancements Signed-off-by: Andrey Berezin --- pyproject.toml | 3 + src/frostfs_testlib/cli/frostfs_cli/cli.py | 2 + src/frostfs_testlib/cli/frostfs_cli/tree.py | 29 +++++++++ .../healthcheck/basic_healthcheck.py | 14 ++++ src/frostfs_testlib/healthcheck/interfaces.py | 9 +++ src/frostfs_testlib/hosting/config.py | 2 + src/frostfs_testlib/load/interfaces.py | 2 +- src/frostfs_testlib/load/k6.py | 39 +++++++++-- src/frostfs_testlib/load/load_config.py | 23 +++++++ src/frostfs_testlib/load/load_metrics.py | 16 +++-- src/frostfs_testlib/load/load_report.py | 21 +++--- src/frostfs_testlib/load/load_verifiers.py | 64 ++++++++++++------- src/frostfs_testlib/load/runners.py | 4 +- src/frostfs_testlib/resources/load_params.py | 3 +- .../controllers/background_load_controller.py | 23 ++++--- src/frostfs_testlib/testing/parallel.py | 2 +- 16 files changed, 200 insertions(+), 56 deletions(-) create mode 100644 src/frostfs_testlib/cli/frostfs_cli/tree.py create mode 100644 src/frostfs_testlib/healthcheck/basic_healthcheck.py create mode 100644 src/frostfs_testlib/healthcheck/interfaces.py diff --git a/pyproject.toml b/pyproject.toml index f85b883..778e2fc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -44,6 +44,9 @@ allure = "frostfs_testlib.reporter.allure_handler:AllureHandler" [project.entry-points."frostfs.testlib.hosting"] docker = "frostfs_testlib.hosting.docker_host:DockerHost" +[project.entry-points."frostfs.testlib.healthcheck"] +basic = "frostfs_testlib.healthcheck.basic_healthcheck:BasicHealthcheck" + [tool.isort] profile = "black" src_paths = ["src", "tests"] diff --git a/src/frostfs_testlib/cli/frostfs_cli/cli.py b/src/frostfs_testlib/cli/frostfs_cli/cli.py index 5d55f55..a78da8b 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/cli.py +++ b/src/frostfs_testlib/cli/frostfs_cli/cli.py @@ -8,6 +8,7 @@ from frostfs_testlib.cli.frostfs_cli.object import FrostfsCliObject from frostfs_testlib.cli.frostfs_cli.session import FrostfsCliSession from frostfs_testlib.cli.frostfs_cli.shards import FrostfsCliShards from frostfs_testlib.cli.frostfs_cli.storagegroup import FrostfsCliStorageGroup +from frostfs_testlib.cli.frostfs_cli.tree import FrostfsCliTree from frostfs_testlib.cli.frostfs_cli.util import FrostfsCliUtil from frostfs_testlib.cli.frostfs_cli.version import FrostfsCliVersion from frostfs_testlib.shell import Shell @@ -36,3 +37,4 @@ class FrostfsCli: self.storagegroup = FrostfsCliStorageGroup(shell, 
frostfs_cli_exec_path, config=config_file) self.util = FrostfsCliUtil(shell, frostfs_cli_exec_path, config=config_file) self.version = FrostfsCliVersion(shell, frostfs_cli_exec_path, config=config_file) + self.tree = FrostfsCliTree(shell, frostfs_cli_exec_path, config=config_file) diff --git a/src/frostfs_testlib/cli/frostfs_cli/tree.py b/src/frostfs_testlib/cli/frostfs_cli/tree.py new file mode 100644 index 0000000..af330fe --- /dev/null +++ b/src/frostfs_testlib/cli/frostfs_cli/tree.py @@ -0,0 +1,29 @@ +from typing import Optional + +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult + + +class FrostfsCliTree(CliCommand): + def healthcheck( + self, + wallet: Optional[str] = None, + rpc_endpoint: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Get internal balance of FrostFS account + + Args: + address: Address of wallet account. + owner: Owner of balance account (omit to use owner from private key). + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + + Returns: + Command's result. + + """ + return self._execute( + "tree healthcheck", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/frostfs_testlib/healthcheck/basic_healthcheck.py b/src/frostfs_testlib/healthcheck/basic_healthcheck.py new file mode 100644 index 0000000..9ec8694 --- /dev/null +++ b/src/frostfs_testlib/healthcheck/basic_healthcheck.py @@ -0,0 +1,14 @@ +from frostfs_testlib.healthcheck.interfaces import Healthcheck +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.steps.node_management import storage_node_healthcheck +from frostfs_testlib.storage.cluster import ClusterNode + +reporter = get_reporter() + + +class BasicHealthcheck(Healthcheck): + @reporter.step_deco("Perform healthcheck for {cluster_node}") + def perform_healthcheck(self, cluster_node: ClusterNode): + health_check = storage_node_healthcheck(cluster_node.storage_node) + if health_check.health_status != "READY" or health_check.network_status != "ONLINE": + raise AssertionError("Node {cluster_node} is not healthy") diff --git a/src/frostfs_testlib/healthcheck/interfaces.py b/src/frostfs_testlib/healthcheck/interfaces.py new file mode 100644 index 0000000..0c77957 --- /dev/null +++ b/src/frostfs_testlib/healthcheck/interfaces.py @@ -0,0 +1,9 @@ +from abc import ABC, abstractmethod + +from frostfs_testlib.storage.cluster import ClusterNode + + +class Healthcheck(ABC): + @abstractmethod + def perform(self, cluster_node: ClusterNode): + """Perform healthcheck on the target cluster node""" diff --git a/src/frostfs_testlib/hosting/config.py b/src/frostfs_testlib/hosting/config.py index 6679470..88fe3e7 100644 --- a/src/frostfs_testlib/hosting/config.py +++ b/src/frostfs_testlib/hosting/config.py @@ -52,6 +52,7 @@ class HostConfig: Attributes: plugin_name: Name of plugin that should be used to manage the host. + healthcheck_plugin_name: Name of the plugin for healthcheck operations. address: Address of the machine (IP or DNS name). services: List of services hosted on the machine. clis: List of CLI tools available on the machine. 
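The healthcheck_plugin_name field added to HostConfig in the hunk below names an implementation registered under the "frostfs.testlib.healthcheck" entry point group declared in pyproject.toml above. A minimal resolution sketch, assuming the Python 3.10+ importlib.metadata API; the library's own plugin-loading helper may differ in details:

    from importlib.metadata import entry_points

    # Resolve the class registered as "basic" in the healthcheck entry point group.
    healthcheck_cls = entry_points(group="frostfs.testlib.healthcheck")["basic"].load()
    healthcheck = healthcheck_cls()  # -> BasicHealthcheck

    # Healthcheck.perform(cluster_node) raises AssertionError unless the storage
    # node reports health_status == "READY" and network_status == "ONLINE".
    # (The concrete method is renamed from perform_healthcheck to perform in a
    # follow-up patch later in this series.)
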
@@ -60,6 +61,7 @@ class HostConfig: """ plugin_name: str + healthcheck_plugin_name: str address: str services: list[ServiceConfig] = field(default_factory=list) clis: list[CLIConfig] = field(default_factory=list) diff --git a/src/frostfs_testlib/load/interfaces.py b/src/frostfs_testlib/load/interfaces.py index 98c9d62..394fff7 100644 --- a/src/frostfs_testlib/load/interfaces.py +++ b/src/frostfs_testlib/load/interfaces.py @@ -50,7 +50,7 @@ class ScenarioRunner(ABC): """Returns True if load is running at the moment""" @abstractmethod - def wait_until_finish(self): + def wait_until_finish(self, soft_timeout: int = 0): """Wait until load is finished""" @abstractmethod diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index cb3576e..e7a2b39 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -3,6 +3,7 @@ import logging import math import os from dataclasses import dataclass +from datetime import datetime from time import sleep from typing import Any from urllib.parse import urlparse @@ -39,6 +40,7 @@ class LoadResults: class K6: _k6_process: RemoteProcess + _start_time: datetime def __init__( self, @@ -122,6 +124,7 @@ class K6: with reporter.step( f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}" ): + self._start_time = int(datetime.utcnow().timestamp()) command = ( f"{self._k6_dir}/k6 run {self._generate_env_variables()} " f"{self._k6_dir}/scenarios/{self.load_params.scenario.value}.js" @@ -131,7 +134,7 @@ class K6: command, self.shell, self.load_params.working_dir, user ) - def wait_until_finished(self) -> None: + def wait_until_finished(self, soft_timeout: int = 0) -> None: with reporter.step( f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}" ): @@ -140,9 +143,36 @@ class K6: else: timeout = self.load_params.load_time or 0 - timeout += int(K6_TEARDOWN_PERIOD) + current_time = int(datetime.utcnow().timestamp()) + working_time = current_time - self._start_time + remaining_time = timeout - working_time + + setup_teardown_time = ( + int(K6_TEARDOWN_PERIOD) + + self.load_params.get_init_time() + + int(self.load_params.setup_timeout.replace("s", "").strip()) + ) + remaining_time_including_setup_and_teardown = remaining_time + setup_teardown_time + timeout = remaining_time_including_setup_and_teardown + + if soft_timeout: + timeout = min(timeout, soft_timeout) + original_timeout = timeout + timeouts = { + "K6 start time": self._start_time, + "Current time": current_time, + "K6 working time": working_time, + "Remaining time for load": remaining_time, + "Setup and teardown": setup_teardown_time, + "Remaining time including setup/teardown": remaining_time_including_setup_and_teardown, + "Soft timeout": soft_timeout, + "Selected timeout": original_timeout, + } + + reporter.attach("\n".join([f"{k}: {v}" for k, v in timeouts.items()]), "timeouts.txt") + min_wait_interval = 10 wait_interval = min_wait_interval if self._k6_process is None: @@ -162,7 +192,8 @@ class K6: return self.stop() - raise TimeoutError(f"Expected K6 to finish after {original_timeout} sec.") + if not soft_timeout: + raise TimeoutError(f"Expected K6 to finish after {original_timeout} sec.") def get_results(self) -> Any: with reporter.step( @@ -187,7 +218,7 @@ class K6: def stop(self) -> None: with reporter.step(f"Stop load from loader {self.loader.ip} on endpoints {self.endpoints}"): - if self.is_running: + if self.is_running(): self._k6_process.stop() self._wait_until_process_end() diff --git 
a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 97f5dd6..678fc38 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -1,3 +1,4 @@ +import math import os from dataclasses import dataclass, field, fields, is_dataclass from enum import Enum @@ -133,6 +134,12 @@ class Preset: # S3 region (AKA placement policy for S3 buckets) s3_location: Optional[str] = metadata_field(s3_preset_scenarios, "location", None, False) + # Delay between containers creation and object upload for preset + object_upload_delay: Optional[int] = metadata_field(all_load_scenarios, "sleep", None, False) + + # Flag to control preset erorrs + ignore_errors: Optional[bool] = metadata_field(all_load_scenarios, "ignore-errors", None, False) + @dataclass class LoadParams: @@ -194,6 +201,12 @@ class LoadParams: # https://k6.io/docs/using-k6/k6-options/reference/#setup-timeout setup_timeout: Optional[str] = metadata_field(all_scenarios, None, "K6_SETUP_TIMEOUT", False) + # Delay for read operations in case if we read from registry + read_age: Optional[int] = metadata_field(all_load_scenarios, None, "READ_AGE", None, False) + + # Initialization time for each VU for k6 load + vu_init_time: Optional[float] = None + # ------- CONSTANT VUS SCENARIO PARAMS ------- # Amount of Writers VU. writers: Optional[int] = metadata_field(constant_vus_scenarios, None, "WRITERS", True, True) @@ -306,6 +319,16 @@ class LoadParams: return command_args + def get_init_time(self) -> int: + return math.ceil(self._get_total_vus() * self.vu_init_time) + + def _get_total_vus(self) -> int: + vu_fields = ["writers", "preallocated_writers"] + data_fields = [ + getattr(self, field.name) or 0 for field in fields(self) if field.name in vu_fields + ] + return sum(data_fields) + def _get_applicable_fields(self): applicable_fields = [ meta_field diff --git a/src/frostfs_testlib/load/load_metrics.py b/src/frostfs_testlib/load/load_metrics.py index 5cec8ea..6c201ec 100644 --- a/src/frostfs_testlib/load/load_metrics.py +++ b/src/frostfs_testlib/load/load_metrics.py @@ -30,7 +30,7 @@ class MetricsBase(ABC): @property def write_success_iterations(self) -> int: return self._get_metric(self._WRITE_SUCCESS) - + @property def write_latency(self) -> dict: return self._get_metric(self._WRITE_LATENCY) @@ -54,7 +54,7 @@ class MetricsBase(ABC): @property def read_success_iterations(self) -> int: return self._get_metric(self._READ_SUCCESS) - + @property def read_latency(self) -> dict: return self._get_metric(self._READ_LATENCY) @@ -78,7 +78,7 @@ class MetricsBase(ABC): @property def delete_success_iterations(self) -> int: return self._get_metric(self._DELETE_SUCCESS) - + @property def delete_latency(self) -> dict: return self._get_metric(self._DELETE_LATENCY) @@ -92,7 +92,11 @@ class MetricsBase(ABC): return self._get_metric_rate(self._DELETE_SUCCESS) def _get_metric(self, metric: str) -> int: - metrics_method_map = {"counter": self._get_counter_metric, "gauge": self._get_gauge_metric, "trend" : self._get_trend_metrics} + metrics_method_map = { + "counter": self._get_counter_metric, + "gauge": self._get_gauge_metric, + "trend": self._get_trend_metrics, + } if metric not in self.metrics: return 0 @@ -129,8 +133,8 @@ class MetricsBase(ABC): def _get_gauge_metric(self, metric: str) -> int: return metric["values"]["value"] - - def _get_trend_metrics(self, metric: str) -> int: + + def _get_trend_metrics(self, metric: str) -> int: return metric["values"] diff --git 
a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py index 26ab542..ec6d539 100644 --- a/src/frostfs_testlib/load/load_report.py +++ b/src/frostfs_testlib/load/load_report.py @@ -2,7 +2,6 @@ from datetime import datetime from typing import Optional import yaml -import os from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario from frostfs_testlib.load.load_metrics import get_metrics_object @@ -110,7 +109,7 @@ class LoadReport: total_rate: float, throughput: float, errors: dict[str, int], - latency: dict[str, dict], + latency: dict[str, dict], ): throughput_html = "" if throughput > 0: @@ -131,12 +130,16 @@ class LoadReport: latency_html = "" if latency: - for node_key, param_dict in latency.items(): - latency_values = "" - for param_name, param_val in param_dict.items(): - latency_values += f"{param_name}={param_val:.2f}ms " + for node_key, latency_dict in latency.items(): + latency_values = "N/A" + if latency_dict: + latency_values = "" + for param_name, param_val in latency_dict.items(): + latency_values += f"{param_name}={param_val:.2f}ms " - latency_html += self._row(f"Put latency {node_key.split(':')[0]}", latency_values) + latency_html += self._row( + f"{operation_type} latency {node_key.split(':')[0]}", latency_values + ) object_size, object_size_unit = calc_unit(self.load_params.object_size, 1) duration = self._seconds_to_formatted_duration(self.load_params.load_time) @@ -145,8 +148,8 @@ class LoadReport: short_summary = f"{operation_type} {object_size}{object_size_unit} {duration} {requested_rate_str} {vus_str} {model} - {throughput:.2f}{unit}/s {total_rate:.2f}/s" errors_percent = 0 if total_operations: - errors_percent = total_errors/total_operations*100.0 - + errors_percent = total_errors / total_operations * 100.0 + html = f"""
{short_summary}
Errors
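For context on the load_verifiers.py rewrite that follows: instead of asserting inside the verifier, each check now returns issue strings, and error rates are compared against load_params.error_threshold as a percentage. A distilled sketch of that rule with hypothetical names (error_rate_issue is not part of the library), showing the arithmetic and mirroring the verifier's messages:

    from typing import Optional

    def error_rate_issue(kind: str, operations: int, errors: int, threshold: float) -> Optional[str]:
        # Mirrors collect_load_issues: flag a class of operations when its
        # error percentage exceeds the configured threshold.
        if not operations:
            return f"No any {kind} operation was performed"
        rate = errors / operations * 100
        if rate > threshold:
            return f"{kind.capitalize()} error rate is greater than threshold: {rate} > {threshold}"
        return None

    # 3 failed writes out of 200 operations with a 1% threshold: 1.5 > 1.0, flagged.
    print(error_rate_issue("write", 200, 3, 1.0))
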
diff --git a/src/frostfs_testlib/load/load_verifiers.py b/src/frostfs_testlib/load/load_verifiers.py index 80c3962..b691b02 100644 --- a/src/frostfs_testlib/load/load_verifiers.py +++ b/src/frostfs_testlib/load/load_verifiers.py @@ -12,7 +12,7 @@ class LoadVerifier: def __init__(self, load_params: LoadParams) -> None: self.load_params = load_params - def verify_load_results(self, load_summaries: dict[str, dict]): + def collect_load_issues(self, load_summaries: dict[str, dict]) -> list[str]: write_operations = 0 write_errors = 0 @@ -41,38 +41,58 @@ class LoadVerifier: delete_operations += metrics.delete_total_iterations delete_errors += metrics.delete_failed_iterations - exceptions = [] + issues = [] if writers and not write_operations: - exceptions.append(f"No any write operation was performed") + issues.append(f"No any write operation was performed") if readers and not read_operations: - exceptions.append(f"No any read operation was performed") + issues.append(f"No any read operation was performed") if deleters and not delete_operations: - exceptions.append(f"No any delete operation was performed") + issues.append(f"No any delete operation was performed") - if write_operations and writers and write_errors / write_operations * 100 > self.load_params.error_threshold: - exceptions.append( + if ( + write_operations + and writers + and write_errors / write_operations * 100 > self.load_params.error_threshold + ): + issues.append( f"Write error rate is greater than threshold: {write_errors / write_operations * 100} > {self.load_params.error_threshold}" ) - if read_operations and readers and read_errors / read_operations * 100 > self.load_params.error_threshold: - exceptions.append( + if ( + read_operations + and readers + and read_errors / read_operations * 100 > self.load_params.error_threshold + ): + issues.append( f"Read error rate is greater than threshold: {read_errors / read_operations * 100} > {self.load_params.error_threshold}" ) - if delete_operations and deleters and delete_errors / delete_operations * 100 > self.load_params.error_threshold: - exceptions.append( + if ( + delete_operations + and deleters + and delete_errors / delete_operations * 100 > self.load_params.error_threshold + ): + issues.append( f"Delete error rate is greater than threshold: {delete_errors / delete_operations * 100} > {self.load_params.error_threshold}" ) - assert not exceptions, "\n".join(exceptions) + return issues - def check_verify_results(self, load_summaries, verification_summaries) -> None: - for node_or_endpoint in load_summaries: - with reporter.step(f"Check verify scenario results for {node_or_endpoint}"): - self._check_verify_result( - load_summaries[node_or_endpoint], verification_summaries[node_or_endpoint] + def collect_verify_issues(self, load_summaries, verification_summaries) -> list[str]: + verify_issues: list[str] = [] + for k6_process_label in load_summaries: + with reporter.step(f"Check verify scenario results for {k6_process_label}"): + verify_issues.extend( + self._collect_verify_issues_on_process( + k6_process_label, + load_summaries[k6_process_label], + verification_summaries[k6_process_label], + ) ) + return verify_issues - def _check_verify_result(self, load_summary, verification_summary) -> None: - exceptions = [] + def _collect_verify_issues_on_process( + self, label, load_summary, verification_summary + ) -> list[str]: + issues = [] load_metrics = get_metrics_object(self.load_params.scenario, load_summary) @@ -92,8 +112,8 @@ class LoadVerifier: # Due to interruptions we may see 
total verified objects to be less than written on writers count if abs(total_left_objects - verified_objects) > writers: - exceptions.append( - f"Verified objects mismatch. Total: {total_left_objects}, Verified: {verified_objects}. Writers: {writers}." + issues.append( + f"Verified objects mismatch for {label}. Total: {total_left_objects}, Verified: {verified_objects}. Writers: {writers}." ) - assert not exceptions, "\n".join(exceptions) + return issues diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index a7fa787..489ddcd 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -43,8 +43,8 @@ class RunnerBase(ScenarioRunner): parallel([k6.preset for k6 in self.k6_instances]) @reporter.step_deco("Wait until load finish") - def wait_until_finish(self): - parallel([k6.wait_until_finished for k6 in self.k6_instances]) + def wait_until_finish(self, soft_timeout: int = 0): + parallel([k6.wait_until_finished for k6 in self.k6_instances], soft_timeout=soft_timeout) @property def is_running(self): diff --git a/src/frostfs_testlib/resources/load_params.py b/src/frostfs_testlib/resources/load_params.py index bd99859..97193cc 100644 --- a/src/frostfs_testlib/resources/load_params.py +++ b/src/frostfs_testlib/resources/load_params.py @@ -13,6 +13,7 @@ BACKGROUND_DELETERS_COUNT = os.getenv("BACKGROUND_DELETERS_COUNT", 0) BACKGROUND_VERIFIERS_COUNT = os.getenv("BACKGROUND_VERIFIERS_COUNT", 0) BACKGROUND_LOAD_DEFAULT_TIME = os.getenv("BACKGROUND_LOAD_DEFAULT_TIME", 1800) BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE = os.getenv("BACKGROUND_LOAD_DEFAULT_OBJECT_SIZE", 32) +BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME = float(os.getenv("BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME", 0.8)) BACKGROUND_LOAD_SETUP_TIMEOUT = os.getenv("BACKGROUND_LOAD_SETUP_TIMEOUT", "5s") # This will decrease load params for some weak environments @@ -26,7 +27,7 @@ BACKGROUND_LOAD_CONTAINER_PLACEMENT_POLICY = os.getenv( BACKGROUND_LOAD_S3_LOCATION = os.getenv("BACKGROUND_LOAD_S3_LOCATION", "node-off") PRESET_CONTAINERS_COUNT = os.getenv("CONTAINERS_COUNT", "40") # TODO: At lease one object is required due to bug in xk6 (buckets with no objects produce millions exceptions in read) -PRESET_OBJECTS_COUNT = os.getenv("OBJ_COUNT", "10") +PRESET_OBJECTS_COUNT = os.getenv("OBJ_COUNT", "1") K6_DIRECTORY = os.getenv("K6_DIRECTORY", "/etc/k6") K6_TEARDOWN_PERIOD = os.getenv("K6_TEARDOWN_PERIOD", "30") K6_STOP_SIGNAL_TIMEOUT = int(os.getenv("K6_STOP_SIGNAL_TIMEOUT", 300)) diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index 38cdf0f..a18a603 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -158,25 +158,27 @@ class BackgroundLoadController: @reporter.step_deco("Run post-load verification") def verify(self): try: - self._verify_load_results() + load_issues = self._collect_load_issues() if self.load_params.verify: - self._run_verify_scenario() + load_issues.extend(self._run_verify_scenario()) + + assert not load_issues, "\n".join(load_issues) finally: self._reset_for_consequent_load() @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Verify load results") - def _verify_load_results(self): + @reporter.step_deco("Collect load issues") + def _collect_load_issues(self): verifier = LoadVerifier(self.load_params) - 
verifier.verify_load_results(self.load_summaries) + return verifier.collect_load_issues(self.load_summaries) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - def wait_until_finish(self): - self.runner.wait_until_finish() + def wait_until_finish(self, soft_timeout: int = 0): + self.runner.wait_until_finish(soft_timeout) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @reporter.step_deco("Verify loaded objects") - def _run_verify_scenario(self): + def _run_verify_scenario(self) -> list[str]: self.verification_params = LoadParams( verify_clients=self.load_params.verify_clients, scenario=LoadScenario.VERIFY, @@ -185,6 +187,7 @@ class BackgroundLoadController: verify_time=self.load_params.verify_time, load_type=self.load_params.load_type, load_id=self.load_params.load_id, + vu_init_time=0, working_dir=self.load_params.working_dir, endpoint_selection_strategy=self.load_params.endpoint_selection_strategy, k6_process_allocation_strategy=self.load_params.k6_process_allocation_strategy, @@ -199,10 +202,10 @@ class BackgroundLoadController: self.runner.start() self.runner.wait_until_finish() - with reporter.step("Check verify results"): + with reporter.step("Collect verify issues"): verification_summaries = self._get_results() verifier = LoadVerifier(self.load_params) - verifier.check_verify_results(self.load_summaries, verification_summaries) + return verifier.collect_verify_issues(self.load_summaries, verification_summaries) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) def _get_results(self) -> dict: diff --git a/src/frostfs_testlib/testing/parallel.py b/src/frostfs_testlib/testing/parallel.py index 7f4ee26..ebddd38 100644 --- a/src/frostfs_testlib/testing/parallel.py +++ b/src/frostfs_testlib/testing/parallel.py @@ -42,7 +42,7 @@ def parallel( exceptions = [future.exception() for future in futures if future.exception()] if exceptions: message = "\n".join([str(e) for e in exceptions]) - raise RuntimeError(f"The following exceptions occured during parallel run: {message}") + raise RuntimeError(f"The following exceptions occured during parallel run:\n {message}") return futures From e919064bb96a44ee4fcb3931250ba182c5d1bbc0 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Thu, 5 Oct 2023 16:42:28 +0300 Subject: [PATCH 144/363] [#92] Fix method name Signed-off-by: Andrey Berezin --- src/frostfs_testlib/healthcheck/basic_healthcheck.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/healthcheck/basic_healthcheck.py b/src/frostfs_testlib/healthcheck/basic_healthcheck.py index 9ec8694..3f4bc79 100644 --- a/src/frostfs_testlib/healthcheck/basic_healthcheck.py +++ b/src/frostfs_testlib/healthcheck/basic_healthcheck.py @@ -8,7 +8,7 @@ reporter = get_reporter() class BasicHealthcheck(Healthcheck): @reporter.step_deco("Perform healthcheck for {cluster_node}") - def perform_healthcheck(self, cluster_node: ClusterNode): + def perform(self, cluster_node: ClusterNode): health_check = storage_node_healthcheck(cluster_node.storage_node) if health_check.health_status != "READY" or health_check.network_status != "ONLINE": raise AssertionError("Node {cluster_node} is not healthy") From d039bcc221a8170b780a5f1fcc60fac18b498191 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Mon, 9 Oct 2023 22:30:14 +0300 Subject: [PATCH 145/363] Add IfUpDown utility helper Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/steps/iptables.py | 42 --------- src/frostfs_testlib/steps/network.py | 89 +++++++++++++++++++ 
.../controllers/cluster_state_controller.py | 24 ++++- .../testing/cluster_test_base.py | 22 ++++- 4 files changed, 130 insertions(+), 47 deletions(-) delete mode 100644 src/frostfs_testlib/steps/iptables.py create mode 100644 src/frostfs_testlib/steps/network.py diff --git a/src/frostfs_testlib/steps/iptables.py b/src/frostfs_testlib/steps/iptables.py deleted file mode 100644 index db0bb22..0000000 --- a/src/frostfs_testlib/steps/iptables.py +++ /dev/null @@ -1,42 +0,0 @@ -from frostfs_testlib.shell import Shell -from frostfs_testlib.storage.cluster import ClusterNode - - -class IpTablesHelper: - @staticmethod - def drop_input_traffic_to_port(node: ClusterNode, ports: list[str]) -> None: - shell = node.host.get_shell() - for port in ports: - shell.exec(f"iptables -A INPUT -p tcp --dport {port} -j DROP") - - @staticmethod - def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[str]) -> None: - shell = node.host.get_shell() - for ip in block_ip: - shell.exec(f"iptables -A INPUT -s {ip} -j DROP") - - @staticmethod - def restore_input_traffic_to_port(node: ClusterNode) -> None: - shell = node.host.get_shell() - ports = ( - shell.exec("iptables -L --numeric | grep DROP | awk '{print $7}'") - .stdout.strip() - .split("\n") - ) - if ports[0] == "": - return - for port in ports: - shell.exec(f"iptables -D INPUT -p tcp --dport {port.split(':')[-1]} -j DROP") - - @staticmethod - def restore_input_traffic_to_node(node: ClusterNode) -> None: - shell = node.host.get_shell() - unlock_ip = ( - shell.exec("iptables -L --numeric | grep DROP | awk '{print $4}'") - .stdout.strip() - .split("\n") - ) - if unlock_ip[0] == "": - return - for ip in unlock_ip: - shell.exec(f"iptables -D INPUT -s {ip} -j DROP") diff --git a/src/frostfs_testlib/steps/network.py b/src/frostfs_testlib/steps/network.py new file mode 100644 index 0000000..a865461 --- /dev/null +++ b/src/frostfs_testlib/steps/network.py @@ -0,0 +1,89 @@ +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.testing.test_control import retry + +reporter = get_reporter() + + +class IpTablesHelper: + @staticmethod + def drop_input_traffic_to_port(node: ClusterNode, ports: list[str]) -> None: + shell = node.host.get_shell() + for port in ports: + shell.exec(f"iptables -A INPUT -p tcp --dport {port} -j DROP") + + @staticmethod + def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[str]) -> None: + shell = node.host.get_shell() + for ip in block_ip: + shell.exec(f"iptables -A INPUT -s {ip} -j DROP") + + @staticmethod + def restore_input_traffic_to_port(node: ClusterNode) -> None: + shell = node.host.get_shell() + ports = ( + shell.exec("iptables -L --numeric | grep DROP | awk '{print $7}'") + .stdout.strip() + .split("\n") + ) + if ports[0] == "": + return + for port in ports: + shell.exec(f"iptables -D INPUT -p tcp --dport {port.split(':')[-1]} -j DROP") + + @staticmethod + def restore_input_traffic_to_node(node: ClusterNode) -> None: + shell = node.host.get_shell() + unlock_ip = ( + shell.exec("iptables -L --numeric | grep DROP | awk '{print $4}'") + .stdout.strip() + .split("\n") + ) + if unlock_ip[0] == "": + return + for ip in unlock_ip: + shell.exec(f"iptables -D INPUT -s {ip} -j DROP") + + +# TODO Move class to HOST +class IfUpDownHelper: + @reporter.step_deco("Down {interface} to {node}") + def down_interface(self, node: ClusterNode, interface: str) -> None: + shell = node.host.get_shell() + shell.exec(f"ifdown {interface}") + + @reporter.step_deco("Up 
{interface} to {node}") + def up_interface(self, node: ClusterNode, interface: str) -> None: + shell = node.host.get_shell() + shell.exec(f"ifup {interface}") + + @reporter.step_deco("Up all interface to {node}") + def up_all_interface(self, node: ClusterNode) -> None: + shell = node.host.get_shell() + interfaces = list(node.host.config.interfaces.keys()) + shell.exec("ifup -av") + for name_interface in interfaces: + self.check_state_up(node, name_interface) + + @reporter.step_deco("Down all interface to {node}") + def down_all_interface(self, node: ClusterNode) -> None: + shell = node.host.get_shell() + interfaces = list(node.host.config.interfaces.keys()) + shell.exec("ifdown -av") + for name_interface in interfaces: + self.check_state_down(node, name_interface) + + @reporter.step_deco("Check {node} to {interface}") + def check_state(self, node: ClusterNode, interface: str) -> str: + shell = node.host.get_shell() + return shell.exec( + f"ip link show {interface} | sed -z 's/.*state \(.*\) mode .*/\\1/'" + ).stdout.strip() + + @retry(max_attempts=5, sleep_interval=5, expected_result="UP") + def check_state_up(self, node: ClusterNode, interface: str) -> str: + return self.check_state(node=node, interface=interface) + + @retry(max_attempts=5, sleep_interval=5, expected_result="DOWN") + def check_state_down(self, node: ClusterNode, interface: str) -> str: + return self.check_state(node=node, interface=interface) diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 0148c0d..ed82167 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -4,7 +4,7 @@ import time import frostfs_testlib.resources.optionals as optionals from frostfs_testlib.reporter import get_reporter from frostfs_testlib.shell import CommandOptions, Shell -from frostfs_testlib.steps.iptables import IpTablesHelper +from frostfs_testlib.steps.network import IfUpDownHelper, IpTablesHelper from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode from frostfs_testlib.storage.controllers.disk_controller import DiskController from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass @@ -18,6 +18,7 @@ from frostfs_testlib.utils.failover_utils import ( ) reporter = get_reporter() +if_up_down_helper = IfUpDownHelper() class ClusterStateController: @@ -31,6 +32,7 @@ class ClusterStateController: self.cluster = cluster self.shell = shell self.suspended_services: dict[str, list[ClusterNode]] = {} + self.nodes_with_modified_interface: list[ClusterNode] = [] @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop host of node {node}") @@ -312,6 +314,26 @@ class ClusterStateController: wait_for_host_online(self.shell, node.storage_node) wait_for_node_online(node.storage_node) + @reporter.step_deco("Down {interface} to {nodes}") + def down_interface(self, nodes: list[ClusterNode], interface: str): + for node in nodes: + if_up_down_helper.down_interface(node=node, interface=interface) + assert if_up_down_helper.check_state(node=node, interface=interface) == "DOWN" + self.nodes_with_modified_interface.append(node) + + @reporter.step_deco("Up {interface} to {nodes}") + def up_interface(self, nodes: list[ClusterNode], interface: str): + for node in nodes: + if_up_down_helper.up_interface(node=node, interface=interface) + assert if_up_down_helper.check_state(node=node, 
interface=interface) == "UP" + if node in self.nodes_with_modified_interface: + self.nodes_with_modified_interface.remove(node) + + @reporter.step_deco("Restore interface") + def restore_interfaces(self): + for node in self.nodes_with_modified_interface: + if_up_down_helper.up_all_interface(node) + def _get_disk_controller( self, node: StorageNode, device: str, mountpoint: str ) -> DiskController: diff --git a/src/frostfs_testlib/testing/cluster_test_base.py b/src/frostfs_testlib/testing/cluster_test_base.py index 11f67f0..0676813 100644 --- a/src/frostfs_testlib/testing/cluster_test_base.py +++ b/src/frostfs_testlib/testing/cluster_test_base.py @@ -1,10 +1,13 @@ +import time from typing import Optional from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.shell import Shell from frostfs_testlib.steps import epoch from frostfs_testlib.storage.cluster import Cluster from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode +from frostfs_testlib.utils import datetime_utils reporter = get_reporter() @@ -14,13 +17,24 @@ class ClusterTestBase: shell: Shell cluster: Cluster - @reporter.step_deco("Tick {epochs_to_tick} epochs") - def tick_epochs(self, epochs_to_tick: int, alive_node: Optional[StorageNode] = None): + @reporter.step_deco("Tick {epochs_to_tick} epochs, wait {wait_block} block") + def tick_epochs( + self, + epochs_to_tick: int, + alive_node: Optional[StorageNode] = None, + wait_block: int = None, + ): for _ in range(epochs_to_tick): - self.tick_epoch(alive_node) + self.tick_epoch(alive_node, wait_block) - def tick_epoch(self, alive_node: Optional[StorageNode] = None): + def tick_epoch( + self, + alive_node: Optional[StorageNode] = None, + wait_block: int = None, + ): epoch.tick_epoch(self.shell, self.cluster, alive_node=alive_node) + if wait_block: + time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * wait_block) def wait_for_epochs_align(self): epoch.wait_for_epochs_align(self.shell, self.cluster) From 2c2af7f8ed0ca9199d0a21d0091f260083fbc243 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 10 Oct 2023 17:47:46 +0300 Subject: [PATCH 146/363] Keep only one ssh connection per host Signed-off-by: Andrey Berezin --- src/frostfs_testlib/shell/__init__.py | 2 +- src/frostfs_testlib/shell/ssh_shell.py | 188 +++++++++------- .../controllers/cluster_state_controller.py | 15 +- tests/test_ssh_shell.py | 200 ++++++++++++------ 4 files changed, 261 insertions(+), 144 deletions(-) diff --git a/src/frostfs_testlib/shell/__init__.py b/src/frostfs_testlib/shell/__init__.py index 0300ff8..980d119 100644 --- a/src/frostfs_testlib/shell/__init__.py +++ b/src/frostfs_testlib/shell/__init__.py @@ -1,3 +1,3 @@ from frostfs_testlib.shell.interfaces import CommandOptions, CommandResult, InteractiveInput, Shell from frostfs_testlib.shell.local_shell import LocalShell -from frostfs_testlib.shell.ssh_shell import SSHShell +from frostfs_testlib.shell.ssh_shell import SshConnectionProvider, SSHShell diff --git a/src/frostfs_testlib/shell/ssh_shell.py b/src/frostfs_testlib/shell/ssh_shell.py index 435a494..6db7d51 100644 --- a/src/frostfs_testlib/shell/ssh_shell.py +++ b/src/frostfs_testlib/shell/ssh_shell.py @@ -20,12 +20,117 @@ from paramiko import ( from paramiko.ssh_exception import AuthenticationException from frostfs_testlib.reporter import get_reporter -from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell +from frostfs_testlib.shell.interfaces 
import ( + CommandInspector, + CommandOptions, + CommandResult, + Shell, + SshCredentials, +) logger = logging.getLogger("frostfs.testlib.shell") reporter = get_reporter() +class SshConnectionProvider: + SSH_CONNECTION_ATTEMPTS: ClassVar[int] = 4 + SSH_ATTEMPTS_INTERVAL: ClassVar[int] = 10 + CONNECTION_TIMEOUT = 60 + + instance = None + connections: dict[str, SSHClient] = {} + creds: dict[str, SshCredentials] = {} + + def __new__(cls): + if not cls.instance: + cls.instance = super(SshConnectionProvider, cls).__new__(cls) + return cls.instance + + def store_creds(self, host: str, ssh_creds: SshCredentials): + self.creds[host] = ssh_creds + + def provide(self, host: str, port: str) -> SSHClient: + if host not in self.creds: + raise RuntimeError(f"Please add credentials for host {host}") + + if host in self.connections: + client = self.connections[host] + if client: + return client + + creds = self.creds[host] + client = self._create_connection(host, port, creds) + self.connections[host] = client + return client + + def drop(self, host: str): + if host in self.connections: + client = self.connections.pop(host) + client.close() + + def drop_all(self): + hosts = list(self.connections.keys()) + for host in hosts: + self.drop(host) + + def _create_connection( + self, + host: str, + port: str, + creds: SshCredentials, + ) -> SSHClient: + for attempt in range(self.SSH_CONNECTION_ATTEMPTS): + connection = SSHClient() + connection.set_missing_host_key_policy(AutoAddPolicy()) + try: + if creds.ssh_key_path: + logger.info( + f"Trying to connect to host {host} as {creds.ssh_login} using SSH key " + f"{creds.ssh_key_path} (attempt {attempt})" + ) + connection.connect( + hostname=host, + port=port, + username=creds.ssh_login, + pkey=_load_private_key(creds.ssh_key_path, creds.ssh_key_passphrase), + timeout=self.CONNECTION_TIMEOUT, + ) + else: + logger.info( + f"Trying to connect to host {host} as {creds.ssh_login} using password " + f"(attempt {attempt})" + ) + connection.connect( + hostname=host, + port=port, + username=creds.ssh_login, + password=creds.ssh_password, + timeout=self.CONNECTION_TIMEOUT, + ) + return connection + except AuthenticationException: + connection.close() + logger.exception(f"Can't connect to host {host}") + raise + except ( + SSHException, + ssh_exception.NoValidConnectionsError, + AttributeError, + socket.timeout, + OSError, + ) as exc: + connection.close() + can_retry = attempt + 1 < self.SSH_CONNECTION_ATTEMPTS + if can_retry: + logger.warn( + f"Can't connect to host {host}, will retry after {self.SSH_ATTEMPTS_INTERVAL}s. 
Error: {exc}" + ) + sleep(self.SSH_ATTEMPTS_INTERVAL) + continue + logger.exception(f"Can't connect to host {host}") + raise HostIsNotAvailable(host) from exc + + class HostIsNotAvailable(Exception): """Raised when host is not reachable via SSH connection.""" @@ -91,10 +196,6 @@ class SSHShell(Shell): # to allow remote command to flush its output buffer DELAY_AFTER_EXIT = 0.2 - SSH_CONNECTION_ATTEMPTS: ClassVar[int] = 4 - SSH_ATTEMPTS_INTERVAL: ClassVar[int] = 10 - CONNECTION_TIMEOUT = 60 - def __init__( self, host: str, @@ -106,23 +207,21 @@ class SSHShell(Shell): command_inspectors: Optional[list[CommandInspector]] = None, ) -> None: super().__init__() + self.connection_provider = SshConnectionProvider() + self.connection_provider.store_creds( + host, SshCredentials(login, password, private_key_path, private_key_passphrase) + ) self.host = host self.port = port - self.login = login - self.password = password - self.private_key_path = private_key_path - self.private_key_passphrase = private_key_passphrase + self.command_inspectors = command_inspectors or [] - self.__connection: Optional[SSHClient] = None @property def _connection(self): - if not self.__connection: - self.__connection = self._create_connection() - return self.__connection + return self.connection_provider.provide(self.host, self.port) def drop(self): - self._reset_connection() + self.connection_provider.drop(self.host) def exec(self, command: str, options: Optional[CommandOptions] = None) -> CommandResult: options = options or CommandOptions() @@ -196,7 +295,7 @@ class SSHShell(Shell): socket.timeout, ) as exc: logger.exception(f"Can't execute command {command} on host: {self.host}") - self._reset_connection() + self.drop() raise HostIsNotAvailable(self.host) from exc def _read_channels( @@ -251,62 +350,3 @@ class SSHShell(Shell): full_stderr = b"".join(stderr_chunks) return (full_stdout.decode(errors="ignore"), full_stderr.decode(errors="ignore")) - - def _create_connection( - self, attempts: int = SSH_CONNECTION_ATTEMPTS, interval: int = SSH_ATTEMPTS_INTERVAL - ) -> SSHClient: - for attempt in range(attempts): - connection = SSHClient() - connection.set_missing_host_key_policy(AutoAddPolicy()) - try: - if self.private_key_path: - logger.info( - f"Trying to connect to host {self.host} as {self.login} using SSH key " - f"{self.private_key_path} (attempt {attempt})" - ) - connection.connect( - hostname=self.host, - port=self.port, - username=self.login, - pkey=_load_private_key(self.private_key_path, self.private_key_passphrase), - timeout=self.CONNECTION_TIMEOUT, - ) - else: - logger.info( - f"Trying to connect to host {self.host} as {self.login} using password " - f"(attempt {attempt})" - ) - connection.connect( - hostname=self.host, - port=self.port, - username=self.login, - password=self.password, - timeout=self.CONNECTION_TIMEOUT, - ) - return connection - except AuthenticationException: - connection.close() - logger.exception(f"Can't connect to host {self.host}") - raise - except ( - SSHException, - ssh_exception.NoValidConnectionsError, - AttributeError, - socket.timeout, - OSError, - ) as exc: - connection.close() - can_retry = attempt + 1 < attempts - if can_retry: - logger.warn( - f"Can't connect to host {self.host}, will retry after {interval}s. 
Error: {exc}" - ) - sleep(interval) - continue - logger.exception(f"Can't connect to host {self.host}") - raise HostIsNotAvailable(self.host) from exc - - def _reset_connection(self) -> None: - if self.__connection: - self.__connection.close() - self.__connection = None diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index ed82167..c6391f5 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -3,7 +3,7 @@ import time import frostfs_testlib.resources.optionals as optionals from frostfs_testlib.reporter import get_reporter -from frostfs_testlib.shell import CommandOptions, Shell +from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider from frostfs_testlib.steps.network import IfUpDownHelper, IpTablesHelper from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode from frostfs_testlib.storage.controllers.disk_controller import DiskController @@ -37,6 +37,10 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop host of node {node}") def stop_node_host(self, node: ClusterNode, mode: str): + # Drop ssh connection for this node before shutdown + provider = SshConnectionProvider() + provider.drop(node.host_ip) + with reporter.step(f"Stop host {node.host.config.address}"): node.host.stop_host(mode=mode) wait_for_host_offline(self.shell, node.storage_node) @@ -48,6 +52,11 @@ class ClusterStateController: nodes = ( reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes ) + + # Drop all ssh connections before shutdown + provider = SshConnectionProvider() + provider.drop_all() + for node in nodes: with reporter.step(f"Stop host {node.host.config.address}"): self.stopped_nodes.append(node) @@ -307,6 +316,10 @@ class ClusterStateController: options = CommandOptions(close_stdin=True, timeout=1, check=False) shell.exec('sudo sh -c "echo b > /proc/sysrq-trigger"', options) + # Drop ssh connection for this node + provider = SshConnectionProvider() + provider.drop(node.host_ip) + if wait_for_return: # Let the things to be settled # A little wait here to prevent ssh stuck during panic diff --git a/tests/test_ssh_shell.py b/tests/test_ssh_shell.py index 4d1c0fd..ecd8c3c 100644 --- a/tests/test_ssh_shell.py +++ b/tests/test_ssh_shell.py @@ -1,50 +1,68 @@ import os -from unittest import SkipTest, TestCase + +import pytest from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput -from frostfs_testlib.shell.ssh_shell import SSHShell +from frostfs_testlib.shell.ssh_shell import SshConnectionProvider, SSHShell from helpers import format_error_details, get_output_lines -def init_shell() -> SSHShell: - host = os.getenv("SSH_SHELL_HOST") +def get_shell(host: str): port = os.getenv("SSH_SHELL_PORT", "22") login = os.getenv("SSH_SHELL_LOGIN") - private_key_path = os.getenv("SSH_SHELL_PRIVATE_KEY_PATH") - private_key_passphrase = os.getenv("SSH_SHELL_PRIVATE_KEY_PASSPHRASE") + + password = os.getenv("SSH_SHELL_PASSWORD", "") + private_key_path = os.getenv("SSH_SHELL_PRIVATE_KEY_PATH", "") + private_key_passphrase = os.getenv("SSH_SHELL_PRIVATE_KEY_PASSPHRASE", "") if not all([host, login, private_key_path, private_key_passphrase]): # TODO: in the future we might use https://pypi.org/project/mock-ssh-server, # at the moment it is not suitable for us because of its 
issues with stdin - raise SkipTest("SSH connection is not configured") + pytest.skip("SSH connection is not configured") return SSHShell( host=host, port=port, login=login, + password=password, private_key_path=private_key_path, private_key_passphrase=private_key_passphrase, ) -class TestSSHShellInteractive(TestCase): - @classmethod - def setUpClass(cls): - cls.shell = init_shell() +@pytest.fixture(scope="module") +def shell() -> SSHShell: + return get_shell(host=os.getenv("SSH_SHELL_HOST")) - def test_command_with_one_prompt(self): + +@pytest.fixture(scope="module") +def shell_same_host() -> SSHShell: + return get_shell(host=os.getenv("SSH_SHELL_HOST")) + + +@pytest.fixture(scope="module") +def shell_another_host() -> SSHShell: + return get_shell(host=os.getenv("SSH_SHELL_HOST_2")) + + +@pytest.fixture(scope="function", autouse=True) +def reset_connection(): + provider = SshConnectionProvider() + provider.drop_all() + + +class TestSSHShellInteractive: + def test_command_with_one_prompt(self, shell: SSHShell): script = "password = input('Password: '); print('\\n' + password)" inputs = [InteractiveInput(prompt_pattern="Password", input="test")] - result = self.shell.exec( - f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs) - ) + result = shell.exec(f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs)) - self.assertEqual(0, result.return_code) - self.assertEqual(["Password: test", "test"], get_output_lines(result)) - self.assertEqual("", result.stderr) + assert result.return_code == 0 + assert ["Password: test", "test"] == get_output_lines(result) + assert not result.stderr - def test_command_with_several_prompts(self): + def test_command_with_several_prompts(self, shell: SSHShell): script = ( "input1 = input('Input1: '); print('\\n' + input1); " "input2 = input('Input2: '); print('\\n' + input2)" @@ -54,86 +72,132 @@ class TestSSHShellInteractive(TestCase): InteractiveInput(prompt_pattern="Input2", input="test2"), ] - result = self.shell.exec( - f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs) - ) + result = shell.exec(f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs)) - self.assertEqual(0, result.return_code) - self.assertEqual( - ["Input1: test1", "test1", "Input2: test2", "test2"], get_output_lines(result) - ) - self.assertEqual("", result.stderr) + assert result.return_code == 0 + assert ["Input1: test1", "test1", "Input2: test2", "test2"] == get_output_lines(result) + assert not result.stderr - def test_invalid_command_with_check(self): + def test_invalid_command_with_check(self, shell: SSHShell): script = "invalid script" inputs = [InteractiveInput(prompt_pattern=".*", input="test")] - with self.assertRaises(RuntimeError) as raised: - self.shell.exec(f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs)) + with pytest.raises(RuntimeError) as raised: + shell.exec(f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs)) - error = format_error_details(raised.exception) - self.assertIn("SyntaxError", error) - self.assertIn("return code: 1", error) + error = format_error_details(raised.value) + assert "SyntaxError" in error + assert "return code: 1" in error - def test_invalid_command_without_check(self): + def test_invalid_command_without_check(self, shell: SSHShell): script = "invalid script" inputs = [InteractiveInput(prompt_pattern=".*", input="test")] - result = self.shell.exec( + result = shell.exec( f'python3 -c "{script}"', CommandOptions(interactive_inputs=inputs, check=False), ) - 
self.assertIn("SyntaxError", result.stdout) - self.assertEqual(1, result.return_code) + assert "SyntaxError" in result.stdout + assert result.return_code == 1 - def test_non_existing_binary(self): + def test_non_existing_binary(self, shell: SSHShell): inputs = [InteractiveInput(prompt_pattern=".*", input="test")] - with self.assertRaises(RuntimeError) as raised: - self.shell.exec("not-a-command", CommandOptions(interactive_inputs=inputs)) + with pytest.raises(RuntimeError) as raised: + shell.exec("not-a-command", CommandOptions(interactive_inputs=inputs)) - error = format_error_details(raised.exception) - self.assertIn("return code: 127", error) + error = format_error_details(raised.value) + assert "return code: 127" in error -class TestSSHShellNonInteractive(TestCase): - @classmethod - def setUpClass(cls): - cls.shell = init_shell() - - def test_correct_command(self): +class TestSSHShellNonInteractive: + def test_correct_command(self, shell: SSHShell): script = "print('test')" - result = self.shell.exec(f'python3 -c "{script}"') + result = shell.exec(f'python3 -c "{script}"') - self.assertEqual(0, result.return_code) - self.assertEqual("test", result.stdout.strip()) - self.assertEqual("", result.stderr) + assert result.return_code == 0 + assert result.stdout.strip() == "test" + assert not result.stderr - def test_invalid_command_with_check(self): + def test_invalid_command_with_check(self, shell: SSHShell): script = "invalid script" - with self.assertRaises(RuntimeError) as raised: - self.shell.exec(f'python3 -c "{script}"') + with pytest.raises(RuntimeError) as raised: + shell.exec(f'python3 -c "{script}"') - error = format_error_details(raised.exception) - self.assertIn("Error", error) - self.assertIn("return code: 1", error) + error = format_error_details(raised.value) + assert "Error" in error + assert "return code: 1" in error - def test_invalid_command_without_check(self): + def test_invalid_command_without_check(self, shell: SSHShell): script = "invalid script" - result = self.shell.exec(f'python3 -c "{script}"', CommandOptions(check=False)) + result = shell.exec(f'python3 -c "{script}"', CommandOptions(check=False)) - self.assertEqual(1, result.return_code) + assert result.return_code == 1 # TODO: we have inconsistency with local shell here, the local shell captures error info # in stdout while ssh shell captures it in stderr - self.assertIn("Error", result.stderr) + assert "Error" in result.stderr - def test_non_existing_binary(self): - with self.assertRaises(RuntimeError) as exc: - self.shell.exec("not-a-command") + def test_non_existing_binary(self, shell: SSHShell): + with pytest.raises(RuntimeError) as raised: + shell.exec("not-a-command") - error = format_error_details(exc.exception) - self.assertIn("Error", error) - self.assertIn("return code: 127", error) + error = format_error_details(raised.value) + assert "Error" in error + assert "return code: 127" in error + + +class TestSSHShellConnection: + def test_connection_provider_is_singleton(self): + provider = SshConnectionProvider() + provider2 = SshConnectionProvider() + assert id(provider) == id(provider2) + + def test_connection_provider_has_creds(self, shell: SSHShell): + provider = SshConnectionProvider() + assert len(provider.creds) == 1 + assert len(provider.connections) == 0 + + def test_connection_provider_has_only_one_connection(self, shell: SSHShell): + provider = SshConnectionProvider() + assert len(provider.connections) == 0 + shell.exec("echo 1") + assert len(provider.connections) == 1 + shell.exec("echo 2") + 
assert len(provider.connections) == 1 + shell.drop() + assert len(provider.connections) == 0 + + def test_connection_same_host(self, shell: SSHShell, shell_same_host: SSHShell): + provider = SshConnectionProvider() + assert len(provider.connections) == 0 + + shell.exec("echo 1") + assert len(provider.connections) == 1 + + shell_same_host.exec("echo 2") + assert len(provider.connections) == 1 + + shell.drop() + assert len(provider.connections) == 0 + + shell.exec("echo 3") + assert len(provider.connections) == 1 + + def test_connection_another_host(self, shell: SSHShell, shell_another_host: SSHShell): + provider = SshConnectionProvider() + assert len(provider.connections) == 0 + + shell.exec("echo 1") + assert len(provider.connections) == 1 + + shell_another_host.exec("echo 2") + assert len(provider.connections) == 2 + + shell.drop() + assert len(provider.connections) == 1 + + shell_another_host.drop() + assert len(provider.connections) == 0 From 98f9c78f099d9740a669c66cb65d5a7674e7d041 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 11 Oct 2023 18:21:40 +0300 Subject: [PATCH 147/363] [#97] Probe fix for filedescriptor issue Signed-off-by: Andrey Berezin --- src/frostfs_testlib/shell/local_shell.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/shell/local_shell.py b/src/frostfs_testlib/shell/local_shell.py index 56d19b2..fa07890 100644 --- a/src/frostfs_testlib/shell/local_shell.py +++ b/src/frostfs_testlib/shell/local_shell.py @@ -39,7 +39,7 @@ class LocalShell(Shell): log_file = tempfile.TemporaryFile() # File is reliable cross-platform way to capture output try: - command_process = pexpect.spawn(command, timeout=options.timeout) + command_process = pexpect.spawn(command, timeout=options.timeout, use_poll=True) except (pexpect.ExceptionPexpect, OSError) as exc: raise RuntimeError(f"Command: {command}") from exc From dd347dd8fbb9aa3fa777e358ee71cba56038ecae Mon Sep 17 00:00:00 2001 From: Dmitry Anurin Date: Wed, 11 Oct 2023 11:10:58 +0300 Subject: [PATCH 148/363] Added unit to logs getter Signed-off-by: Dmitry Anurin --- src/frostfs_testlib/hosting/docker_host.py | 1 + src/frostfs_testlib/hosting/interfaces.py | 1 + 2 files changed, 2 insertions(+) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index e2bc949..ffc2082 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -217,6 +217,7 @@ class DockerHost(Host): message_regex: str, since: Optional[datetime] = None, until: Optional[datetime] = None, + unit: Optional[str] = None, ) -> bool: client = self._get_docker_client() for service_config in self._config.services: diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index b4f67fb..48344cc 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -257,6 +257,7 @@ class Host(ABC): message_regex: str, since: Optional[datetime] = None, until: Optional[datetime] = None, + unit: Optional[str] = None, ) -> bool: """Checks logs on host for specified message regex. 
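Taken together, the patches above rework how the testlib reaches its hosts: patch 146 pools SSH connections per host in a process-wide SshConnectionProvider instead of letting each SSHShell own one, patch 147 switches the local pexpect spawn to poll(), sidestepping select()'s file-descriptor cap, and patch 148 threads an optional systemd unit name into the host log check. The snippet below is a minimal sketch of the new pooling behaviour, not part of any patch: the host address and credentials are placeholders, and it assumes a reachable SSH host to run against.

    from frostfs_testlib.shell import SshConnectionProvider, SSHShell

    # Placeholder host and credentials, for illustration only.
    shell_a = SSHShell(host="10.0.0.1", port="22", login="user", password="secret")
    shell_b = SSHShell(host="10.0.0.1", port="22", login="user", password="secret")

    provider = SshConnectionProvider()          # __new__ hands back the singleton
    assert provider is SshConnectionProvider()

    shell_a.exec("echo 1")                      # first command opens the connection
    shell_b.exec("echo 2")                      # same host, so the connection is reused
    assert len(provider.connections) == 1

    shell_a.drop()                              # e.g. right before stopping the host
    assert len(provider.connections) == 0       # the next exec() reconnects lazily

Dropping the pooled connection before a host is stopped or panicked, as ClusterStateController now does, means the next command on that host reconnects cleanly instead of failing on a dead socket. The new unit argument on is_message_in_logs() is presumably there so that journald-backed hosts can narrow the search to a single service; the docker host simply accepts and ignores it.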
From 1c3bbe26f72a7e66199037e7b524ea27033a7571 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 17 Oct 2023 17:45:23 +0300 Subject: [PATCH 149/363] [#98] Small dependency cleanup Signed-off-by: Andrey Berezin --- pyproject.toml | 2 +- requirements.txt | 1 - src/frostfs_testlib/plugins/__init__.py | 8 +------- 3 files changed, 2 insertions(+), 9 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 778e2fc..bf65d15 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,7 +18,7 @@ keywords = ["frostfs", "test"] dependencies = [ "allure-python-commons>=2.13.2", "docker>=4.4.0", - "importlib_metadata>=5.0; python_version < '3.10'", + "pyyaml==6.0.1", "neo-mamba==1.0.0", "paramiko>=2.10.3", "pexpect>=4.8.0", diff --git a/requirements.txt b/requirements.txt index 1fdf844..32e604f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,5 @@ allure-python-commons==2.13.2 docker==4.4.0 -importlib_metadata==5.0.0 neo-mamba==1.0.0 paramiko==2.10.3 pexpect==4.8.0 diff --git a/src/frostfs_testlib/plugins/__init__.py b/src/frostfs_testlib/plugins/__init__.py index fcd7acc..6914b9b 100644 --- a/src/frostfs_testlib/plugins/__init__.py +++ b/src/frostfs_testlib/plugins/__init__.py @@ -1,12 +1,6 @@ -import sys +from importlib.metadata import entry_points from typing import Any -if sys.version_info < (3, 10): - # On Python prior 3.10 we need to use backport of entry points - from importlib_metadata import entry_points -else: - from importlib.metadata import entry_points - def load_plugin(plugin_group: str, name: str) -> Any: """Loads plugin using entry point specification. From cff5db5a6786963df34be2afc58e956d37db6a6f Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Wed, 18 Oct 2023 10:55:22 +0300 Subject: [PATCH 150/363] Change func parsing netmap Signed-off-by: Dmitriy Zayakin --- .../dataclasses/storage_object_info.py | 5 +- src/frostfs_testlib/utils/cli_utils.py | 62 ++++++++++++------- 2 files changed, 42 insertions(+), 25 deletions(-) diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py index 21a820f..f7d51db 100644 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -29,14 +29,15 @@ class StorageObjectInfo(ObjectRef): class NodeNetmapInfo: node_id: str = None node_status: str = None - node_data_ip: str = None + node_data_ips: list[str] = None cluster_name: str = None continent: str = None country: str = None country_code: str = None - external_address: str = None + external_address: list[str] = None location: str = None node: str = None + price: int = None sub_div: str = None sub_div_code: int = None un_locode: str = None diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index 5bd4695..0fa6cde 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -8,6 +8,7 @@ Helper functions to use with `frostfs-cli`, `neo-go` and other CLIs. import csv import json import logging +import re import subprocess import sys from contextlib import suppress @@ -138,32 +139,47 @@ def log_command_execution(cmd: str, output: Union[str, TypedDict]) -> None: def parse_netmap_output(output: str) -> list[NodeNetmapInfo]: """ - The cli command will return something like. 
- - Epoch: 240 - Node 1: 01234 ONLINE /ip4/10.10.10.10/tcp/8080 - Continent: Europe - Country: Russia - CountryCode: RU - ExternalAddr: /ip4/10.10.11.18/tcp/8080 - Location: Moskva - Node: 10.10.10.12 - Price: 5 - SubDiv: Moskva - SubDivCode: MOW - UN-LOCODE: RU MOW - role: alphabet - The code will parse each line and return each node as dataclass. """ - netmap_list = output.split("Node ")[1:] - dataclass_list = [] - for node in netmap_list: - node = node.replace("\t", "").split("\n") - node = *node[0].split(" ")[1:-1], *[row.split(": ")[-1] for row in node[1:-1]] - dataclass_list.append(NodeNetmapInfo(*node)) + netmap_nodes = output.split("Node ")[1:] + dataclasses_netmap = [] + result_netmap = {} - return dataclass_list + regexes = { + "node_id": r"\d+: (?P\w+)", + "node_data_ips": r"(?P/ip4/.+?)$", + "node_status": r"(?PONLINE|OFFLINE)", + "cluster_name": r"ClusterName: (?P\w+)", + "continent": r"Continent: (?P\w+)", + "country": r"Country: (?P\w+)", + "country_code": r"CountryCode: (?P\w+)", + "external_address": r"ExternalAddr: (?P/ip[4].+?)$", + "location": r"Location: (?P\w+.*)", + "node": r"Node: (?P\d+\.\d+\.\d+\.\d+)", + "price": r"Price: (?P\d+)", + "sub_div": r"SubDiv: (?P.*)", + "sub_div_code": r"SubDivCode: (?P\w+)", + "un_locode": r"UN-LOCODE: (?P\w+.*)", + "role": r"role: (?P\w+)", + } + + for node in netmap_nodes: + for key, regex in regexes.items(): + search_result = re.search(regex, node, flags=re.MULTILINE) + if key == "node_data_ips": + result_netmap[key] = search_result[key].strip().split(" ") + continue + if key == "external_address": + result_netmap[key] = search_result[key].strip().split(",") + continue + if search_result == None: + result_netmap[key] = None + continue + result_netmap[key] = search_result[key].strip() + + dataclasses_netmap.append(NodeNetmapInfo(**result_netmap)) + + return dataclasses_netmap def parse_cmd_table(output: str, delimiter="|") -> list[dict[str, str]]: From e1f3444e9252f9ead667841d83957b816134ba2e Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Fri, 20 Oct 2023 18:08:22 +0300 Subject: [PATCH 151/363] [#100] Add new method for logs gathering Signed-off-by: Andrey Berezin --- src/frostfs_testlib/hosting/docker_host.py | 24 +++++++++++++++++++ src/frostfs_testlib/hosting/interfaces.py | 28 ++++++++++++++++++---- 2 files changed, 48 insertions(+), 4 deletions(-) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index ffc2082..289c94d 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -212,6 +212,30 @@ class DockerHost(Host): with open(file_path, "wb") as file: file.write(logs) + def get_filtered_logs( + self, + filter_regex: str, + since: Optional[datetime] = None, + until: Optional[datetime] = None, + unit: Optional[str] = None, + ) -> str: + client = self._get_docker_client() + filtered_logs = "" + for service_config in self._config.services: + container_name = self._get_service_attributes(service_config.name).container_name + try: + filtered_logs = client.logs(container_name, since=since, until=until) + except HTTPError as exc: + logger.info(f"Got exception while dumping logs of '{container_name}': {exc}") + continue + + matches = re.findall(filter_regex, filtered_logs, re.IGNORECASE + re.MULTILINE) + found = list(matches) + if found: + filtered_logs += f"{container_name}:\n{os.linesep.join(found)}" + + return filtered_logs + def is_message_in_logs( self, message_regex: str, diff --git 
a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index 48344cc..4c94ca0 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -115,7 +115,6 @@ class Host(ABC): service_name: Name of the service to restart. """ - @abstractmethod def get_data_directory(self, service_name: str) -> str: """ @@ -126,7 +125,6 @@ class Host(ABC): service_name: Name of storage node service. """ - @abstractmethod def wait_success_suspend_process(self, process_name: str) -> None: """Search for a service ID by its name and stop the process @@ -251,6 +249,27 @@ class Host(ABC): filter_regex: regex to filter output """ + @abstractmethod + def get_filtered_logs( + self, + filter_regex: str, + since: Optional[datetime] = None, + until: Optional[datetime] = None, + unit: Optional[str] = None, + ) -> str: + """Get logs from host filtered by regex. + + Args: + filter_regex: regex filter for logs. + since: If set, limits the time from which logs should be collected. Must be in UTC. + until: If set, limits the time until which logs should be collected. Must be in UTC. + unit: required unit. + + Returns: + Found entries as str if any found. + Empty string otherwise. + """ + @abstractmethod def is_message_in_logs( self, @@ -270,10 +289,11 @@ class Host(ABC): True if message found in logs in the given time frame. False otherwise. """ - @abstractmethod - def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None: + def wait_for_service_to_be_in_state( + self, systemd_service_name: str, expected_state: str, timeout: int + ) -> None: """ Waites for service to be in specified state. From 0c3bb20af5c353887cec98f6c0dc203f2b3ed26c Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Tue, 24 Oct 2023 13:57:11 +0300 Subject: [PATCH 152/363] Add method to interfaces Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/storage/cluster.py | 35 +++++++++++++++++++ .../storage/dataclasses/frostfs_services.py | 8 +++-- .../dataclasses/storage_object_info.py | 11 ++++++ 3 files changed, 52 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index 0e24ebb..7a48a1d 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -17,6 +17,7 @@ from frostfs_testlib.storage.dataclasses.frostfs_services import ( StorageNode, ) from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass +from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces from frostfs_testlib.storage.service_registry import ServiceRegistry reporter = get_reporter() @@ -121,6 +122,40 @@ class ClusterNode: config.attributes[ConfigAttributes.SERVICE_NAME] for config in self.host.config.services ] + def get_all_interfaces(self) -> dict[str, str]: + return self.host.config.interfaces + + def get_interface(self, interface: Interfaces) -> str: + return self.host.config.interfaces[interface.value] + + def get_data_interfaces(self) -> list[str]: + return [ + ip_address + for name_interface, ip_address in self.host.config.interfaces.items() + if "data" in name_interface + ] + + def get_data_interface(self, search_interface: str) -> list[str]: + return [ + self.host.config.interfaces[interface] + for interface in self.host.config.interfaces.keys() + if search_interface == interface + ] + + def get_internal_interfaces(self) -> list[str]: + return [ + ip_address + for name_interface, ip_address in 
self.host.config.interfaces.items() + if "internal" in name_interface + ] + + def get_internal_interface(self, search_internal: str) -> list[str]: + return [ + self.host.config.interfaces[interface] + for interface in self.host.config.interfaces.keys() + if search_internal == interface + ] + class Cluster: """ diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index ac2885b..9e6783c 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -110,6 +110,10 @@ class MorphChain(NodeBase): def label(self) -> str: return f"{self.name}: {self.get_endpoint()}" + def get_http_endpoint(self) -> str: + return self._get_attribute("http_endpoint") + + class StorageNode(NodeBase): """ Class represents storage node in a storage cluster @@ -149,10 +153,10 @@ class StorageNode(NodeBase): def get_data_directory(self) -> str: return self.host.get_data_directory(self.name) - + def get_http_hostname(self) -> str: return self._get_attribute(ConfigAttributes.HTTP_HOSTNAME) - + def get_s3_hostname(self) -> str: return self._get_attribute(ConfigAttributes.S3_HOSTNAME) diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py index f7d51db..d670d8e 100644 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -1,6 +1,9 @@ from dataclasses import dataclass +from enum import Enum from typing import Optional +from frostfs_testlib.testing.readable import HumanReadableEnum + @dataclass class ObjectRef: @@ -42,3 +45,11 @@ class NodeNetmapInfo: sub_div_code: int = None un_locode: str = None role: str = None + + +class Interfaces(HumanReadableEnum): + DATA_O: str = "data0" + DATA_1: str = "data1" + MGMT: str = "mgmt" + INTERNAL_0: str = "internal0" + INTERNAL_1: str = "internal1" From b1a3d740e99e7d7e9ec7fdcb939c3b0572ce989a Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 25 Oct 2023 15:57:38 +0300 Subject: [PATCH 153/363] [#102] Updates for failover Signed-off-by: Andrey Berezin --- pyproject.toml | 2 +- src/frostfs_testlib/storage/cluster.py | 36 +++++++++++++++++++ .../controllers/cluster_state_controller.py | 4 +-- .../storage/dataclasses/frostfs_services.py | 3 ++ .../storage/dataclasses/node_base.py | 14 +++++--- 5 files changed, 51 insertions(+), 8 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index bf65d15..3178bbe 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,7 +22,7 @@ dependencies = [ "neo-mamba==1.0.0", "paramiko>=2.10.3", "pexpect>=4.8.0", - "requests>=2.28.0", + "requests==2.28.1", "docstring_parser>=0.15", "testrail-api>=1.12.0", "pytest==7.1.2", diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index 7a48a1d..fa4ee0a 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -208,6 +208,42 @@ class Cluster: def morph_chain(self) -> list[MorphChain]: return self.services(MorphChain) + def nodes(self, services: list[ServiceClass]) -> list[ClusterNode]: + """ + Resolve which cluster nodes hosting the specified services. + + Args: + services: list of services to resolve hosting cluster nodes. + + Returns: + list of cluster nodes which host specified services. 
+ """ + + cluster_nodes = set() + for service in services: + cluster_nodes.update( + [node for node in self.cluster_nodes if node.service(type(service)) == service] + ) + + return list(cluster_nodes) + + def node(self, service: ServiceClass) -> ClusterNode: + """ + Resolve single cluster node hosting the specified service. + + Args: + services: list of services to resolve hosting cluster nodes. + + Returns: + list of cluster nodes which host specified services. + """ + + nodes = [node for node in self.cluster_nodes if node.service(type(service)) == service] + if not len(nodes): + raise RuntimeError(f"Cannot find service {service} on any node") + + return nodes[0] + def services(self, service_type: type[ServiceClass]) -> list[ServiceClass]: """ Get all services in a cluster of specified type. diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index c6391f5..7304f5d 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -41,10 +41,10 @@ class ClusterStateController: provider = SshConnectionProvider() provider.drop(node.host_ip) + self.stopped_nodes.append(node) with reporter.step(f"Stop host {node.host.config.address}"): node.host.stop_host(mode=mode) wait_for_host_offline(self.shell, node.storage_node) - self.stopped_nodes.append(node) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Shutdown whole cluster") @@ -136,8 +136,8 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop storage service on {node}") def stop_storage_service(self, node: ClusterNode): - node.storage_node.stop_service() self.stopped_storage_nodes.append(node) + node.storage_node.stop_service() @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop all {service_type} services") diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index 9e6783c..6413ded 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -145,6 +145,9 @@ class StorageNode(NodeBase): def get_shard_config_path(self) -> str: return self._get_attribute(ConfigAttributes.SHARD_CONFIG_PATH) + def get_shards_config(self) -> tuple[str, dict]: + return self.get_config(self.get_shard_config_path()) + def get_control_endpoint(self) -> str: return self._get_attribute(ConfigAttributes.CONTROL_ENDPOINT) diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 3b1964c..5352080 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -1,6 +1,6 @@ from abc import abstractmethod from dataclasses import dataclass -from typing import Optional, Tuple, TypedDict, TypeVar +from typing import Optional, TypedDict, TypeVar import yaml @@ -103,8 +103,10 @@ class NodeBase(HumanReadableABC): ConfigAttributes.WALLET_CONFIG, ) - def get_config(self) -> Tuple[str, dict]: - config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH) + def get_config(self, config_file_path: Optional[str] = None) -> tuple[str, dict]: + if config_file_path is None: + config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH) + shell = self.host.get_shell() result = 
shell.exec(f"cat {config_file_path}") @@ -113,8 +115,10 @@ class NodeBase(HumanReadableABC): config = yaml.safe_load(config_text) return config_file_path, config - def save_config(self, new_config: dict) -> None: - config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH) + def save_config(self, new_config: dict, config_file_path: Optional[str] = None) -> None: + if config_file_path is None: + config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH) + shell = self.host.get_shell() config_str = yaml.dump(new_config) From f4111a1374e8c28f3908de2f59a7bcb3ae1c5bc6 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Thu, 26 Oct 2023 13:34:42 +0300 Subject: [PATCH 154/363] [#103] Add host_status method to Host Signed-off-by: Andrey Berezin --- src/frostfs_testlib/hosting/docker_host.py | 22 +++++++++++++++---- src/frostfs_testlib/hosting/interfaces.py | 11 ++++++++++ src/frostfs_testlib/storage/constants.py | 1 + .../storage/dataclasses/node_base.py | 3 +++ 4 files changed, 33 insertions(+), 4 deletions(-) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 289c94d..d582418 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -11,7 +11,7 @@ import docker from requests import HTTPError from frostfs_testlib.hosting.config import ParsedAttributes -from frostfs_testlib.hosting.interfaces import DiskInfo, Host +from frostfs_testlib.hosting.interfaces import DiskInfo, Host, HostStatus from frostfs_testlib.shell import LocalShell, Shell, SSHShell from frostfs_testlib.shell.command_inspectors import SudoInspector @@ -87,6 +87,15 @@ class DockerHost(Host): for service_config in self._config.services: self.start_service(service_config.name) + def get_host_status(self) -> HostStatus: + # We emulate host status by checking all services. + for service_config in self._config.services: + state = self._get_container_state(service_config.name) + if state != "running": + return HostStatus.OFFLINE + + return HostStatus.ONLINE + def stop_host(self) -> None: # We emulate stopping machine by stopping all services # As an alternative we can probably try to stop docker service... @@ -293,11 +302,16 @@ class DockerHost(Host): # To speed things up, we break timeout in smaller iterations and check container state # several times. 
This way waiting stops as soon as container reaches the expected state for _ in range(iterations): - container = self._get_container_by_name(container_name) - logger.debug(f"Current container state\n:{json.dumps(container, indent=2)}") + state = self._get_container_state(container_name) - if container and container["State"] == expected_state: + if state == expected_state: return time.sleep(iteration_wait_time) raise RuntimeError(f"Container {container_name} is not in {expected_state} state.") + + def _get_container_state(self, container_name: str) -> str: + container = self._get_container_by_name(container_name) + logger.debug(f"Current container state\n:{json.dumps(container, indent=2)}") + + return container.get("State", None) diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index 4c94ca0..4388791 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -4,6 +4,13 @@ from typing import Optional from frostfs_testlib.hosting.config import CLIConfig, HostConfig, ServiceConfig from frostfs_testlib.shell.interfaces import Shell +from frostfs_testlib.testing.readable import HumanReadableEnum + + +class HostStatus(HumanReadableEnum): + ONLINE = "Online" + OFFLINE = "Offline" + UNKNOWN = "Unknown" class DiskInfo(dict): @@ -79,6 +86,10 @@ class Host(ABC): def start_host(self) -> None: """Starts the host machine.""" + @abstractmethod + def get_host_status(self) -> HostStatus: + """Check host status.""" + @abstractmethod def stop_host(self, mode: str) -> None: """Stops the host machine. diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index dbaac5a..2284ce3 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -10,6 +10,7 @@ class ConfigAttributes: ENDPOINT_DATA_0 = "endpoint_data0" ENDPOINT_DATA_1 = "endpoint_data1" ENDPOINT_INTERNAL = "endpoint_internal0" + ENDPOINT_PROMETHEUS = "endpoint_prometheus" CONTROL_ENDPOINT = "control_endpoint" UN_LOCODE = "un_locode" HTTP_HOSTNAME = "http_hostname" diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 5352080..ecfe61c 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -64,6 +64,9 @@ class NodeBase(HumanReadableABC): def service_healthcheck(self) -> bool: """Service healthcheck.""" + def get_metrics_endpoint(self) -> str: + return self._get_attribute(ConfigAttributes.ENDPOINT_PROMETHEUS) + def stop_service(self): with reporter.step(f"Stop {self.name} service on {self.host.config.address}"): self.host.stop_service(self.name) From 8a360683aeb770c3174e695105ab8bc09abee7d3 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Thu, 26 Oct 2023 17:31:33 +0300 Subject: [PATCH 155/363] [#104] Add mask/unmask for services Signed-off-by: Andrey Berezin --- src/frostfs_testlib/hosting/docker_host.py | 8 +++ src/frostfs_testlib/hosting/interfaces.py | 20 ++++++ src/frostfs_testlib/steps/node_management.py | 63 +------------------ .../controllers/cluster_state_controller.py | 14 +++-- .../storage/dataclasses/node_base.py | 9 ++- 5 files changed, 45 insertions(+), 69 deletions(-) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index d582418..0e4ea11 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -126,6 +126,14 @@ 
class DockerHost(Host): timeout=service_attributes.stop_timeout, ) + def mask_service(self, service_name: str) -> None: + # Not required for Docker + return + + def unmask_service(self, service_name: str) -> None: + # Not required for Docker + return + def wait_success_suspend_process(self, service_name: str): raise NotImplementedError("Not supported for docker") diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index 4388791..84b7911 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -118,6 +118,26 @@ class Host(ABC): service_name: Name of the service to stop. """ + @abstractmethod + def mask_service(self, service_name: str) -> None: + """Prevent the service from start by any activity by masking it. + + The service must be hosted on this host. + + Args: + service_name: Name of the service to mask. + """ + + @abstractmethod + def unmask_service(self, service_name: str) -> None: + """Allow the service to start by any activity by unmasking it. + + The service must be hosted on this host. + + Args: + service_name: Name of the service to unmask. + """ + @abstractmethod def restart_service(self, service_name: str) -> None: """Restarts the service with specified name and waits until it starts. diff --git a/src/frostfs_testlib/steps/node_management.py b/src/frostfs_testlib/steps/node_management.py index 4b46b62..9c0c6b0 100644 --- a/src/frostfs_testlib/steps/node_management.py +++ b/src/frostfs_testlib/steps/node_management.py @@ -15,8 +15,7 @@ from frostfs_testlib.resources.cli import ( ) from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.shell import Shell -from frostfs_testlib.steps.epoch import tick_epoch -from frostfs_testlib.steps.epoch import wait_for_epochs_align +from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align from frostfs_testlib.storage.cluster import Cluster, StorageNode from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate from frostfs_testlib.utils import datetime_utils @@ -41,44 +40,6 @@ class HealthStatus: return HealthStatus(network, health) -@reporter.step_deco("Stop random storage nodes") -def stop_random_storage_nodes(number: int, nodes: list[StorageNode]) -> list[StorageNode]: - """ - Shuts down the given number of randomly selected storage nodes. - Args: - number: the number of storage nodes to stop - nodes: the list of storage nodes to stop - Returns: - the list of nodes that were stopped - """ - nodes_to_stop = random.sample(nodes, number) - for node in nodes_to_stop: - node.stop_service() - return nodes_to_stop - - -@reporter.step_deco("Start storage node") -def start_storage_nodes(nodes: list[StorageNode]) -> None: - """ - The function starts specified storage nodes. - Args: - nodes: the list of nodes to start - """ - for node in nodes: - node.start_service() - - -@reporter.step_deco("Stop storage node") -def stop_storage_nodes(nodes: list[StorageNode]) -> None: - """ - The function starts specified storage nodes. 
- Args: - nodes: the list of nodes to start - """ - for node in nodes: - node.stop_service() - - @reporter.step_deco("Get Locode from random storage node") def get_locode_from_random_node(cluster: Cluster) -> str: node = random.choice(cluster.services(StorageNode)) @@ -329,25 +290,3 @@ def _run_control_command(node: StorageNode, command: str) -> None: f"--wallet {wallet_path} --config {wallet_config_path}" ) return result.stdout - - -@reporter.step_deco("Start services s3gate ") -def start_s3gates(cluster: Cluster) -> None: - """ - The function starts specified storage nodes. - Args: - cluster: cluster instance under test - """ - for gate in cluster.services(S3Gate): - gate.start_service() - - -@reporter.step_deco("Stop services s3gate ") -def stop_s3gates(cluster: Cluster) -> None: - """ - The function starts specified storage nodes. - Args: - cluster: cluster instance under test - """ - for gate in cluster.services(S3Gate): - gate.stop_service() diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 7304f5d..c18b8d8 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -135,9 +135,9 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop storage service on {node}") - def stop_storage_service(self, node: ClusterNode): + def stop_storage_service(self, node: ClusterNode, mask: bool = True): self.stopped_storage_nodes.append(node) - node.storage_node.stop_service() + node.storage_node.stop_service(mask) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop all {service_type} services") @@ -171,9 +171,11 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop {service_type} service on {node}") - def stop_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass]): + def stop_service_of_type( + self, node: ClusterNode, service_type: type[ServiceClass], mask: bool = True + ): service = node.service(service_type) - service.stop_service() + service.stop_service(mask) self.stopped_services.add(service) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @@ -207,8 +209,8 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop s3 gate on {node}") - def stop_s3_gate(self, node: ClusterNode): - node.s3_gate.stop_service() + def stop_s3_gate(self, node: ClusterNode, mask: bool = True): + node.s3_gate.stop_service(mask) self.stopped_s3_gates.append(node) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index ecfe61c..8708520 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -57,6 +57,9 @@ class NodeBase(HumanReadableABC): return self._process_name def start_service(self): + with reporter.step(f"Unmask {self.name} service on {self.host.config.address}"): + self.host.unmask_service(self.name) + with reporter.step(f"Start {self.name} service on {self.host.config.address}"): self.host.start_service(self.name) @@ -67,7 +70,11 @@ class NodeBase(HumanReadableABC): def get_metrics_endpoint(self) -> str: return self._get_attribute(ConfigAttributes.ENDPOINT_PROMETHEUS) - def stop_service(self): + def 
stop_service(self, mask: bool = True):
+        if mask:
+            with reporter.step(f"Mask {self.name} service on {self.host.config.address}"):
+                self.host.mask_service(self.name)
+
         with reporter.step(f"Stop {self.name} service on {self.host.config.address}"):
             self.host.stop_service(self.name)
 
From 3af4dfd977cb60744060a51c184b3b48400965ea Mon Sep 17 00:00:00 2001
From: "m.malygina"
Date: Fri, 27 Oct 2023 10:56:27 +0300
Subject: [PATCH 156/363] multipart scenario

Signed-off-by: m.malygina
---
 src/frostfs_testlib/load/load_config.py  | 16 +++++++++++++---
 src/frostfs_testlib/load/load_metrics.py |  1 +
 src/frostfs_testlib/load/load_report.py  |  1 +
 3 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py
index 678fc38..a5d8535 100644
--- a/src/frostfs_testlib/load/load_config.py
+++ b/src/frostfs_testlib/load/load_config.py
@@ -19,6 +19,7 @@ class LoadScenario(Enum):
     gRPC_CAR = "grpc_car"
     S3 = "s3"
     S3_CAR = "s3_car"
+    S3_MULTIPART = "s3_multipart"
     HTTP = "http"
     VERIFY = "verify"
     LOCAL = "local"
@@ -37,10 +38,11 @@ all_load_scenarios = [
     LoadScenario.S3_CAR,
     LoadScenario.gRPC_CAR,
     LoadScenario.LOCAL,
+    LoadScenario.S3_MULTIPART
 ]
 all_scenarios = all_load_scenarios.copy() + [LoadScenario.VERIFY]
 
-constant_vus_scenarios = [LoadScenario.gRPC, LoadScenario.S3, LoadScenario.HTTP, LoadScenario.LOCAL]
+constant_vus_scenarios = [LoadScenario.gRPC, LoadScenario.S3, LoadScenario.HTTP, LoadScenario.LOCAL, LoadScenario.S3_MULTIPART]
 constant_arrival_rate_scenarios = [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]
 
 grpc_preset_scenarios = [
@@ -49,7 +51,7 @@ grpc_preset_scenarios = [
     LoadScenario.gRPC_CAR,
     LoadScenario.LOCAL,
 ]
-s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR]
+s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.S3_MULTIPART]
 
 
 @dataclass
@@ -172,7 +174,7 @@ class LoadParams:
     k6_url: Optional[str] = None
     # No ssl verification flag
     no_verify_ssl: Optional[bool] = metadata_field(
-        [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.VERIFY, LoadScenario.HTTP],
+        [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.S3_MULTIPART, LoadScenario.VERIFY, LoadScenario.HTTP],
         "no-verify-ssl",
         "NO_VERIFY_SSL",
         False,
@@ -258,6 +260,14 @@ class LoadParams:
         constant_arrival_rate_scenarios, None, "MAX_DELETERS", False, True
     )
 
+    # Multipart
+    # Number of parts to upload in parallel
+    writers_multipart: Optional[int] = metadata_field(
+        [LoadScenario.S3_MULTIPART], None, "WRITERS_MULTIPART", False, True
+    )
+    # Part size must be greater than 5 MB
+    write_object_part_size: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITE_OBJ_PART_SIZE", False)
+
     # Period of time to apply the rate value.
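
Each `metadata_field` above binds a `LoadParams` attribute to the k6 environment variable named in its arguments, so enabling the new scenario from a test is only a matter of populating those fields. A minimal sketch of such a configuration (values illustrative):

```python
from frostfs_testlib.load.load_config import LoadParams, LoadScenario, LoadType

# Illustrative setup: 4 parts uploaded in parallel, 8 MiB each,
# comfortably above the 5 MB minimum that the S3 multipart API requires.
multipart_params = LoadParams(
    load_type=LoadType.S3,
    scenario=LoadScenario.S3_MULTIPART,
    writers_multipart=4,
    write_object_part_size=8 * 1024 * 1024,
)
```

When the runner launches k6, these fields surface as the WRITERS_MULTIPART and WRITE_OBJ_PART_SIZE environment variables consumed by the multipart scenario script.
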
time_unit: Optional[str] = metadata_field( constant_arrival_rate_scenarios, None, "TIME_UNIT", False diff --git a/src/frostfs_testlib/load/load_metrics.py b/src/frostfs_testlib/load/load_metrics.py index 6c201ec..474a96b 100644 --- a/src/frostfs_testlib/load/load_metrics.py +++ b/src/frostfs_testlib/load/load_metrics.py @@ -196,6 +196,7 @@ def get_metrics_object(load_type: LoadScenario, summary: dict[str, Any]) -> Metr LoadScenario.HTTP: GrpcMetrics, LoadScenario.S3: S3Metrics, LoadScenario.S3_CAR: S3Metrics, + LoadScenario.S3_MULTIPART: S3Metrics, LoadScenario.VERIFY: VerifyMetrics, LoadScenario.LOCAL: LocalMetrics, } diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py index ec6d539..b648bc2 100644 --- a/src/frostfs_testlib/load/load_report.py +++ b/src/frostfs_testlib/load/load_report.py @@ -92,6 +92,7 @@ class LoadReport: model_map = { LoadScenario.gRPC: "closed model", LoadScenario.S3: "closed model", + LoadScenario.S3_MULTIPART: "closed model", LoadScenario.HTTP: "closed model", LoadScenario.gRPC_CAR: "open model", LoadScenario.S3_CAR: "open model", From f3c160f313c5c5926643ec5b81400b56705705ed Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Fri, 27 Oct 2023 14:10:01 +0300 Subject: [PATCH 157/363] [#107] Add passwd change protection for local runner --- src/frostfs_testlib/load/runners.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index 489ddcd..b65f129 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -285,6 +285,7 @@ class LocalRunner(RunnerBase): self.cluster_state_controller = cluster_state_controller self.file_keeper = file_keeper self.loaders = [NodeLoader(node) for node in nodes_under_load] + self.nodes_under_load = nodes_under_load @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @reporter.step_deco("Preparation steps") @@ -301,6 +302,7 @@ class LocalRunner(RunnerBase): with reporter.step("Allow storage user to login into system"): shell.exec(f"sudo chsh -s /bin/bash {STORAGE_USER_NAME}") + shell.exec("sudo chattr +i /etc/passwd") with reporter.step("Update limits.conf"): limits_path = "/etc/security/limits.conf" @@ -381,6 +383,13 @@ class LocalRunner(RunnerBase): for k6_instance in self.k6_instances: k6_instance.stop() + @reporter.step_deco("Restore passwd on {cluster_node}") + def restore_passwd_attr_on_node(cluster_node: ClusterNode): + shell = cluster_node.host.get_shell() + shell.exec("sudo chattr -i /etc/passwd") + + parallel(restore_passwd_attr_on_node, self.nodes_under_load) + self.cluster_state_controller.start_stopped_storage_services() self.cluster_state_controller.start_stopped_s3_gates() From 137fd2156145572b2af0fbbb4005f95314ac5d0a Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Fri, 27 Oct 2023 13:36:32 +0300 Subject: [PATCH 158/363] Add local shell and small fix Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/s3/aws_cli_client.py | 107 +++++++++--------- src/frostfs_testlib/steps/http/http_gate.py | 70 +++++++++--- .../controllers/cluster_state_controller.py | 9 +- src/frostfs_testlib/utils/cli_utils.py | 49 -------- 4 files changed, 113 insertions(+), 122 deletions(-) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 2e61679..dbece66 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -14,13 +14,15 @@ from frostfs_testlib.resources.common import ( 
S3_SYNC_WAIT_TIME, ) from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict +from frostfs_testlib.shell import CommandOptions +from frostfs_testlib.shell.local_shell import LocalShell # TODO: Refactor this code to use shell instead of _cmd_run -from frostfs_testlib.utils.cli_utils import _cmd_run, _configure_aws_cli +from frostfs_testlib.utils.cli_utils import _configure_aws_cli reporter = get_reporter() logger = logging.getLogger("NeoLogger") -LONG_TIMEOUT = 240 +command_options = CommandOptions(timeout=240) class AwsCliClient(S3ClientWrapper): @@ -34,10 +36,13 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("Configure S3 client (aws cli)") def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None: self.s3gate_endpoint = s3gate_endpoint + self.local_shell = LocalShell() try: _configure_aws_cli("aws configure", access_key_id, secret_access_key) - _cmd_run(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS}") - _cmd_run(f"aws configure set retry_mode {RETRY_MODE}") + self.local_shell.exec(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS}") + self.local_shell.exec( + f"aws configure set retry_mode {RETRY_MODE}", + ) except Exception as err: raise RuntimeError("Error while configuring AwsCliClient") from err @@ -79,7 +84,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --grant-read {grant_read}" if location_constraint: cmd += f" --create-bucket-configuration LocationConstraint={location_constraint}" - _cmd_run(cmd) + self.local_shell.exec(cmd) sleep(S3_SYNC_WAIT_TIME) return bucket @@ -87,20 +92,20 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("List buckets S3") def list_buckets(self) -> list[str]: cmd = f"aws {self.common_flags} s3api list-buckets --endpoint {self.s3gate_endpoint}" - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout buckets_json = self._to_json(output) return [bucket["Name"] for bucket in buckets_json["Buckets"]] @reporter.step_deco("Delete bucket S3") def delete_bucket(self, bucket: str) -> None: cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint}" - _cmd_run(cmd, LONG_TIMEOUT) + self.local_shell.exec(cmd, command_options) sleep(S3_SYNC_WAIT_TIME) @reporter.step_deco("Head bucket S3") def head_bucket(self, bucket: str) -> None: cmd = f"aws {self.common_flags} s3api head-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint}" - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Put bucket versioning status") def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: @@ -109,7 +114,7 @@ class AwsCliClient(S3ClientWrapper): f"--versioning-configuration Status={status.value} " f"--endpoint {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Get bucket versioning status") def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: @@ -117,7 +122,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-bucket-versioning --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Status") @@ -130,7 +135,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api put-bucket-tagging --bucket {bucket} " f"--tagging '{json.dumps(tags_json)}' --endpoint {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Get 
bucket tagging") def get_bucket_tagging(self, bucket: str) -> list: @@ -138,7 +143,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("TagSet") @@ -148,7 +153,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Grants") @@ -158,7 +163,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("LocationConstraint") @@ -168,7 +173,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) obj_list = [obj["Key"] for obj in response.get("Contents", [])] @@ -182,7 +187,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) obj_list = [obj["Key"] for obj in response.get("Contents", [])] @@ -196,7 +201,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response if full_output else response.get("Versions", []) @@ -206,7 +211,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response if full_output else response.get("DeleteMarkers", []) @@ -245,7 +250,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --tagging-directive {tagging_directive}" if tagging: cmd += f" --tagging {tagging}" - _cmd_run(cmd, LONG_TIMEOUT) + self.local_shell.exec(cmd, command_options) return key @reporter.step_deco("Put object S3") @@ -288,7 +293,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --grant-full-control '{grant_full_control}'" if grant_read: cmd += f" --grant-read {grant_read}" - output = _cmd_run(cmd, LONG_TIMEOUT) + output = self.local_shell.exec(cmd, command_options).stdout response = self._to_json(output) return response.get("VersionId") @@ -299,7 +304,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api head-object --bucket {bucket} --key {key} " f"{version} --endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response @@ -320,7 +325,7 @@ class AwsCliClient(S3ClientWrapper): ) if object_range: cmd += f" --range bytes={object_range[0]}-{object_range[1]}" - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response if full_output else file_path @@ -331,7 +336,7 @@ class 
AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-object-acl --bucket {bucket} --key {key} " f"{version} --endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Grants") @@ -354,7 +359,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --grant-write {grant_write}" if grant_read: cmd += f" --grant-read {grant_read}" - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Grants") @@ -376,7 +381,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --grant-write {grant_write}" if grant_read: cmd += f" --grant-read {grant_read}" - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Delete objects S3") def delete_objects(self, bucket: str, keys: list[str]) -> dict: @@ -390,7 +395,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api delete-objects --bucket {bucket} " f"--delete file://{file_path} --endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd, LONG_TIMEOUT) + output = self.local_shell.exec(cmd, command_options).stdout response = self._to_json(output) sleep(S3_SYNC_WAIT_TIME) return response @@ -402,7 +407,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api delete-object --bucket {bucket} " f"--key {key} {version} --endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd, LONG_TIMEOUT) + output = self.local_shell.exec(cmd, command_options).stdout sleep(S3_SYNC_WAIT_TIME) return self._to_json(output) @@ -429,7 +434,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api delete-objects --bucket {bucket} " f"--delete file://{file_path} --endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd, LONG_TIMEOUT) + output = self.local_shell.exec(cmd, command_options).stdout sleep(S3_SYNC_WAIT_TIME) return self._to_json(output) @@ -462,7 +467,7 @@ class AwsCliClient(S3ClientWrapper): f"--key {key} {version} {parts} {part_number_str} --object-attributes {attrs} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) for attr in attributes: @@ -479,7 +484,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Policy") @@ -496,7 +501,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api put-bucket-policy --bucket {bucket} " f"--policy {dumped_policy} --endpoint {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Get bucket cors") def get_bucket_cors(self, bucket: str) -> dict: @@ -504,7 +509,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("CORSRules") @@ -514,7 +519,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api put-bucket-cors --bucket {bucket} " f"--cors-configuration '{json.dumps(cors_configuration)}' --endpoint {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Delete bucket cors") def delete_bucket_cors(self, bucket: str) -> None: @@ -522,7 +527,7 @@ class 
AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Delete bucket tagging") def delete_bucket_tagging(self, bucket: str) -> None: @@ -530,7 +535,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Put object retention") def put_object_retention( @@ -548,7 +553,7 @@ class AwsCliClient(S3ClientWrapper): ) if bypass_governance_retention is not None: cmd += " --bypass-governance-retention" - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Put object legal hold") def put_object_legal_hold( @@ -564,7 +569,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api put-object-legal-hold --bucket {bucket} --key {key} " f"{version} --legal-hold '{legal_hold}' --endpoint {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Put object tagging") def put_object_tagging(self, bucket: str, key: str, tags: list) -> None: @@ -574,7 +579,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api put-object-tagging --bucket {bucket} --key {key} " f"--tagging '{json.dumps(tagging)}' --endpoint {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Get object tagging") def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: @@ -583,7 +588,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-object-tagging --bucket {bucket} --key {key} " f"{version} --endpoint {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("TagSet") @@ -593,7 +598,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api delete-object-tagging --bucket {bucket} " f"--key {key} --endpoint {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Sync directory S3") def sync( @@ -613,7 +618,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" {key}={value}" if acl: cmd += f" --acl {acl}" - output = _cmd_run(cmd, LONG_TIMEOUT) + output = self.local_shell.exec(cmd, command_options).stdout return self._to_json(output) @reporter.step_deco("CP directory S3") @@ -634,7 +639,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" {key}={value}" if acl: cmd += f" --acl {acl}" - output = _cmd_run(cmd, LONG_TIMEOUT) + output = self.local_shell.exec(cmd, command_options).stdout return self._to_json(output) @reporter.step_deco("Create multipart upload S3") @@ -643,7 +648,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api create-multipart-upload --bucket {bucket} " f"--key {key} --endpoint-url {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) assert response.get("UploadId"), f"Expected UploadId in response:\n{response}" @@ -656,7 +661,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api list-multipart-uploads --bucket {bucket} " f"--endpoint-url {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Uploads") @@ -666,7 +671,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api 
abort-multipart-upload --bucket {bucket} " f"--key {key} --upload-id {upload_id} --endpoint-url {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Upload part S3") def upload_part( @@ -677,7 +682,7 @@ class AwsCliClient(S3ClientWrapper): f"--upload-id {upload_id} --part-number {part_num} --body {filepath} " f"--endpoint-url {self.s3gate_endpoint}" ) - output = _cmd_run(cmd, LONG_TIMEOUT) + output = self.local_shell.exec(cmd, command_options).stdout response = self._to_json(output) assert response.get("ETag"), f"Expected ETag in response:\n{response}" return response["ETag"] @@ -691,7 +696,7 @@ class AwsCliClient(S3ClientWrapper): f"--upload-id {upload_id} --part-number {part_num} --copy-source {copy_source} " f"--endpoint-url {self.s3gate_endpoint}" ) - output = _cmd_run(cmd, LONG_TIMEOUT) + output = self.local_shell.exec(cmd, command_options).stdout response = self._to_json(output) assert response.get("CopyPartResult", []).get( "ETag" @@ -705,7 +710,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api list-parts --bucket {bucket} --key {key} " f"--upload-id {upload_id} --endpoint-url {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) assert response.get("Parts"), f"Expected Parts in response:\n{response}" @@ -727,7 +732,7 @@ class AwsCliClient(S3ClientWrapper): f"--key {key} --upload-id {upload_id} --multipart-upload file://{file_path} " f"--endpoint-url {self.s3gate_endpoint}" ) - _cmd_run(cmd) + self.local_shell.exec(cmd) @reporter.step_deco("Put object lock configuration") def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: @@ -735,7 +740,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api put-object-lock-configuration --bucket {bucket} " f"--object-lock-configuration '{json.dumps(configuration)}' --endpoint-url {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout return self._to_json(output) @reporter.step_deco("Get object lock configuration") @@ -744,7 +749,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-object-lock-configuration --bucket {bucket} " f"--endpoint-url {self.s3gate_endpoint}" ) - output = _cmd_run(cmd) + output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("ObjectLockConfiguration") diff --git a/src/frostfs_testlib/steps/http/http_gate.py b/src/frostfs_testlib/steps/http/http_gate.py index 8080689..2b70d6c 100644 --- a/src/frostfs_testlib/steps/http/http_gate.py +++ b/src/frostfs_testlib/steps/http/http_gate.py @@ -12,12 +12,13 @@ import requests from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.common import SIMPLE_OBJECT_SIZE -from frostfs_testlib.s3.aws_cli_client import LONG_TIMEOUT +from frostfs_testlib.s3.aws_cli_client import command_options from frostfs_testlib.shell import Shell +from frostfs_testlib.shell.local_shell import LocalShell from frostfs_testlib.steps.cli.object import get_object from frostfs_testlib.steps.storage_policy import get_nodes_without_object from frostfs_testlib.storage.cluster import StorageNode -from frostfs_testlib.utils.cli_utils import _cmd_run +from frostfs_testlib.testing.test_control import retry from frostfs_testlib.utils.file_utils import get_file_hash reporter = get_reporter() @@ -25,6 +26,7 @@ reporter = get_reporter() logger = logging.getLogger("NeoLogger") ASSETS_DIR = os.getenv("ASSETS_DIR", 
"TemporaryDir/") +local_shell = LocalShell() @reporter.step_deco("Get via HTTP Gate") @@ -51,7 +53,9 @@ def get_via_http_gate( else: request = f"{endpoint}{request_path}" - resp = requests.get(request, headers={"Host": http_hostname}, stream=True, timeout=timeout, verify=False) + resp = requests.get( + request, headers={"Host": http_hostname}, stream=True, timeout=timeout, verify=False + ) if not resp.ok: raise Exception( @@ -72,7 +76,9 @@ def get_via_http_gate( @reporter.step_deco("Get via Zip HTTP Gate") -def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str, http_hostname: str, timeout: Optional[int] = 300): +def get_via_zip_http_gate( + cid: str, prefix: str, endpoint: str, http_hostname: str, timeout: Optional[int] = 300 +): """ This function gets given object from HTTP gate cid: container id to get object from @@ -130,7 +136,9 @@ def get_via_http_gate_by_attribute( else: request = f"{endpoint}{request_path}" - resp = requests.get(request, stream=True, timeout=timeout, verify=False, headers={"Host": http_hostname}) + resp = requests.get( + request, stream=True, timeout=timeout, verify=False, headers={"Host": http_hostname} + ) if not resp.ok: raise Exception( @@ -165,7 +173,9 @@ def upload_via_http_gate( request = f"{endpoint}/upload/{cid}" files = {"upload_file": open(path, "rb")} body = {"filename": path} - resp = requests.post(request, files=files, data=body, headers=headers, timeout=timeout, verify=False) + resp = requests.post( + request, files=files, data=body, headers=headers, timeout=timeout, verify=False + ) if not resp.ok: raise Exception( @@ -223,16 +233,16 @@ def upload_via_http_gate_curl( large_object = is_object_large(filepath) if large_object: # pre-clean - _cmd_run("rm pipe -f") + local_shell.exec("rm pipe -f") files = f"file=@pipe;filename={os.path.basename(filepath)}" cmd = f"mkfifo pipe;cat {filepath} > pipe & curl -k --no-buffer -F '{files}' {attributes} {request}" - output = _cmd_run(cmd, LONG_TIMEOUT) + output = local_shell.exec(cmd, command_options) # clean up pipe - _cmd_run("rm pipe") + local_shell.exec("rm pipe") else: files = f"file=@{filepath};filename={os.path.basename(filepath)}" cmd = f"curl -k -F '{files}' {attributes} {request}" - output = _cmd_run(cmd) + output = local_shell.exec(cmd) if error_pattern: match = error_pattern.casefold() in str(output).casefold() @@ -245,6 +255,7 @@ def upload_via_http_gate_curl( return oid_re.group(1) +@retry(max_attempts=3, sleep_interval=1) @reporter.step_deco("Get via HTTP Gate using Curl") def get_via_http_curl(cid: str, oid: str, endpoint: str, http_hostname: str) -> str: """ @@ -257,8 +268,8 @@ def get_via_http_curl(cid: str, oid: str, endpoint: str, http_hostname: str) -> request = f"{endpoint}/get/{cid}/{oid}" file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}") - cmd = f"curl -k -H \"Host: {http_hostname}\" {request} > {file_path}" - _cmd_run(cmd) + cmd = f'curl -k -H "Host: {http_hostname}" {request} > {file_path}' + local_shell.exec(cmd) return file_path @@ -271,7 +282,11 @@ def _attach_allure_step(request: str, status_code: int, req_type="GET"): @reporter.step_deco("Try to get object and expect error") def try_to_get_object_and_expect_error( - cid: str, oid: str, error_pattern: str, endpoint: str, http_hostname: str, + cid: str, + oid: str, + error_pattern: str, + endpoint: str, + http_hostname: str, ) -> None: try: get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname) @@ -283,9 +298,16 @@ def try_to_get_object_and_expect_error( 
@reporter.step_deco("Verify object can be get using HTTP header attribute") def get_object_by_attr_and_verify_hashes( - oid: str, file_name: str, cid: str, attrs: dict, endpoint: str, http_hostname: str, + oid: str, + file_name: str, + cid: str, + attrs: dict, + endpoint: str, + http_hostname: str, ) -> None: - got_file_path_http = get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname) + got_file_path_http = get_via_http_gate( + cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname + ) got_file_path_http_attr = get_via_http_gate_by_attribute( cid=cid, attribute=attrs, endpoint=endpoint, http_hostname=http_hostname ) @@ -326,7 +348,9 @@ def verify_object_hash( shell=shell, endpoint=random_node.get_rpc_endpoint(), ) - got_file_path_http = object_getter(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname) + got_file_path_http = object_getter( + cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname + ) assert_hashes_are_equal(file_name, got_file_path, got_file_path_http) @@ -369,10 +393,20 @@ def try_to_get_object_via_passed_request_and_expect_error( ) -> None: try: if attrs is None: - get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, request_path=http_request_path, http_hostname=http_hostname) + get_via_http_gate( + cid=cid, + oid=oid, + endpoint=endpoint, + request_path=http_request_path, + http_hostname=http_hostname, + ) else: get_via_http_gate_by_attribute( - cid=cid, attribute=attrs, endpoint=endpoint, request_path=http_request_path, http_hostname=http_hostname + cid=cid, + attribute=attrs, + endpoint=endpoint, + request_path=http_request_path, + http_hostname=http_hostname, ) raise AssertionError(f"Expected error on getting object with cid: {cid}") except Exception as err: diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index c18b8d8..deb8c7f 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -241,10 +241,11 @@ class ClusterStateController: @reporter.step_deco("Resume {process_name} service in {node}") def resume_service(self, process_name: str, node: ClusterNode): node.host.wait_success_resume_process(process_name) - if self.suspended_services.get(process_name): - self.suspended_services[process_name].append(node) - else: - self.suspended_services[process_name] = [node] + if ( + self.suspended_services.get(process_name) + and node in self.suspended_services[process_name] + ): + self.suspended_services[process_name].remove(node) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start suspend processes services") diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index 0fa6cde..e1dfcd1 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -28,55 +28,6 @@ COLOR_GREEN = "\033[92m" COLOR_OFF = "\033[0m" -def _cmd_run(cmd: str, timeout: int = 90) -> str: - """ - Runs given shell command , in case of success returns its stdout, - in case of failure returns error message. 
- """ - compl_proc = None - start_time = datetime.now() - try: - logger.info(f"{COLOR_GREEN}Executing command: {cmd}{COLOR_OFF}") - start_time = datetime.utcnow() - compl_proc = subprocess.run( - cmd, - check=True, - universal_newlines=True, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - timeout=timeout, - shell=True, - ) - output = compl_proc.stdout - return_code = compl_proc.returncode - end_time = datetime.utcnow() - logger.info(f"{COLOR_GREEN}Output: {output}{COLOR_OFF}") - _attach_allure_log(cmd, output, return_code, start_time, end_time) - - return output - except subprocess.CalledProcessError as exc: - logger.info( - f"Command: {cmd}\n" f"Error:\nreturn code: {exc.returncode} " f"\nOutput: {exc.output}" - ) - end_time = datetime.now() - return_code, cmd_output = subprocess.getstatusoutput(cmd) - _attach_allure_log(cmd, cmd_output, return_code, start_time, end_time) - - raise RuntimeError( - f"Command: {cmd}\n" f"Error:\nreturn code: {exc.returncode}\n" f"Output: {exc.output}" - ) from exc - except OSError as exc: - raise RuntimeError(f"Command: {cmd}\n" f"Output: {exc.strerror}") from exc - except Exception as exc: - return_code, cmd_output = subprocess.getstatusoutput(cmd) - end_time = datetime.now() - _attach_allure_log(cmd, cmd_output, return_code, start_time, end_time) - logger.info( - f"Command: {cmd}\n" f"Error:\nreturn code: {return_code}\n" f"Output: {cmd_output}" - ) - raise - - def _run_with_passwd(cmd: str) -> str: child = pexpect.spawn(cmd) child.delaybeforesend = 1 From 8ee2985c899dd8dd1a72d157e6ac7604a94390b3 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 30 Oct 2023 14:37:23 +0300 Subject: [PATCH 159/363] [#108] Update user with couple retries Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/runners.py | 89 +++++++++++++++++------------ 1 file changed, 54 insertions(+), 35 deletions(-) diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index b65f129..635247e 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -3,7 +3,6 @@ import itertools import math import re import time -from concurrent.futures import ThreadPoolExecutor from dataclasses import fields from typing import Optional from urllib.parse import urlparse @@ -24,12 +23,14 @@ from frostfs_testlib.resources.load_params import ( LOAD_NODE_SSH_USER, LOAD_NODES, ) +from frostfs_testlib.shell.command_inspectors import SuInspector from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing import parallel, run_optionally +from frostfs_testlib.testing.test_control import retry from frostfs_testlib.utils import FileKeeper, datetime_utils reporter = get_reporter() @@ -296,40 +297,53 @@ class LocalRunner(RunnerBase): nodes_under_load: list[ClusterNode], k6_dir: str, ): - @reporter.step_deco("Prepare node {cluster_node}") - def prepare_node(cluster_node: ClusterNode): - shell = cluster_node.host.get_shell() + parallel(self.prepare_node, nodes_under_load, k6_dir, load_params) - with reporter.step("Allow storage user to login into system"): - shell.exec(f"sudo chsh -s /bin/bash {STORAGE_USER_NAME}") - shell.exec("sudo chattr +i /etc/passwd") + @retry(3, 5, 
expected_result=True) + def allow_user_to_login_in_system(self, cluster_node: ClusterNode): + shell = cluster_node.host.get_shell() - with reporter.step("Update limits.conf"): - limits_path = "/etc/security/limits.conf" - self.file_keeper.add(cluster_node.storage_node, limits_path) - content = f"{STORAGE_USER_NAME} hard nofile 65536\n{STORAGE_USER_NAME} soft nofile 65536\n" - shell.exec(f"echo '{content}' | sudo tee {limits_path}") + result = None + try: + shell.exec(f"sudo chsh -s /bin/bash {STORAGE_USER_NAME}") + self.lock_passwd_on_node(cluster_node) + options = CommandOptions(check=False, extra_inspectors=[SuInspector(STORAGE_USER_NAME)]) + result = shell.exec("whoami", options) + finally: + if not result or result.return_code: + self.restore_passwd_on_node(cluster_node) + return False - with reporter.step("Download K6"): - shell.exec(f"sudo rm -rf {k6_dir};sudo mkdir {k6_dir}") - shell.exec(f"sudo curl -so {k6_dir}/k6.tar.gz {load_params.k6_url}") - shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz -C {k6_dir}") - shell.exec(f"sudo chmod -R 777 {k6_dir}") + return True - with reporter.step("Create empty_passwd"): - self.wallet = WalletInfo( - f"{k6_dir}/scenarios/files/wallet.json", "", "/tmp/empty_passwd.yml" - ) - content = yaml.dump({"password": ""}) - shell.exec(f'echo "{content}" | sudo tee {self.wallet.config_path}') - shell.exec(f"sudo chmod -R 777 {self.wallet.config_path}") + @reporter.step_deco("Prepare node {cluster_node}") + def prepare_node(self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams): + shell = cluster_node.host.get_shell() - with ThreadPoolExecutor(max_workers=len(nodes_under_load)) as executor: - result = executor.map(prepare_node, nodes_under_load) + with reporter.step("Allow storage user to login into system"): + self.allow_user_to_login_in_system(cluster_node) - # Check for exceptions - for _ in result: - pass + with reporter.step("Update limits.conf"): + limits_path = "/etc/security/limits.conf" + self.file_keeper.add(cluster_node.storage_node, limits_path) + content = ( + f"{STORAGE_USER_NAME} hard nofile 65536\n{STORAGE_USER_NAME} soft nofile 65536\n" + ) + shell.exec(f"echo '{content}' | sudo tee {limits_path}") + + with reporter.step("Download K6"): + shell.exec(f"sudo rm -rf {k6_dir};sudo mkdir {k6_dir}") + shell.exec(f"sudo curl -so {k6_dir}/k6.tar.gz {load_params.k6_url}") + shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz -C {k6_dir}") + shell.exec(f"sudo chmod -R 777 {k6_dir}") + + with reporter.step("Create empty_passwd"): + self.wallet = WalletInfo( + f"{k6_dir}/scenarios/files/wallet.json", "", "/tmp/empty_passwd.yml" + ) + content = yaml.dump({"password": ""}) + shell.exec(f'echo "{content}" | sudo tee {self.wallet.config_path}') + shell.exec(f"sudo chmod -R 777 {self.wallet.config_path}") @reporter.step_deco("Init k6 instances") def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): @@ -379,16 +393,21 @@ class LocalRunner(RunnerBase): ): time.sleep(wait_after_start_time) + @reporter.step_deco("Restore passwd on {cluster_node}") + def restore_passwd_on_node(self, cluster_node: ClusterNode): + shell = cluster_node.host.get_shell() + shell.exec("sudo chattr -i /etc/passwd") + + @reporter.step_deco("Lock passwd on {cluster_node}") + def lock_passwd_on_node(self, cluster_node: ClusterNode): + shell = cluster_node.host.get_shell() + shell.exec("sudo chattr +i /etc/passwd") + def stop(self): for k6_instance in self.k6_instances: k6_instance.stop() - @reporter.step_deco("Restore passwd on {cluster_node}") - def 
restore_passwd_attr_on_node(cluster_node: ClusterNode): - shell = cluster_node.host.get_shell() - shell.exec("sudo chattr -i /etc/passwd") - - parallel(restore_passwd_attr_on_node, self.nodes_under_load) + parallel(self.restore_passwd_on_node, self.nodes_under_load) self.cluster_state_controller.start_stopped_storage_services() self.cluster_state_controller.start_stopped_s3_gates() From e970fe2788949673913d16a73fbbb738829a9515 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 31 Oct 2023 14:17:54 +0300 Subject: [PATCH 160/363] [#109] Update CSC with healthchecks --- .../healthcheck/basic_healthcheck.py | 35 +++- src/frostfs_testlib/healthcheck/interfaces.py | 8 + src/frostfs_testlib/load/runners.py | 3 +- .../controllers/cluster_state_controller.py | 180 +++++++++++------- .../storage/dataclasses/node_base.py | 7 + src/frostfs_testlib/utils/__init__.py | 3 - 6 files changed, 158 insertions(+), 78 deletions(-) diff --git a/src/frostfs_testlib/healthcheck/basic_healthcheck.py b/src/frostfs_testlib/healthcheck/basic_healthcheck.py index 3f4bc79..9c1d151 100644 --- a/src/frostfs_testlib/healthcheck/basic_healthcheck.py +++ b/src/frostfs_testlib/healthcheck/basic_healthcheck.py @@ -1,5 +1,7 @@ +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli from frostfs_testlib.healthcheck.interfaces import Healthcheck from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC from frostfs_testlib.steps.node_management import storage_node_healthcheck from frostfs_testlib.storage.cluster import ClusterNode @@ -9,6 +11,33 @@ reporter = get_reporter() class BasicHealthcheck(Healthcheck): @reporter.step_deco("Perform healthcheck for {cluster_node}") def perform(self, cluster_node: ClusterNode): - health_check = storage_node_healthcheck(cluster_node.storage_node) - if health_check.health_status != "READY" or health_check.network_status != "ONLINE": - raise AssertionError("Node {cluster_node} is not healthy") + result = self.storage_healthcheck(cluster_node) + if result: + raise AssertionError(result) + + @reporter.step_deco("Tree healthcheck on {cluster_node}") + def tree_healthcheck(self, cluster_node: ClusterNode) -> str | None: + host = cluster_node.host + service_config = host.get_service_config(cluster_node.storage_node.name) + wallet_path = service_config.attributes["wallet_path"] + wallet_password = service_config.attributes["wallet_password"] + + shell = host.get_shell() + wallet_config_path = f"/tmp/{cluster_node.storage_node.name}-config.yaml" + wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' + shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") + + remote_cli = FrostfsCli( + shell, + host.get_cli_config(FROSTFS_CLI_EXEC).exec_path, + config_file=wallet_config_path, + ) + result = remote_cli.tree.healthcheck(rpc_endpoint="127.0.0.1:8080") + if result.return_code != 0: + return f"Error during tree healthcheck (rc={result.return_code}): {result.stdout}. \n Stderr: {result.stderr}" + + @reporter.step_deco("Storage healthcheck on {cluster_node}") + def storage_healthcheck(self, cluster_node: ClusterNode) -> str | None: + result = storage_node_healthcheck(cluster_node.storage_node) + if result.health_status != "READY" or result.network_status != "ONLINE": + return f"Node {cluster_node} is not healthy. Health={result.health_status}. 
Network={result.network_status}" diff --git a/src/frostfs_testlib/healthcheck/interfaces.py b/src/frostfs_testlib/healthcheck/interfaces.py index 0c77957..a036a82 100644 --- a/src/frostfs_testlib/healthcheck/interfaces.py +++ b/src/frostfs_testlib/healthcheck/interfaces.py @@ -7,3 +7,11 @@ class Healthcheck(ABC): @abstractmethod def perform(self, cluster_node: ClusterNode): """Perform healthcheck on the target cluster node""" + + @abstractmethod + def tree_healthcheck(self, cluster_node: ClusterNode): + """Check tree sync status on target cluster node""" + + @abstractmethod + def storage_healthcheck(self, cluster_node: ClusterNode): + """Perform storage node healthcheck on target cluster node""" diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index 635247e..4c07100 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -31,7 +31,8 @@ from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, Storage from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing import parallel, run_optionally from frostfs_testlib.testing.test_control import retry -from frostfs_testlib.utils import FileKeeper, datetime_utils +from frostfs_testlib.utils import datetime_utils +from frostfs_testlib.utils.file_keeper import FileKeeper reporter = get_reporter() diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index deb8c7f..2cf1451 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -1,15 +1,15 @@ -import copy import time import frostfs_testlib.resources.optionals as optionals +from frostfs_testlib.healthcheck.interfaces import Healthcheck from frostfs_testlib.reporter import get_reporter from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider from frostfs_testlib.steps.network import IfUpDownHelper, IpTablesHelper -from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode +from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode from frostfs_testlib.storage.controllers.disk_controller import DiskController from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass from frostfs_testlib.testing import parallel -from frostfs_testlib.testing.test_control import run_optionally +from frostfs_testlib.testing.test_control import run_optionally, wait_for_success from frostfs_testlib.utils.failover_utils import ( wait_all_storage_nodes_returned, wait_for_host_offline, @@ -22,18 +22,36 @@ if_up_down_helper = IfUpDownHelper() class ClusterStateController: - def __init__(self, shell: Shell, cluster: Cluster) -> None: + def __init__(self, shell: Shell, cluster: Cluster, healthcheck: Healthcheck) -> None: self.stopped_nodes: list[ClusterNode] = [] self.detached_disks: dict[str, DiskController] = {} - self.stopped_storage_nodes: list[ClusterNode] = [] - self.stopped_s3_gates: list[ClusterNode] = [] self.dropped_traffic: list[ClusterNode] = [] self.stopped_services: set[NodeBase] = set() self.cluster = cluster + self.healthcheck = healthcheck self.shell = shell self.suspended_services: dict[str, list[ClusterNode]] = {} self.nodes_with_modified_interface: list[ClusterNode] = [] + def _get_stopped_by_node(self, node: ClusterNode) -> set[NodeBase]: + stopped_by_node = [svc for svc in self.stopped_services if 
svc.host == node.host] + return set(stopped_by_node) + + def _get_stopped_by_type(self, service_type: type[ServiceClass]) -> set[ServiceClass]: + stopped_by_type = [svc for svc in self.stopped_services if isinstance(svc, service_type)] + return set(stopped_by_type) + + def _from_stopped_nodes(self, service_type: type[ServiceClass]) -> set[ServiceClass]: + stopped_on_nodes = set([node.service(service_type) for node in self.stopped_nodes]) + return set(stopped_on_nodes) + + def _get_online(self, service_type: type[ServiceClass]) -> set[ServiceClass]: + stopped_svc = self._get_stopped_by_type(service_type).union( + self._from_stopped_nodes(service_type) + ) + online_svc = set(self.cluster.services(service_type)) - stopped_svc + return online_svc + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop host of node {node}") def stop_node_host(self, node: ClusterNode, mode: str): @@ -65,26 +83,6 @@ class ClusterStateController: for node in nodes: wait_for_host_offline(self.shell, node.storage_node) - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Stop all storage services on cluster") - def stop_all_storage_services(self, reversed_order: bool = False): - nodes = ( - reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes - ) - - for node in nodes: - self.stop_storage_service(node) - - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Stop all S3 gates on cluster") - def stop_all_s3_gates(self, reversed_order: bool = False): - nodes = ( - reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes - ) - - for node in nodes: - self.stop_s3_gate(node) - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start host of node {node}") def start_node_host(self, node: ClusterNode): @@ -104,13 +102,10 @@ class ClusterStateController: for node in nodes: with reporter.step(f"Start host {node.host.config.address}"): node.host.start_host() - if node in self.stopped_storage_nodes: - self.stopped_storage_nodes.remove(node) + self.stopped_services.difference_update(self._get_stopped_by_node(node)) - if node in self.stopped_s3_gates: - self.stopped_s3_gates.remove(node) self.stopped_nodes = [] - wait_all_storage_nodes_returned(self.shell, self.cluster) + self.wait_after_storage_startup() @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Detach disk {device} at {mountpoint} on node {node}") @@ -133,42 +128,57 @@ class ClusterStateController: disk_controller.attach() self.detached_disks = {} - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Stop storage service on {node}") - def stop_storage_service(self, node: ClusterNode, mask: bool = True): - self.stopped_storage_nodes.append(node) - node.storage_node.stop_service(mask) - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop all {service_type} services") - def stop_services_of_type(self, service_type: type[ServiceClass]): + def stop_services_of_type(self, service_type: type[ServiceClass], mask: bool = True): services = self.cluster.services(service_type) self.stopped_services.update(services) - parallel([service.stop_service for service in services]) + parallel([service.stop_service for service in services], mask=mask) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start all {service_type} services") def start_services_of_type(self, service_type: type[ServiceClass]): services = 
self.cluster.services(service_type) parallel([service.start_service for service in services]) + self.stopped_services.difference_update(set(services)) if service_type == StorageNode: - wait_all_storage_nodes_returned(self.shell, self.cluster) + self.wait_after_storage_startup() - self.stopped_services = self.stopped_services - set(services) + @wait_for_success(600, 60) + def wait_s3gate(self, s3gate: S3Gate): + with reporter.step(f"Wait for {s3gate} reconnection"): + result = s3gate.get_metric("frostfs_s3_gw_pool_current_nodes") + assert ( + 'address="127.0.0.1' in result.stdout + ), "S3Gate should connect to local storage node" + + @reporter.step_deco("Wait for S3Gates reconnection to local storage") + def wait_s3gates(self): + online_s3gates = self._get_online(S3Gate) + parallel(self.wait_s3gate, online_s3gates) + + @wait_for_success(600, 60) + def wait_tree_healthcheck(self): + nodes = self.cluster.nodes(self._get_online(StorageNode)) + parallel(self.healthcheck.tree_healthcheck, nodes) + + @reporter.step_deco("Wait for storage reconnection to the system") + def wait_after_storage_startup(self): + wait_all_storage_nodes_returned(self.shell, self.cluster) + self.wait_s3gates() + self.wait_tree_healthcheck() @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start all stopped services") def start_all_stopped_services(self): + stopped_storages = self._get_stopped_by_type(StorageNode) parallel([service.start_service for service in self.stopped_services]) - - for service in self.stopped_services: - if isinstance(service, StorageNode): - wait_all_storage_nodes_returned(self.shell, self.cluster) - break - self.stopped_services.clear() + if stopped_storages: + self.wait_after_storage_startup() + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop {service_type} service on {node}") def stop_service_of_type( @@ -183,50 +193,78 @@ class ClusterStateController: def start_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass]): service = node.service(service_type) service.start_service() - if service in self.stopped_services: - self.stopped_services.remove(service) + self.stopped_services.discard(service) + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Start all stopped {service_type} services") + def start_stopped_services_of_type(self, service_type: type[ServiceClass]): + stopped_svc = self._get_stopped_by_type(service_type) + if not stopped_svc: + return + + parallel([svc.start_service for svc in stopped_svc]) + self.stopped_services.difference_update(stopped_svc) + + if service_type == StorageNode: + self.wait_after_storage_startup() + + # TODO: Deprecated + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Stop all storage services on cluster") + def stop_all_storage_services(self, reversed_order: bool = False): + nodes = ( + reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes + ) + + for node in nodes: + self.stop_service_of_type(node, StorageNode) + + # TODO: Deprecated + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Stop all S3 gates on cluster") + def stop_all_s3_gates(self, reversed_order: bool = False): + nodes = ( + reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes + ) + + for node in nodes: + self.stop_service_of_type(node, S3Gate) + + # TODO: Deprecated + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step_deco("Stop storage service on {node}") 
+    def stop_storage_service(self, node: ClusterNode, mask: bool = True):
+        self.stop_service_of_type(node, StorageNode, mask)
+
+    # TODO: Deprecated
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
     @reporter.step_deco("Start storage service on {node}")
     def start_storage_service(self, node: ClusterNode):
-        node.storage_node.start_service()
-        self.stopped_storage_nodes.remove(node)
+        self.start_service_of_type(node, StorageNode)
 
+    # TODO: Deprecated
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
     @reporter.step_deco("Start stopped storage services")
     def start_stopped_storage_services(self):
-        if not self.stopped_storage_nodes:
-            return
-
-        # In case if we stopped couple services, for example (s01-s04):
-        # After starting only s01, it may require connections to s02-s04, which is still down, and fail to start.
-        # Also, if something goes wrong here, we might skip s02-s04 start at all, and cluster will be left in a bad state.
-        # So in order to make sure that services are at least attempted to be started, using parallel runs here.
-        parallel(self.start_storage_service, copy.copy(self.stopped_storage_nodes))
-
-        wait_all_storage_nodes_returned(self.shell, self.cluster)
-        self.stopped_storage_nodes = []
+        self.start_stopped_services_of_type(StorageNode)
 
+    # TODO: Deprecated
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
     @reporter.step_deco("Stop s3 gate on {node}")
     def stop_s3_gate(self, node: ClusterNode, mask: bool = True):
-        node.s3_gate.stop_service(mask)
-        self.stopped_s3_gates.append(node)
+        self.stop_service_of_type(node, S3Gate, mask)
 
+    # TODO: Deprecated
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
     @reporter.step_deco("Start s3 gate on {node}")
     def start_s3_gate(self, node: ClusterNode):
-        node.s3_gate.start_service()
-        self.stopped_s3_gates.remove(node)
+        self.start_service_of_type(node, S3Gate)
 
+    # TODO: Deprecated
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
     @reporter.step_deco("Start stopped S3 gates")
     def start_stopped_s3_gates(self):
-        if not self.stopped_s3_gates:
-            return
-
-        parallel(self.start_s3_gate, copy.copy(self.stopped_s3_gates))
-        self.stopped_s3_gates = []
+        self.start_stopped_services_of_type(S3Gate)
 
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
     @reporter.step_deco("Suspend {process_name} service in {node}")
diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py
index 8708520..1e23c7e 100644
--- a/src/frostfs_testlib/storage/dataclasses/node_base.py
+++ b/src/frostfs_testlib/storage/dataclasses/node_base.py
@@ -7,6 +7,7 @@ import yaml
 from frostfs_testlib.hosting.config import ServiceConfig
 from frostfs_testlib.hosting.interfaces import Host
 from frostfs_testlib.reporter import get_reporter
+from frostfs_testlib.shell.interfaces import CommandResult
 from frostfs_testlib.storage.constants import ConfigAttributes
 from frostfs_testlib.testing.readable import HumanReadableABC
 from frostfs_testlib.utils import wallet_utils
@@ -67,6 +68,12 @@ class NodeBase(HumanReadableABC):
     def service_healthcheck(self) -> bool:
         """Service healthcheck."""
 
+    # TODO: Migrate to a Metrics sub-class (does not exist yet :))
+    def get_metric(self, metric: str) -> CommandResult:
+        shell = self.host.get_shell()
+        result = shell.exec(f"curl -s {self.get_metrics_endpoint()} | grep -e '^{metric}'")
+        return result
+
     def get_metrics_endpoint(self) -> str:
         return self._get_attribute(ConfigAttributes.ENDPOINT_PROMETHEUS)
 
diff --git a/src/frostfs_testlib/utils/__init__.py 
b/src/frostfs_testlib/utils/__init__.py index 0ac903a..fbc4a8f 100644 --- a/src/frostfs_testlib/utils/__init__.py +++ b/src/frostfs_testlib/utils/__init__.py @@ -3,6 +3,3 @@ import frostfs_testlib.utils.datetime_utils import frostfs_testlib.utils.json_utils import frostfs_testlib.utils.string_utils import frostfs_testlib.utils.wallet_utils - -# TODO: Circullar dependency FileKeeper -> NodeBase -> Utils -> FileKeeper -> NodeBase -from frostfs_testlib.utils.file_keeper import FileKeeper From 03c45d7592979d3f09b0c05b5bd7921139e382de Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 31 Oct 2023 18:17:21 +0300 Subject: [PATCH 161/363] [#110] Move chattr call after get_results call Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/runners.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index 4c07100..9859256 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -408,8 +408,6 @@ class LocalRunner(RunnerBase): for k6_instance in self.k6_instances: k6_instance.stop() - parallel(self.restore_passwd_on_node, self.nodes_under_load) - self.cluster_state_controller.start_stopped_storage_services() self.cluster_state_controller.start_stopped_s3_gates() @@ -419,4 +417,6 @@ class LocalRunner(RunnerBase): result = k6_instance.get_results() results[k6_instance.loader.ip] = result + parallel(self.restore_passwd_on_node, self.nodes_under_load) + return results From 1f50166e78845aee6e94bb1a448eb78add2f4e98 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Thu, 2 Nov 2023 11:13:34 +0300 Subject: [PATCH 162/363] Add method for work time Signed-off-by: Dmitriy Zayakin --- pyproject.toml | 4 +- .../controllers/cluster_state_controller.py | 73 ++++++++++++------- 2 files changed, 50 insertions(+), 27 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 3178bbe..ba38c03 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -50,10 +50,10 @@ basic = "frostfs_testlib.healthcheck.basic_healthcheck:BasicHealthcheck" [tool.isort] profile = "black" src_paths = ["src", "tests"] -line_length = 100 +line_length = 120 [tool.black] -line-length = 100 +line-length = 120 target-version = ["py310"] [tool.bumpver] diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 2cf1451..473af10 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -1,3 +1,4 @@ +import datetime import time import frostfs_testlib.resources.optionals as optionals @@ -46,9 +47,7 @@ class ClusterStateController: return set(stopped_on_nodes) def _get_online(self, service_type: type[ServiceClass]) -> set[ServiceClass]: - stopped_svc = self._get_stopped_by_type(service_type).union( - self._from_stopped_nodes(service_type) - ) + stopped_svc = self._get_stopped_by_type(service_type).union(self._from_stopped_nodes(service_type)) online_svc = set(self.cluster.services(service_type)) - stopped_svc return online_svc @@ -67,9 +66,7 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Shutdown whole cluster") def shutdown_cluster(self, mode: str, reversed_order: bool = False): - nodes = ( - reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes - ) + nodes = reversed(self.cluster.cluster_nodes) if reversed_order else 
self.cluster.cluster_nodes # Drop all ssh connections before shutdown provider = SshConnectionProvider() @@ -149,9 +146,7 @@ class ClusterStateController: def wait_s3gate(self, s3gate: S3Gate): with reporter.step(f"Wait for {s3gate} reconnection"): result = s3gate.get_metric("frostfs_s3_gw_pool_current_nodes") - assert ( - 'address="127.0.0.1' in result.stdout - ), "S3Gate should connect to local storage node" + assert 'address="127.0.0.1' in result.stdout, "S3Gate should connect to local storage node" @reporter.step_deco("Wait for S3Gates reconnection to local storage") def wait_s3gates(self): @@ -181,9 +176,7 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop {service_type} service on {node}") - def stop_service_of_type( - self, node: ClusterNode, service_type: type[ServiceClass], mask: bool = True - ): + def stop_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass], mask: bool = True): service = node.service(service_type) service.stop_service(mask) self.stopped_services.add(service) @@ -212,9 +205,7 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop all storage services on cluster") def stop_all_storage_services(self, reversed_order: bool = False): - nodes = ( - reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes - ) + nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes for node in nodes: self.stop_service_of_type(node, StorageNode) @@ -223,9 +214,7 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Stop all S3 gates on cluster") def stop_all_s3_gates(self, reversed_order: bool = False): - nodes = ( - reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes - ) + nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes for node in nodes: self.stop_service_of_type(node, S3Gate) @@ -279,10 +268,7 @@ class ClusterStateController: @reporter.step_deco("Resume {process_name} service in {node}") def resume_service(self, process_name: str, node: ClusterNode): node.host.wait_success_resume_process(process_name) - if ( - self.suspended_services.get(process_name) - and node in self.suspended_services[process_name] - ): + if self.suspended_services.get(process_name) and node in self.suspended_services[process_name]: self.suspended_services[process_name].remove(node) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @@ -388,9 +374,46 @@ class ClusterStateController: for node in self.nodes_with_modified_interface: if_up_down_helper.up_all_interface(node) - def _get_disk_controller( - self, node: StorageNode, device: str, mountpoint: str - ) -> DiskController: + @reporter.step_deco("Get node time") + def get_node_date(self, node: ClusterNode) -> datetime.datetime: + shell = node.host.get_shell() + return datetime.datetime.strptime(shell.exec("hwclock -r").stdout.strip(), "%Y-%m-%d %H:%M:%S.%f%z") + + @reporter.step_deco("Set node time to {in_date}") + def change_node_date(self, node: ClusterNode, in_date: datetime.datetime) -> None: + shell = node.host.get_shell() + shell.exec(f"hwclock --set --date='{in_date}'") + shell.exec("hwclock --hctosys") + node_time = self.get_node_date(node) + with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a minute"): + assert (self.get_node_date(node) - in_date) < datetime.timedelta(minutes=1) + +
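A minimal usage sketch for the clock helpers above, taken together with the restore and NTP-toggle helpers defined just below (illustrative only, not part of the patch; csc is an assumed ClusterStateController fixture and node an assumed ClusterNode):

import datetime

skewed = csc.get_node_date(node) + datetime.timedelta(days=1)
csc.set_sync_date_all_nodes(status="inactive")  # stop systemd-timesyncd so the skew sticks
csc.change_node_date(node, skewed)
# ... run time-sensitive assertions here ...
csc.restore_node_date(node)  # snap the clock back to current UTC
csc.set_sync_date_all_nodes(status="active")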
@reporter.step_deco(f"Restore time") + def restore_node_date(self, node: ClusterNode) -> None: + shell = node.host.get_shell() + now_time = datetime.datetime.now(datetime.timezone.utc) + with reporter.step(f"Set {now_time} time"): + shell.exec(f"hwclock --set --date='{now_time}'") + shell.exec("hwclock --hctosys") + + @reporter.step_deco("Change the synchronizer status to {status}") + def set_sync_date_all_nodes(self, status: str): + if status == "active": + parallel(self._enable_date_synchronizer, self.cluster.cluster_nodes) + return + parallel(self._disable_date_synchronizer, self.cluster.cluster_nodes) + + def _enable_date_synchronizer(self, cluster_node: ClusterNode): + shell = cluster_node.host.get_shell() + shell.exec("timedatectl set-ntp true") + cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "active", 5) + + def _disable_date_synchronizer(self, cluster_node: ClusterNode): + shell = cluster_node.host.get_shell() + shell.exec("timedatectl set-ntp false") + cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "inactive", 5) + + def _get_disk_controller(self, node: StorageNode, device: str, mountpoint: str) -> DiskController: disk_controller_id = DiskController.get_id(node, device) if disk_controller_id in self.detached_disks.keys(): disk_controller = self.detached_disks[disk_controller_id] From c8227e80afb613caa9d98cd3c1d79e7c0df71c62 Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Thu, 2 Nov 2023 19:18:31 +0300 Subject: [PATCH 163/363] update-remaining time --- src/frostfs_testlib/load/k6.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index e7a2b39..e46221e 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -180,7 +180,9 @@ class K6: while timeout > 0: if not self._k6_process.running(): return - logger.info(f"K6 is running. Waiting {wait_interval} seconds...") + remaining_time_hours = f"{timeout//3600}h" if timeout//3600 != 0 else "" + remaining_time_minutes = f"{timeout//60%60}m" if timeout//60%60 != 0 else "" + logger.info(f"K6 is running. Remaining time {remaining_time_hours}{remaining_time_minutes}{timeout%60}s. 
Next check after {wait_interval} seconds...") sleep(wait_interval) timeout -= min(timeout, wait_interval) wait_interval = max( From f8562da7e03e1c06716b171896bb605790f02bb0 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Wed, 8 Nov 2023 19:49:20 +0300 Subject: [PATCH 164/363] Add AWS retries Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/resources/common.py | 2 +- src/frostfs_testlib/s3/aws_cli_client.py | 71 ++++++------------------ 2 files changed, 17 insertions(+), 56 deletions(-) diff --git a/src/frostfs_testlib/resources/common.py b/src/frostfs_testlib/resources/common.py index 131bf8a..7f8d2c4 100644 --- a/src/frostfs_testlib/resources/common.py +++ b/src/frostfs_testlib/resources/common.py @@ -43,6 +43,6 @@ with open(DEFAULT_WALLET_CONFIG, "w") as file: # Number of attempts that S3 clients will attempt per each request (1 means single attempt # without any retries) -MAX_REQUEST_ATTEMPTS = 1 +MAX_REQUEST_ATTEMPTS = 5 RETRY_MODE = "standard" CREDENTIALS_CREATE_TIMEOUT = "1m" diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index dbece66..320d74b 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -7,12 +7,7 @@ from time import sleep from typing import Literal, Optional, Union from frostfs_testlib.reporter import get_reporter -from frostfs_testlib.resources.common import ( - ASSETS_DIR, - MAX_REQUEST_ATTEMPTS, - RETRY_MODE, - S3_SYNC_WAIT_TIME, -) +from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.shell import CommandOptions from frostfs_testlib.shell.local_shell import LocalShell @@ -128,9 +123,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("Put bucket tagging") def put_bucket_tagging(self, bucket: str, tags: list) -> None: - tags_json = { - "TagSet": [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] - } + tags_json = {"TagSet": [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]} cmd = ( f"aws {self.common_flags} s3api put-bucket-tagging --bucket {bucket} " f"--tagging '{json.dumps(tags_json)}' --endpoint {self.s3gate_endpoint}" @@ -140,8 +133,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("Get bucket tagging") def get_bucket_tagging(self, bucket: str) -> list: cmd = ( - f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint}" + f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -149,10 +141,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("Get bucket acl") def get_bucket_acl(self, bucket: str) -> list: - cmd = ( - f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint}" - ) + cmd = f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Grants") @@ -160,8 +149,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("Get bucket location") def get_bucket_location(self, bucket: str) -> dict: cmd = ( - f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint}" + f"aws {self.common_flags} 
s3api get-bucket-location --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -169,10 +157,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("List objects S3") def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - cmd = ( - f"aws {self.common_flags} s3api list-objects --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint}" - ) + cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -183,10 +168,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("List objects S3 v2") def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - cmd = ( - f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint}" - ) + cmd = f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -371,10 +353,7 @@ class AwsCliClient(S3ClientWrapper): grant_write: Optional[str] = None, grant_read: Optional[str] = None, ) -> None: - cmd = ( - f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} " - f" --endpoint {self.s3gate_endpoint}" - ) + cmd = f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} " f" --endpoint {self.s3gate_endpoint}" if acl: cmd += f" --acl {acl}" if grant_write: @@ -442,9 +421,7 @@ class AwsCliClient(S3ClientWrapper): def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: # Delete objects without creating delete markers for object_version in object_versions: - self.delete_object( - bucket=bucket, key=object_version["Key"], version_id=object_version["VersionId"] - ) + self.delete_object(bucket=bucket, key=object_version["Key"], version_id=object_version["VersionId"]) @reporter.step_deco("Get object attributes") def get_object_attributes( @@ -480,10 +457,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("Get bucket policy") def get_bucket_policy(self, bucket: str) -> dict: - cmd = ( - f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint}" - ) + cmd = f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Policy") @@ -505,10 +479,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("Get bucket cors") def get_bucket_cors(self, bucket: str) -> dict: - cmd = ( - f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint}" - ) + cmd = f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("CORSRules") @@ -524,8 +495,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("Delete bucket cors") def delete_bucket_cors(self, bucket: str) -> None: cmd = ( - f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint}" + f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" ) self.local_shell.exec(cmd) @@ -608,10 +578,7 @@ class AwsCliClient(S3ClientWrapper): acl: 
Optional[str] = None, metadata: Optional[dict] = None, ) -> dict: - cmd = ( - f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " - f"--endpoint-url {self.s3gate_endpoint}" - ) + cmd = f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " f"--endpoint-url {self.s3gate_endpoint}" if metadata: cmd += " --metadata" for key, value in metadata.items(): @@ -674,9 +641,7 @@ class AwsCliClient(S3ClientWrapper): self.local_shell.exec(cmd) @reporter.step_deco("Upload part S3") - def upload_part( - self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str - ) -> str: + def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: cmd = ( f"aws {self.common_flags} s3api upload-part --bucket {bucket} --key {key} " f"--upload-id {upload_id} --part-number {part_num} --body {filepath} " @@ -688,9 +653,7 @@ class AwsCliClient(S3ClientWrapper): return response["ETag"] @reporter.step_deco("Upload copy part S3") - def upload_part_copy( - self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str - ) -> str: + def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: cmd = ( f"aws {self.common_flags} s3api upload-part-copy --bucket {bucket} --key {key} " f"--upload-id {upload_id} --part-number {part_num} --copy-source {copy_source} " @@ -698,9 +661,7 @@ class AwsCliClient(S3ClientWrapper): ) output = self.local_shell.exec(cmd, command_options).stdout response = self._to_json(output) - assert response.get("CopyPartResult", []).get( - "ETag" - ), f"Expected ETag in response:\n{response}" + assert response.get("CopyPartResult", []).get("ETag"), f"Expected ETag in response:\n{response}" return response["CopyPartResult"]["ETag"] From 72bd467c53b3ed451adef8f1d10db09928559f2e Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Fri, 10 Nov 2023 22:43:13 +0300 Subject: [PATCH 165/363] [#114] Add yaml configuration controllers Signed-off-by: Andrey Berezin --- pyproject.toml | 4 ++ src/frostfs_testlib/plugins/__init__.py | 13 ++++ src/frostfs_testlib/storage/cluster.py | 29 +++----- .../storage/configuration/interfaces.py | 65 ++++++++++++++++++ .../configuration/service_configuration.py | 67 +++++++++++++++++++ src/frostfs_testlib/storage/constants.py | 1 + .../controllers/cluster_state_controller.py | 22 ++++++ .../state_managers/config_state_manager.py | 51 ++++++++++++++ .../storage/dataclasses/node_base.py | 14 +++- 9 files changed, 244 insertions(+), 22 deletions(-) create mode 100644 src/frostfs_testlib/storage/configuration/interfaces.py create mode 100644 src/frostfs_testlib/storage/configuration/service_configuration.py create mode 100644 src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py diff --git a/pyproject.toml b/pyproject.toml index ba38c03..48cc418 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,6 +47,10 @@ docker = "frostfs_testlib.hosting.docker_host:DockerHost" [project.entry-points."frostfs.testlib.healthcheck"] basic = "frostfs_testlib.healthcheck.basic_healthcheck:BasicHealthcheck" +[project.entry-points."frostfs.testlib.csc_managers"] +config = "frostfs_testlib.storage.controllers.state_managers.config_state_manager:ConfigStateManager" + + [tool.isort] profile = "black" src_paths = ["src", "tests"] diff --git a/src/frostfs_testlib/plugins/__init__.py b/src/frostfs_testlib/plugins/__init__.py index 6914b9b..79de340 100644 --- a/src/frostfs_testlib/plugins/__init__.py +++ b/src/frostfs_testlib/plugins/__init__.py @@ -17,3 +17,16 
@@ def load_plugin(plugin_group: str, name: str) -> Any: return None plugin = plugins[name] return plugin.load() + + +def load_all(group: str) -> Any: + """Loads all plugins using entry point specification. + + Args: + group: Name of plugin group. + + Returns: + Classes from specified group. + """ + plugins = entry_points(group=group) + return [plugin.load() for plugin in plugins] diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index fa4ee0a..b8c32ca 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -8,14 +8,10 @@ from frostfs_testlib.hosting import Host, Hosting from frostfs_testlib.hosting.config import ServiceConfig from frostfs_testlib.reporter import get_reporter from frostfs_testlib.storage import get_service_registry +from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml +from frostfs_testlib.storage.configuration.service_configuration import ServiceConfiguration from frostfs_testlib.storage.constants import ConfigAttributes -from frostfs_testlib.storage.dataclasses.frostfs_services import ( - HTTPGate, - InnerRing, - MorphChain, - S3Gate, - StorageNode, -) +from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces from frostfs_testlib.storage.service_registry import ServiceRegistry @@ -93,6 +89,9 @@ class ClusterNode: config_str = yaml.dump(new_config) shell.exec(f"echo '{config_str}' | sudo tee {config_file_path}") + def config(self, service_type: type[ServiceClass]) -> ServiceConfigurationYml: + return ServiceConfiguration(self.service(service_type)) + def service(self, service_type: type[ServiceClass]) -> ServiceClass: """ Get a service cluster node of specified type.
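A short sketch of how the new load_all entry-point loader and the per-node config() accessor compose in a test (illustrative only; node is an assumed ClusterNode fixture):

from frostfs_testlib.plugins import load_all
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode

# Discover every class registered under the new entry-point group
manager_classes = load_all(group="frostfs.testlib.csc_managers")

# Read one YAML option through the typed configuration accessor
resync = node.config(StorageNode).get("storage:shard:default:resync_metabase")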
@@ -118,9 +117,7 @@ class ClusterNode: ) def get_list_of_services(self) -> list[str]: - return [ - config.attributes[ConfigAttributes.SERVICE_NAME] for config in self.host.config.services - ] + return [config.attributes[ConfigAttributes.SERVICE_NAME] for config in self.host.config.services] def get_all_interfaces(self) -> dict[str, str]: return self.host.config.interfaces @@ -130,9 +127,7 @@ class ClusterNode: def get_data_interfaces(self) -> list[str]: return [ - ip_address - for name_interface, ip_address in self.host.config.interfaces.items() - if "data" in name_interface + ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "data" in name_interface ] def get_data_interface(self, search_interface: str) -> list[str]: @@ -221,9 +216,7 @@ class Cluster: cluster_nodes = set() for service in services: - cluster_nodes.update( - [node for node in self.cluster_nodes if node.service(type(service)) == service] - ) + cluster_nodes.update([node for node in self.cluster_nodes if node.service(type(service)) == service]) return list(cluster_nodes) @@ -331,8 +324,6 @@ class Cluster: return [node.get_endpoint() for node in nodes] def get_nodes_by_ip(self, ips: list[str]) -> list[ClusterNode]: - cluster_nodes = [ - node for node in self.cluster_nodes if URL(node.morph_chain.get_endpoint()).host in ips - ] + cluster_nodes = [node for node in self.cluster_nodes if URL(node.morph_chain.get_endpoint()).host in ips] with reporter.step(f"Return cluster nodes - {cluster_nodes}"): return cluster_nodes diff --git a/src/frostfs_testlib/storage/configuration/interfaces.py b/src/frostfs_testlib/storage/configuration/interfaces.py new file mode 100644 index 0000000..b2bc683 --- /dev/null +++ b/src/frostfs_testlib/storage/configuration/interfaces.py @@ -0,0 +1,65 @@ +from abc import ABC, abstractmethod +from typing import Any + + +class ServiceConfigurationYml(ABC): + """ + Class to manipulate yml configuration for service + """ + + def _find_option(self, key: str, data: dict): + tree = key.split(":") + current = data + for node in tree: + if isinstance(current, list) and len(current) - 1 >= int(node): + current = current[int(node)] + continue + + if node not in current: + return None + + current = current[node] + + return current + + def _set_option(self, key: str, value: Any, data: dict): + tree = key.split(":") + current = data + for node in tree[:-1]: + if isinstance(current, list) and len(current) - 1 >= int(node): + current = current[int(node)] + continue + + if node not in current: + current[node] = {} + + current = current[node] + + current[tree[-1]] = value + + @abstractmethod + def get(self, key: str) -> str: + """ + Get parameter value from current configuration + + Args: + key: key of the parameter in yaml format like 'storage:shard:default:resync_metabase' + + Returns: + value of the parameter + """ + + @abstractmethod + def set(self, values: dict[str, Any]): + """ + Sets parameters to configuration + + Args: + values: dict where key is the key of the parameter in yaml format like 'storage:shard:default:resync_metabase' and value is the value of the option to set + """ + + @abstractmethod + def revert(self): + """ + Revert changes + """ diff --git a/src/frostfs_testlib/storage/configuration/service_configuration.py b/src/frostfs_testlib/storage/configuration/service_configuration.py new file mode 100644 index 0000000..1aa7846 --- /dev/null +++ b/src/frostfs_testlib/storage/configuration/service_configuration.py @@ -0,0 +1,67 @@ +import os +import re +from typing import Any 
+ +import yaml + +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.shell.interfaces import CommandOptions +from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml +from frostfs_testlib.storage.dataclasses.node_base import ServiceClass + +reporter = get_reporter() + + +class ServiceConfiguration(ServiceConfigurationYml): + def __init__(self, service: "ServiceClass") -> None: + self.service = service + self.shell = self.service.host.get_shell() + self.confd_path = os.path.join(self.service.config_dir, "conf.d") + self.custom_file = os.path.join(self.confd_path, "99_changes.yml") + + def _path_exists(self, path: str) -> bool: + return not self.shell.exec(f"test -e {path}", options=CommandOptions(check=False)).return_code + + def _get_data_from_file(self, path: str) -> dict: + content = self.shell.exec(f"cat {path}").stdout + data = yaml.safe_load(content) + return data + + def get(self, key: str) -> str: + with reporter.step(f"Get {key} configuration value for {self.service}"): + config_files = [self.service.main_config_path] + + if self._path_exists(self.confd_path): + files = self.shell.exec(f"find {self.confd_path} -type f").stdout.strip().split() + # Sorting files in backwards order from latest to first one + config_files.extend(sorted(files, key=lambda x: -int(re.findall(r"^\d+", os.path.basename(x))[0]))) + + result = None + for file in config_files: + data = self._get_data_from_file(file) + result = self._find_option(key, data) + if result is not None: + break + + return result + + def set(self, values: dict[str, Any]): + with reporter.step(f"Change configuration for {self.service}"): + if not self._path_exists(self.confd_path): + self.shell.exec(f"mkdir {self.confd_path}") + + if self._path_exists(self.custom_file): + data = self._get_data_from_file(self.custom_file) + else: + data = {} + + for key, value in values.items(): + self._set_option(key, value, data) + + content = yaml.dump(data) + self.shell.exec(f"echo '{content}' | sudo tee {self.custom_file}") + self.shell.exec(f"chmod 777 {self.custom_file}") + + def revert(self): + with reporter.step(f"Revert changed options for {self.service}"): + self.shell.exec(f"rm -rf {self.custom_file}") diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 2284ce3..9ad24eb 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -3,6 +3,7 @@ class ConfigAttributes: WALLET_PASSWORD = "wallet_password" WALLET_PATH = "wallet_path" WALLET_CONFIG = "wallet_config" + CONFIG_DIR = "service_config_dir" CONFIG_PATH = "config_path" SHARD_CONFIG_PATH = "shard_config_path" LOCAL_WALLET_PATH = "local_wallet_path" diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 473af10..479f4dc 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -1,8 +1,10 @@ import datetime import time +from typing import TypeVar import frostfs_testlib.resources.optionals as optionals from frostfs_testlib.healthcheck.interfaces import Healthcheck +from frostfs_testlib.plugins import load_all from frostfs_testlib.reporter import get_reporter from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider from frostfs_testlib.steps.network import IfUpDownHelper, IpTablesHelper @@ -22,6 +24,14 @@ reporter = get_reporter()
if_up_down_helper = IfUpDownHelper() +class StateManager: + def __init__(self, cluster_state_controller: "ClusterStateController") -> None: + self.csc = cluster_state_controller + + +StateManagerClass = TypeVar("StateManagerClass", bound=StateManager) + + class ClusterStateController: def __init__(self, shell: Shell, cluster: Cluster, healthcheck: Healthcheck) -> None: self.stopped_nodes: list[ClusterNode] = [] @@ -33,6 +43,18 @@ class ClusterStateController: self.shell = shell self.suspended_services: dict[str, list[ClusterNode]] = {} self.nodes_with_modified_interface: list[ClusterNode] = [] + self.managers: list[StateManagerClass] = [] + + # TODO: move all functionality to managers + managers = set(load_all(group="frostfs.testlib.csc_managers")) + for manager in managers: + self.managers.append(manager(self)) + + def manager(self, manager_type: type[StateManagerClass]) -> StateManagerClass: + for manager in self.managers: + # Subclasses here for the future if we have overriding subclasses of base interface + if issubclass(type(manager), manager_type): + return manager def _get_stopped_by_node(self, node: ClusterNode) -> set[NodeBase]: stopped_by_node = [svc for svc in self.stopped_services if svc.host == node.host] diff --git a/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py b/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py new file mode 100644 index 0000000..078d483 --- /dev/null +++ b/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py @@ -0,0 +1,51 @@ +from typing import Any + +from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController, StateManager +from frostfs_testlib.storage.dataclasses.node_base import ServiceClass +from frostfs_testlib.testing import parallel + +reporter = get_reporter() + + +class ConfigStateManager(StateManager): + def __init__(self, cluster_state_controller: ClusterStateController) -> None: + super().__init__(cluster_state_controller) + self.services_with_changed_config: set[tuple[ClusterNode, ServiceClass]] = set() + self.cluster = self.csc.cluster + + @reporter.step_deco("Change configuration for {service_type} on all nodes") + def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any]): + services = self.cluster.services(service_type) + nodes = self.cluster.nodes(services) + self.services_with_changed_config.update([(node, service_type) for node in nodes]) + + self.csc.stop_services_of_type(service_type) + parallel([node.config(service_type).set for node in nodes], values=values) + self.csc.start_services_of_type(service_type) + + @reporter.step_deco("Change configuration for {service_type} on {node}") + def set_on_node(self, node: ClusterNode, service_type: type[ServiceClass], values: dict[str, Any]): + self.services_with_changed_config.add((node, service_type)) + + self.csc.stop_service_of_type(node, service_type) + node.config(service_type).set(values) + self.csc.start_service_of_type(node, service_type) + + @reporter.step_deco("Revert all configuration changes") + def revert_all(self): + if not self.services_with_changed_config: + return + + parallel(self._revert_svc, self.services_with_changed_config) + self.services_with_changed_config.clear() + + self.csc.start_all_stopped_services() + + # TODO: parallel can't have multiple parallel_items :( + @reporter.step_deco("Revert all configuration 
{node_and_service}") + def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass]): + node, service_type = node_and_service + self.csc.stop_service_of_type(node, service_type) + node.config(service_type).revert() diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 1e23c7e..4b9ffc2 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -120,6 +120,15 @@ class NodeBase(HumanReadableABC): ConfigAttributes.WALLET_CONFIG, ) + @property + def config_dir(self) -> str: + return self._get_attribute(ConfigAttributes.CONFIG_DIR) + + @property + def main_config_path(self) -> str: + return self._get_attribute(ConfigAttributes.CONFIG_PATH) + + # TODO: Deprecated def get_config(self, config_file_path: Optional[str] = None) -> tuple[str, dict]: if config_file_path is None: config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH) @@ -132,6 +141,7 @@ class NodeBase(HumanReadableABC): config = yaml.safe_load(config_text) return config_file_path, config + # TODO: Deprecated def save_config(self, new_config: dict, config_file_path: Optional[str] = None) -> None: if config_file_path is None: config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH) @@ -146,9 +156,7 @@ class NodeBase(HumanReadableABC): storage_wallet_pass = self.get_wallet_password() return wallet_utils.get_wallet_public_key(storage_wallet_path, storage_wallet_pass) - def _get_attribute( - self, attribute_name: str, default_attribute_name: Optional[str] = None - ) -> str: + def _get_attribute(self, attribute_name: str, default_attribute_name: Optional[str] = None) -> str: config = self.host.get_service_config(self.name) if attribute_name not in config.attributes: From 6519cfafc96e8880ce8fd69b989b83a2e5013da9 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 13 Nov 2023 13:34:49 +0300 Subject: [PATCH 166/363] [#116] Updates for local scenario teardown Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/runners.py | 68 ++++++------------- .../controllers/cluster_state_controller.py | 3 +- 2 files changed, 21 insertions(+), 50 deletions(-) diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index 9859256..583c8e6 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -18,11 +18,7 @@ from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources import optionals from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC from frostfs_testlib.resources.common import STORAGE_USER_NAME -from frostfs_testlib.resources.load_params import ( - BACKGROUND_LOAD_VUS_COUNT_DIVISOR, - LOAD_NODE_SSH_USER, - LOAD_NODES, -) +from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_VUS_COUNT_DIVISOR, LOAD_NODE_SSH_USER, LOAD_NODES from frostfs_testlib.shell.command_inspectors import SuInspector from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput from frostfs_testlib.storage.cluster import ClusterNode @@ -83,14 +79,10 @@ class DefaultRunner(RunnerBase): with reporter.step("Init s3 client on loaders"): storage_node = nodes_under_load[0].service(StorageNode) - s3_public_keys = [ - node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes - ] + s3_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes] grpc_peer = storage_node.get_rpc_endpoint() - parallel( - self._prepare_loader, self.loaders, 
load_params, grpc_peer, s3_public_keys, k6_dir - ) + parallel(self._prepare_loader, self.loaders, load_params, grpc_peer, s3_public_keys, k6_dir) def _prepare_loader( self, @@ -112,9 +104,9 @@ class DefaultRunner(RunnerBase): wallet_password=self.loaders_wallet.password, ).stdout aws_access_key_id = str( - re.search( - r"access_key_id.*:\s.(?P\w*)", issue_secret_output - ).group("aws_access_key_id") + re.search(r"access_key_id.*:\s.(?P\w*)", issue_secret_output).group( + "aws_access_key_id" + ) ) aws_secret_access_key = str( re.search( @@ -125,9 +117,7 @@ class DefaultRunner(RunnerBase): configure_input = [ InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id), - InteractiveInput( - prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key - ), + InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key), InteractiveInput(prompt_pattern=r".*", input=""), InteractiveInput(prompt_pattern=r".*", input=""), ] @@ -144,16 +134,12 @@ class DefaultRunner(RunnerBase): } endpoints_generators = { K6ProcessAllocationStrategy.PER_LOAD_NODE: itertools.cycle([endpoints]), - K6ProcessAllocationStrategy.PER_ENDPOINT: itertools.cycle( - [[endpoint] for endpoint in endpoints] - ), + K6ProcessAllocationStrategy.PER_ENDPOINT: itertools.cycle([[endpoint] for endpoint in endpoints]), } k6_processes_count = k6_distribution_count[load_params.k6_process_allocation_strategy] endpoints_gen = endpoints_generators[load_params.k6_process_allocation_strategy] - distributed_load_params_list = self._get_distributed_load_params_list( - load_params, k6_processes_count - ) + distributed_load_params_list = self._get_distributed_load_params_list(load_params, k6_processes_count) futures = parallel( self._init_k6_instance, @@ -164,9 +150,7 @@ class DefaultRunner(RunnerBase): ) self.k6_instances = [future.result() for future in futures] - def _init_k6_instance( - self, load_params_for_loader: LoadParams, loader: Loader, endpoints: list[str], k6_dir: str - ): + def _init_k6_instance(self, load_params_for_loader: LoadParams, loader: Loader, endpoints: list[str], k6_dir: str): shell = loader.get_shell() with reporter.step(f"Init K6 instance on {loader.ip} for endpoints {endpoints}"): with reporter.step(f"Make working directory"): @@ -204,9 +188,7 @@ class DefaultRunner(RunnerBase): and getattr(original_load_params, field.name) is not None ): original_value = getattr(original_load_params, field.name) - distribution = self._get_distribution( - math.ceil(original_value / divisor), workers_count - ) + distribution = self._get_distribution(math.ceil(original_value / divisor), workers_count) for i in range(workers_count): setattr(distributed_load_params[i], field.name, distribution[i]) @@ -233,10 +215,7 @@ class DefaultRunner(RunnerBase): # Remainder of clients left to be distributed remainder = clients_count - clients_per_worker * workers_count - distribution = [ - clients_per_worker + 1 if i < remainder else clients_per_worker - for i in range(workers_count) - ] + distribution = [clients_per_worker + 1 if i < remainder else clients_per_worker for i in range(workers_count)] return distribution def start(self): @@ -245,9 +224,7 @@ class DefaultRunner(RunnerBase): parallel([k6.start for k6 in self.k6_instances]) wait_after_start_time = datetime_utils.parse_time(load_params.setup_timeout) + 5 - with reporter.step( - f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on" - ): + with reporter.step(f"Wait for start timeout + couple more 
seconds ({wait_after_start_time}) before moving on"): time.sleep(wait_after_start_time) def stop(self): @@ -327,9 +304,7 @@ class LocalRunner(RunnerBase): with reporter.step("Update limits.conf"): limits_path = "/etc/security/limits.conf" self.file_keeper.add(cluster_node.storage_node, limits_path) - content = ( - f"{STORAGE_USER_NAME} hard nofile 65536\n{STORAGE_USER_NAME} soft nofile 65536\n" - ) + content = f"{STORAGE_USER_NAME} hard nofile 65536\n{STORAGE_USER_NAME} soft nofile 65536\n" shell.exec(f"echo '{content}' | sudo tee {limits_path}") with reporter.step("Download K6"): @@ -339,9 +314,7 @@ class LocalRunner(RunnerBase): shell.exec(f"sudo chmod -R 777 {k6_dir}") with reporter.step("Create empty_passwd"): - self.wallet = WalletInfo( - f"{k6_dir}/scenarios/files/wallet.json", "", "/tmp/empty_passwd.yml" - ) + self.wallet = WalletInfo(f"{k6_dir}/scenarios/files/wallet.json", "", "/tmp/empty_passwd.yml") content = yaml.dump({"password": ""}) shell.exec(f'echo "{content}" | sudo tee {self.wallet.config_path}') shell.exec(f"sudo chmod -R 777 {self.wallet.config_path}") @@ -383,15 +356,13 @@ class LocalRunner(RunnerBase): def start(self): load_params = self.k6_instances[0].load_params - self.cluster_state_controller.stop_all_s3_gates() - self.cluster_state_controller.stop_all_storage_services() + self.cluster_state_controller.stop_services_of_type(S3Gate) + self.cluster_state_controller.stop_services_of_type(StorageNode) parallel([k6.start for k6 in self.k6_instances]) wait_after_start_time = datetime_utils.parse_time(load_params.setup_timeout) + 5 - with reporter.step( - f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on" - ): + with reporter.step(f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on"): time.sleep(wait_after_start_time) @reporter.step_deco("Restore passwd on {cluster_node}") @@ -408,8 +379,7 @@ class LocalRunner(RunnerBase): for k6_instance in self.k6_instances: k6_instance.stop() - self.cluster_state_controller.start_stopped_storage_services() - self.cluster_state_controller.start_stopped_s3_gates() + self.cluster_state_controller.start_all_stopped_services() def get_results(self) -> dict: results = {} diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 479f4dc..45c08b3 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -173,7 +173,8 @@ class ClusterStateController: @reporter.step_deco("Wait for S3Gates reconnection to local storage") def wait_s3gates(self): online_s3gates = self._get_online(S3Gate) - parallel(self.wait_s3gate, online_s3gates) + if online_s3gates: + parallel(self.wait_s3gate, online_s3gates) @wait_for_success(600, 60) def wait_tree_healthcheck(self): From 61a1b2865241374fede87a7d2fd9ae6f849c34b1 Mon Sep 17 00:00:00 2001 From: "m.malygina" Date: Tue, 14 Nov 2023 14:00:08 +0300 Subject: [PATCH 167/363] s3local.js scenario Signed-off-by: m.malygina --- src/frostfs_testlib/load/__init__.py | 2 +- src/frostfs_testlib/load/load_config.py | 18 +++- src/frostfs_testlib/load/load_metrics.py | 9 ++ src/frostfs_testlib/load/load_report.py | 1 + src/frostfs_testlib/load/runners.py | 126 +++++++++++++++++++++++ 5 files changed, 150 insertions(+), 6 deletions(-) diff --git a/src/frostfs_testlib/load/__init__.py b/src/frostfs_testlib/load/__init__.py index 74b710f..ca2f120 100644 --- 
a/src/frostfs_testlib/load/__init__.py +++ b/src/frostfs_testlib/load/__init__.py @@ -11,4 +11,4 @@ from frostfs_testlib.load.load_config import ( ) from frostfs_testlib.load.load_report import LoadReport from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader -from frostfs_testlib.load.runners import DefaultRunner, LocalRunner +from frostfs_testlib.load.runners import DefaultRunner, LocalRunner, S3LocalRunner diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index a5d8535..735d8ec 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -20,6 +20,7 @@ class LoadScenario(Enum): S3 = "s3" S3_CAR = "s3_car" S3_MULTIPART = "s3_multipart" + S3_LOCAL = "s3local" HTTP = "http" VERIFY = "verify" LOCAL = "local" @@ -38,11 +39,12 @@ all_load_scenarios = [ LoadScenario.S3_CAR, LoadScenario.gRPC_CAR, LoadScenario.LOCAL, - LoadScenario.S3_MULTIPART + LoadScenario.S3_MULTIPART, + LoadScenario.S3_LOCAL ] all_scenarios = all_load_scenarios.copy() + [LoadScenario.VERIFY] -constant_vus_scenarios = [LoadScenario.gRPC, LoadScenario.S3, LoadScenario.HTTP, LoadScenario.LOCAL, LoadScenario.S3_MULTIPART] +constant_vus_scenarios = [LoadScenario.gRPC, LoadScenario.S3, LoadScenario.HTTP, LoadScenario.LOCAL, LoadScenario.S3_MULTIPART, LoadScenario.S3_LOCAL] constant_arrival_rate_scenarios = [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR] grpc_preset_scenarios = [ @@ -51,7 +53,7 @@ grpc_preset_scenarios = [ LoadScenario.gRPC_CAR, LoadScenario.LOCAL, ] -s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.S3_MULTIPART] +s3_preset_scenarios = [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.S3_MULTIPART, LoadScenario.S3_LOCAL] @dataclass @@ -172,9 +174,13 @@ class LoadParams: preset: Optional[Preset] = None # K6 download url k6_url: Optional[str] = None + # Requests module url + requests_module_url: Optional[str] = None + # aws cli download url + awscli_url: Optional[str] = None # No ssl verification flag no_verify_ssl: Optional[bool] = metadata_field( - [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.S3_MULTIPART, LoadScenario.VERIFY, LoadScenario.HTTP], + [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.S3_MULTIPART, LoadScenario.S3_LOCAL, LoadScenario.VERIFY, LoadScenario.HTTP], "no-verify-ssl", "NO_VERIFY_SSL", False, @@ -283,7 +289,9 @@ class LoadParams: # ------- LOCAL SCENARIO PARAMS ------- # Config file location (filled automatically) - config_file: Optional[str] = metadata_field([LoadScenario.LOCAL], None, "CONFIG_FILE", False) + config_file: Optional[str] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "CONFIG_FILE", False) + # Config directory location (filled automatically) + config_dir: Optional[str] = metadata_field([LoadScenario.S3_LOCAL], None, "CONFIG_DIR", False) def set_id(self, load_id): self.load_id = load_id diff --git a/src/frostfs_testlib/load/load_metrics.py b/src/frostfs_testlib/load/load_metrics.py index 474a96b..3f175cf 100644 --- a/src/frostfs_testlib/load/load_metrics.py +++ b/src/frostfs_testlib/load/load_metrics.py @@ -165,6 +165,14 @@ class S3Metrics(MetricsBase): _DELETE_ERRORS = "aws_obj_delete_fails" _DELETE_LATENCY = "aws_obj_delete_duration" +class S3LocalMetrics(MetricsBase): + _WRITE_SUCCESS = "s3local_obj_put_total" + _WRITE_ERRORS = "s3local_obj_put_fails" + _WRITE_LATENCY = "s3local_obj_put_duration" + + _READ_SUCCESS = "s3local_obj_get_total" + _READ_ERRORS = "s3local_obj_get_fails" + _READ_LATENCY = "s3local_obj_get_duration" 
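A hedged sketch of how these scenario-specific metric prefixes get consumed; get_metrics_object, extended just below, maps the new scenario onto S3LocalMetrics (summary is an assumed dict parsed from a finished k6 run):

from frostfs_testlib.load.load_config import LoadScenario
from frostfs_testlib.load.load_metrics import get_metrics_object

# Resolves to an S3LocalMetrics instance whose counters are read
# from the s3local_obj_put_* / s3local_obj_get_* metric names
metrics = get_metrics_object(LoadScenario.S3_LOCAL, summary)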
class LocalMetrics(MetricsBase): _WRITE_SUCCESS = "local_obj_put_total" @@ -197,6 +205,7 @@ def get_metrics_object(load_type: LoadScenario, summary: dict[str, Any]) -> Metr LoadScenario.S3: S3Metrics, LoadScenario.S3_CAR: S3Metrics, LoadScenario.S3_MULTIPART: S3Metrics, + LoadScenario.S3_LOCAL: S3LocalMetrics, LoadScenario.VERIFY: VerifyMetrics, LoadScenario.LOCAL: LocalMetrics, } diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py index b648bc2..ad3a26d 100644 --- a/src/frostfs_testlib/load/load_report.py +++ b/src/frostfs_testlib/load/load_report.py @@ -97,6 +97,7 @@ class LoadReport: LoadScenario.gRPC_CAR: "open model", LoadScenario.S3_CAR: "open model", LoadScenario.LOCAL: "local fill", + LoadScenario.S3_LOCAL: "local fill" } return model_map[self.load_params.scenario] diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index 583c8e6..982cfcc 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -390,3 +390,129 @@ class LocalRunner(RunnerBase): parallel(self.restore_passwd_on_node, self.nodes_under_load) return results + +class S3LocalRunner(LocalRunner): + endpoints: list[str] + k6_dir: str + + @reporter.step_deco("Run preset on loaders") + def preset(self): + LocalRunner.preset(self) + with reporter.step(f"Resolve containers in preset"): + parallel(self._resolve_containers_in_preset, self.k6_instances) + + @reporter.step_deco("Resolve containers in preset") + def _resolve_containers_in_preset(self, k6_instance: K6): + k6_instance.shell.exec( + f"sudo {self.k6_dir}/scenarios/preset/resolve_containers_in_preset.py --endpoint {k6_instance.endpoints[0]} --preset_file {k6_instance.load_params.preset.pregen_json}") + + @reporter.step_deco("Init k6 instances") + def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): + self.k6_instances = [] + futures = parallel( + self._init_k6_instance_, + self.loaders, + load_params, + endpoints, + k6_dir, + ) + self.k6_instances = [future.result() for future in futures] + + def _init_k6_instance_(self, loader: Loader, load_params: LoadParams, endpoints: list[str], k6_dir: str): + shell = loader.get_shell() + with reporter.step(f"Init K6 instance on {loader.ip} for endpoints {endpoints}"): + with reporter.step(f"Make working directory"): + shell.exec(f"sudo mkdir -p {load_params.working_dir}") + # If we chmod /home/ folder we can no longer ssh to the node + # !! IMPORTANT !! 
+ if ( + load_params.working_dir + and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}" + and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}/" + ): + shell.exec(f"sudo chmod -R 777 {load_params.working_dir}") + + return K6( + load_params, + self.endpoints, + k6_dir, + shell, + loader, + self.wallet, + ) + + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step_deco("Preparation steps") + def prepare( + self, + load_params: LoadParams, + cluster_nodes: list[ClusterNode], + nodes_under_load: list[ClusterNode], + k6_dir: str, + ): + self.k6_dir = k6_dir + with reporter.step("Init s3 client on loaders"): + storage_node = nodes_under_load[0].service(StorageNode) + s3_public_keys = [ + node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes + ] + grpc_peer = storage_node.get_rpc_endpoint() + + parallel(self.prepare_node, nodes_under_load, k6_dir, load_params, s3_public_keys, grpc_peer) + + @reporter.step_deco("Prepare node {cluster_node}") + def prepare_node(self, + cluster_node: ClusterNode, + k6_dir: str, + load_params: LoadParams, + s3_public_keys: list[str], + grpc_peer: str): + LocalRunner.prepare_node(self,cluster_node, k6_dir, load_params) + self.endpoints = cluster_node.s3_gate.get_all_endpoints() + shell = cluster_node.host.get_shell() + + with reporter.step("Uninstall previous installation of aws cli"): + shell.exec(f"sudo rm -rf /usr/local/aws-cli") + shell.exec(f"sudo rm -rf /usr/local/bin/aws") + shell.exec(f"sudo rm -rf /usr/local/bin/aws_completer") + + with reporter.step("Install aws cli"): + shell.exec(f"sudo curl {load_params.awscli_url} -o {k6_dir}/awscliv2.zip") + shell.exec(f"sudo unzip -q {k6_dir}/awscliv2.zip -d {k6_dir}") + shell.exec(f"sudo {k6_dir}/aws/install") + + with reporter.step("Install requests python module"): + shell.exec(f"sudo apt-get -y install python3-pip") + shell.exec(f"sudo curl -so {k6_dir}/requests.tar.gz {load_params.requests_module_url}") + shell.exec(f"sudo python3 -m pip install -I {k6_dir}/requests.tar.gz") + + with reporter.step(f"Init s3 client on {cluster_node.host_ip}"): + frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC) + issue_secret_output = frostfs_authmate_exec.secret.issue( + wallet=self.wallet.path, + peer=grpc_peer, + gate_public_key=s3_public_keys, + container_placement_policy=load_params.preset.container_placement_policy, + container_policy=f"{k6_dir}/scenarios/files/policy.json", + wallet_password=self.wallet.password, + ).stdout + aws_access_key_id = str( + re.search( + r"access_key_id.*:\s.(?P\w*)", issue_secret_output + ).group("aws_access_key_id") + ) + aws_secret_access_key = str( + re.search( + r"secret_access_key.*:\s.(?P\w*)", + issue_secret_output, + ).group("aws_secret_access_key") + ) + configure_input = [ + InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id), + InteractiveInput( + prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key + ), + InteractiveInput(prompt_pattern=r".*", input=""), + InteractiveInput(prompt_pattern=r".*", input=""), + ] + shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input)) \ No newline at end of file From 22647c6d594d3b914676600a85b9d1632c4e426b Mon Sep 17 00:00:00 2001 From: mkadilov Date: Wed, 15 Nov 2023 13:08:58 +0300 Subject: [PATCH 168/363] [#119] Renamed Github to Gitea in links Some links changed to git.frostfs from github Signed-off-by: Mikhail Kadilov --- CONTRIBUTING.md | 14 +++++++------- README.md | 2 +- 
pyproject.toml | 2 +- src/frostfs_testlib/cli/frostfs_adm/morph.py | 4 ++-- src/frostfs_testlib/cli/frostfs_cli/acl.py | 2 +- src/frostfs_testlib/steps/node_management.py | 2 +- 6 files changed, 13 insertions(+), 13 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index fdcaec7..69417d2 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -3,8 +3,8 @@ First, thank you for contributing! We love and encourage pull requests from everyone. Please follow the guidelines: -- Check the open [issues](https://github.com/TrueCloudLab/frostfs-testlib/issues) and - [pull requests](https://github.com/TrueCloudLab/frostfs-testlib/pulls) for existing +- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/issues) and + [pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/pulls) for existing discussions. - Open an issue first, to discuss a new feature or enhancement. @@ -26,8 +26,8 @@ Start by forking the `frostfs-testlib` repository, make changes in a branch and send a pull request. We encourage pull requests to discuss code changes. Here are the steps in details: -### Set up your GitHub Repository -Fork [FrostFS testlib upstream](https://github.com/TrueCloudLab/frostfs-testlib/fork) source +### Set up your Git Repository +Fork [FrostFS testlib upstream](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/forks) source repository to your own personal repository. Copy the URL of your fork and clone it: ```shell @@ -37,7 +37,7 @@ $ git clone ### Set up git remote as ``upstream`` ```shell $ cd frostfs-testlib -$ git remote add upstream https://github.com/TrueCloudLab/frostfs-testlib +$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-testlib $ git fetch upstream ``` @@ -99,8 +99,8 @@ $ git push origin feature/123-something_awesome ``` ### Create a Pull Request -Pull requests can be created via GitHub. Refer to [this -document](https://help.github.com/articles/creating-a-pull-request/) for +Pull requests can be created via Git. Refer to [this +document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for detailed steps on how to create a pull request. After a Pull Request gets peer reviewed and approved, it will be merged. diff --git a/README.md b/README.md index c194df9..2f8751f 100644 --- a/README.md +++ b/README.md @@ -92,4 +92,4 @@ The library provides the following primary components: ## Contributing -Any contributions to the library should conform to the [contribution guideline](https://github.com/TrueCloudLab/frostfs-testlib/blob/master/CONTRIBUTING.md). +Any contributions to the library should conform to the [contribution guideline](https://git.frostfs.info/TrueCloudLab/frostfs-testlib/src/branch/master/CONTRIBUTING.md). 
diff --git a/pyproject.toml b/pyproject.toml index 48cc418..34a37e3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,7 +36,7 @@ requires-python = ">=3.10" dev = ["black", "bumpver", "isort", "pre-commit"] [project.urls] -Homepage = "https://github.com/TrueCloudLab/frostfs-testlib" +Homepage = "https://git.frostfs.info/TrueCloudLab/frostfs-testlib" [project.entry-points."frostfs.testlib.reporter"] allure = "frostfs_testlib.reporter.allure_handler:AllureHandler" diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index 3faa875..a1693ac 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -219,7 +219,7 @@ class FrostfsAdmMorph(CliCommand): container_alias_fee: Container alias fee (default 500). container_fee: Container registration fee (default 1000). contracts: Path to archive with compiled FrostFS contracts - (default fetched from latest github release). + (default fetched from latest git release). epoch_duration: Amount of side chain blocks in one FrostFS epoch (default 240). homomorphic_disabled: Disable object homomorphic hashing. local_dump: Path to the blocks dump file. @@ -340,7 +340,7 @@ class FrostfsAdmMorph(CliCommand): Args: alphabet_wallets: Path to alphabet wallets dir. contracts: Path to archive with compiled FrostFS contracts - (default fetched from latest github release). + (default fetched from latest git release). rpc_endpoint: N3 RPC node endpoint. Returns: diff --git a/src/frostfs_testlib/cli/frostfs_cli/acl.py b/src/frostfs_testlib/cli/frostfs_cli/acl.py index bd0f80e..3e60582 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/acl.py +++ b/src/frostfs_testlib/cli/frostfs_cli/acl.py @@ -22,7 +22,7 @@ class FrostfsCliACL(CliCommand): Well-known system object headers start with '$Object:' prefix. User defined headers start without prefix. Read more about filter keys at: - http://github.com/TrueCloudLab/frostfs-api/blob/master/proto-docs/acl.md#message-eaclrecordfilter + https://git.frostfs.info/TrueCloudLab/frostfs-api/src/branch/master/proto-docs/acl.md#message-eaclrecord-filter Match is '=' for matching and '!=' for non-matching filter. Value is a valid unicode string corresponding to object or request header value. diff --git a/src/frostfs_testlib/steps/node_management.py b/src/frostfs_testlib/steps/node_management.py index 9c0c6b0..d91721c 100644 --- a/src/frostfs_testlib/steps/node_management.py +++ b/src/frostfs_testlib/steps/node_management.py @@ -169,7 +169,7 @@ def include_node_to_network_map( storage_node_set_status(node_to_include, status="online") # Per suggestion of @fyrchik we need to wait for 2 blocks after we set status and after tick epoch. - # First sleep can be omitted after https://github.com/TrueCloudLab/frostfs-node/issues/60 complete. + # First sleep can be omitted after https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/60 complete. 
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2) tick_epoch(shell, cluster) From ed70dada963229cfdd2033df4b1dc9ec556f56bf Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Mon, 20 Nov 2023 13:54:47 +0300 Subject: [PATCH 169/363] Add support test maintenance Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/cli/frostfs_adm/morph.py | 96 +++++++------------ src/frostfs_testlib/cli/frostfs_cli/cli.py | 3 + .../cli/frostfs_cli/control.py | 58 +++++++++++ src/frostfs_testlib/cli/netmap_parser.py | 86 +++++++++++++++++ src/frostfs_testlib/shell/local_shell.py | 7 +- src/frostfs_testlib/shell/ssh_shell.py | 33 ++----- .../controllers/cluster_state_controller.py | 78 +++++++++++++++ .../dataclasses/storage_object_info.py | 25 ++++- 8 files changed, 290 insertions(+), 96 deletions(-) create mode 100644 src/frostfs_testlib/cli/frostfs_cli/control.py create mode 100644 src/frostfs_testlib/cli/netmap_parser.py diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index a1693ac..1d753d9 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -27,11 +27,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph deposit-notary", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def dump_balances( @@ -56,11 +52,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph dump-balances", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def dump_config(self, rpc_endpoint: str) -> CommandResult: @@ -74,11 +66,25 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph dump-config", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, + ) + + def set_config( + self, set_key_value: str, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None + ) -> CommandResult: + """Add/update global config value in the FrostFS network. + + Args: + set_key_value: key1=val1 [key2=val2 ...] + alphabet_wallets: Path to alphabet wallets dir + rpc_endpoint: N3 RPC node endpoint + + Returns: + Command's result. 
+ """ + return self._execute( + f"morph set-config {set_key_value}", + **{param: param_value for param, param_value in locals().items() if param not in ["self", "set_key_value"]}, ) def dump_containers( @@ -101,11 +107,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph dump-containers", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def dump_hashes(self, rpc_endpoint: str) -> CommandResult: @@ -119,11 +121,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph dump-hashes", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def force_new_epoch( @@ -140,11 +138,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph force-new-epoch", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def generate_alphabet( @@ -165,11 +159,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph generate-alphabet", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def generate_storage_wallet( @@ -192,11 +182,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph generate-storage-wallet", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def init( @@ -232,11 +218,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph init", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def refill_gas( @@ -259,11 +241,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph refill-gas", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def restore_containers( @@ -286,11 +264,7 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph restore-containers", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def set_policy( @@ -348,17 +322,13 @@ class FrostfsAdmMorph(CliCommand): """ return self._execute( "morph update-contracts", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) def remove_nodes( self, node_netmap_keys: list[str], rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None ) -> CommandResult: - """ Move node to the Offline state in the candidates list + """Move node to the Offline state in the candidates list and tick an epoch to update the netmap using frostfs-adm Args: @@ -371,7 +341,7 @@ class FrostfsAdmMorph(CliCommand): """ if not 
len(node_netmap_keys):
             raise AttributeError("Got empty node_netmap_keys list")
-    
+
         return self._execute(
             f"morph remove-nodes {' '.join(node_netmap_keys)}",
             **{
@@ -379,4 +349,4 @@ class FrostfsAdmMorph(CliCommand):
                 for param, param_value in locals().items()
                 if param not in ["self", "node_netmap_keys"]
             },
-        )
\ No newline at end of file
+        )
diff --git a/src/frostfs_testlib/cli/frostfs_cli/cli.py b/src/frostfs_testlib/cli/frostfs_cli/cli.py
index a78da8b..c20a987 100644
--- a/src/frostfs_testlib/cli/frostfs_cli/cli.py
+++ b/src/frostfs_testlib/cli/frostfs_cli/cli.py
@@ -3,6 +3,7 @@ from typing import Optional
 from frostfs_testlib.cli.frostfs_cli.accounting import FrostfsCliAccounting
 from frostfs_testlib.cli.frostfs_cli.acl import FrostfsCliACL
 from frostfs_testlib.cli.frostfs_cli.container import FrostfsCliContainer
+from frostfs_testlib.cli.frostfs_cli.control import FrostfsCliControl
 from frostfs_testlib.cli.frostfs_cli.netmap import FrostfsCliNetmap
 from frostfs_testlib.cli.frostfs_cli.object import FrostfsCliObject
 from frostfs_testlib.cli.frostfs_cli.session import FrostfsCliSession
@@ -25,6 +26,7 @@ class FrostfsCli:
     storagegroup: FrostfsCliStorageGroup
     util: FrostfsCliUtil
     version: FrostfsCliVersion
+    control: FrostfsCliControl
 
     def __init__(self, shell: Shell, frostfs_cli_exec_path: str, config_file: Optional[str] = None):
         self.accounting = FrostfsCliAccounting(shell, frostfs_cli_exec_path, config=config_file)
@@ -38,3 +40,4 @@ class FrostfsCli:
         self.util = FrostfsCliUtil(shell, frostfs_cli_exec_path, config=config_file)
         self.version = FrostfsCliVersion(shell, frostfs_cli_exec_path, config=config_file)
         self.tree = FrostfsCliTree(shell, frostfs_cli_exec_path, config=config_file)
+        self.control = FrostfsCliControl(shell, frostfs_cli_exec_path, config=config_file)
diff --git a/src/frostfs_testlib/cli/frostfs_cli/control.py b/src/frostfs_testlib/cli/frostfs_cli/control.py
new file mode 100644
index 0000000..bfcd6ec
--- /dev/null
+++ b/src/frostfs_testlib/cli/frostfs_cli/control.py
@@ -0,0 +1,58 @@
+from typing import Optional
+
+from frostfs_testlib.cli.cli_command import CliCommand
+from frostfs_testlib.shell import CommandResult
+
+
+class FrostfsCliControl(CliCommand):
+    def set_status(
+        self,
+        endpoint: str,
+        status: str,
+        wallet: Optional[str] = None,
+        force: Optional[bool] = None,
+        address: Optional[str] = None,
+        timeout: Optional[str] = None,
+    ) -> CommandResult:
+        """Set status of the storage node in FrostFS network map.
+
+        Args:
+            wallet: Path to the wallet or binary key
+            address: Address of wallet account
+            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
+            force: Force turning to local maintenance
+            status: New netmap status keyword ('online', 'offline', 'maintenance')
+            timeout: Timeout for an operation (default 15s)
+
+        Returns:
+            Command's result.
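+
+        Example (hypothetical usage, assuming a configured FrostfsCliControl
+        instance named `control` and an illustrative endpoint value):
+            control.set_status(endpoint="localhost:8091", status="maintenance")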
+ """ + return self._execute( + "control set-status", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def healthcheck( + self, + endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Set status of the storage node in FrostFS network map + + Args: + wallet: Path to the wallet or binary key + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + force: Force turning to local maintenance + status: New netmap status keyword ('online', 'offline', 'maintenance') + timeout: Timeout for an operation (default 15s) + + Returns: + Command`s result. + """ + return self._execute( + "control healthcheck", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/frostfs_testlib/cli/netmap_parser.py b/src/frostfs_testlib/cli/netmap_parser.py new file mode 100644 index 0000000..6d2eaaa --- /dev/null +++ b/src/frostfs_testlib/cli/netmap_parser.py @@ -0,0 +1,86 @@ +import re + +from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo + + +class NetmapParser: + @staticmethod + def netinfo(output: str) -> NodeNetInfo: + regexes = { + "epoch": r"Epoch: (?P\d+)", + "network_magic": r"Network magic: (?P.*$)", + "time_per_block": r"Time per block: (?P\d+\w+)", + "container_fee": r"Container fee: (?P\d+)", + "epoch_duration": r"Epoch duration: (?P\d+)", + "inner_ring_candidate_fee": r"Inner Ring candidate fee: (?P\d+)", + "maximum_object_size": r"Maximum object size: (?P\d+)", + "withdrawal_fee": r"Withdrawal fee: (?P\d+)", + "homomorphic_hashing_disabled": r"Homomorphic hashing disabled: (?Ptrue|false)", + "maintenance_mode_allowed": r"Maintenance mode allowed: (?Ptrue|false)", + "eigen_trust_alpha": r"EigenTrustAlpha: (?P\d+\w+$)", + "eigen_trust_iterations": r"EigenTrustIterations: (?P\d+)", + } + parse_result = {} + + for key, regex in regexes.items(): + search_result = re.search(regex, output, flags=re.MULTILINE) + if search_result == None: + parse_result[key] = None + continue + parse_result[key] = search_result[key].strip() + + node_netinfo = NodeNetInfo(**parse_result) + + return node_netinfo + + @staticmethod + def snapshot_all_nodes(output: str) -> list[NodeNetmapInfo]: + """The code will parse each line and return each node as dataclass.""" + netmap_nodes = output.split("Node ")[1:] + dataclasses_netmap = [] + result_netmap = {} + + regexes = { + "node_id": r"\d+: (?P\w+)", + "node_data_ips": r"(?P/ip4/.+?)$", + "node_status": r"(?PONLINE|OFFLINE)", + "cluster_name": r"ClusterName: (?P\w+)", + "continent": r"Continent: (?P\w+)", + "country": r"Country: (?P\w+)", + "country_code": r"CountryCode: (?P\w+)", + "external_address": r"ExternalAddr: (?P/ip[4].+?)$", + "location": r"Location: (?P\w+.*)", + "node": r"Node: (?P\d+\.\d+\.\d+\.\d+)", + "price": r"Price: (?P\d+)", + "sub_div": r"SubDiv: (?P.*)", + "sub_div_code": r"SubDivCode: (?P\w+)", + "un_locode": r"UN-LOCODE: (?P\w+.*)", + "role": r"role: (?P\w+)", + } + + for node in netmap_nodes: + for key, regex in regexes.items(): + search_result = re.search(regex, node, flags=re.MULTILINE) + if key == "node_data_ips": + result_netmap[key] = search_result[key].strip().split(" ") + continue + if key == "external_address": + result_netmap[key] = search_result[key].strip().split(",") + continue + if search_result == None: + result_netmap[key] = None + 
continue + result_netmap[key] = search_result[key].strip() + + dataclasses_netmap.append(NodeNetmapInfo(**result_netmap)) + + return dataclasses_netmap + + @staticmethod + def snapshot_one_node(output: str, cluster_node: ClusterNode) -> NodeNetmapInfo | None: + snapshot_nodes = NetmapParser.snapshot_all_nodes(output=output) + snapshot_node = [node for node in snapshot_nodes if node.node == cluster_node.host_ip] + if not snapshot_node: + return None + return snapshot_node[0] diff --git a/src/frostfs_testlib/shell/local_shell.py b/src/frostfs_testlib/shell/local_shell.py index fa07890..26c7e9b 100644 --- a/src/frostfs_testlib/shell/local_shell.py +++ b/src/frostfs_testlib/shell/local_shell.py @@ -62,7 +62,8 @@ class LocalShell(Shell): if options.check and result.return_code != 0: raise RuntimeError( f"Command: {command}\nreturn code: {result.return_code}\n" - f"Output: {result.stdout}" + f"Output: {result.stdout}\n" + f"Stderr: {result.stderr}\n" ) return result @@ -94,9 +95,7 @@ class LocalShell(Shell): return_code=exc.returncode, ) raise RuntimeError( - f"Command: {command}\nError:\n" - f"return code: {exc.returncode}\n" - f"output: {exc.output}" + f"Command: {command}\nError:\n" f"return code: {exc.returncode}\n" f"output: {exc.output}" ) from exc except OSError as exc: raise RuntimeError(f"Command: {command}\nOutput: {exc.strerror}") from exc diff --git a/src/frostfs_testlib/shell/ssh_shell.py b/src/frostfs_testlib/shell/ssh_shell.py index 6db7d51..6b12f81 100644 --- a/src/frostfs_testlib/shell/ssh_shell.py +++ b/src/frostfs_testlib/shell/ssh_shell.py @@ -6,27 +6,11 @@ from functools import lru_cache, wraps from time import sleep from typing import ClassVar, Optional, Tuple -from paramiko import ( - AutoAddPolicy, - Channel, - ECDSAKey, - Ed25519Key, - PKey, - RSAKey, - SSHClient, - SSHException, - ssh_exception, -) +from paramiko import AutoAddPolicy, Channel, ECDSAKey, Ed25519Key, PKey, RSAKey, SSHClient, SSHException, ssh_exception from paramiko.ssh_exception import AuthenticationException from frostfs_testlib.reporter import get_reporter -from frostfs_testlib.shell.interfaces import ( - CommandInspector, - CommandOptions, - CommandResult, - Shell, - SshCredentials, -) +from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell, SshCredentials logger = logging.getLogger("frostfs.testlib.shell") reporter = get_reporter() @@ -97,8 +81,7 @@ class SshConnectionProvider: ) else: logger.info( - f"Trying to connect to host {host} as {creds.ssh_login} using password " - f"(attempt {attempt})" + f"Trying to connect to host {host} as {creds.ssh_login} using password " f"(attempt {attempt})" ) connection.connect( hostname=host, @@ -141,9 +124,7 @@ class HostIsNotAvailable(Exception): def log_command(func): @wraps(func) - def wrapper( - shell: "SSHShell", command: str, options: CommandOptions, *args, **kwargs - ) -> CommandResult: + def wrapper(shell: "SSHShell", command: str, options: CommandOptions, *args, **kwargs) -> CommandResult: command_info = command.removeprefix("$ProgressPreference='SilentlyContinue'\n") with reporter.step(command_info): logger.info(f'Execute command "{command}" on "{shell.host}"') @@ -238,15 +219,13 @@ class SSHShell(Shell): if options.check and result.return_code != 0: raise RuntimeError( - f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}" + f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}\nStderr: {result.stderr}\n" ) return result @log_command def 
_exec_interactive(self, command: str, options: CommandOptions) -> CommandResult: - stdin, stdout, stderr = self._connection.exec_command( - command, timeout=options.timeout, get_pty=True - ) + stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout, get_pty=True) for interactive_input in options.interactive_inputs: input = interactive_input.input if not input.endswith("\n"): diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 45c08b3..27fa034 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -3,9 +3,13 @@ import time from typing import TypeVar import frostfs_testlib.resources.optionals as optionals +from frostfs_testlib.cli import FrostfsAdm, FrostfsCli +from frostfs_testlib.cli.netmap_parser import NetmapParser from frostfs_testlib.healthcheck.interfaces import Healthcheck from frostfs_testlib.plugins import load_all from frostfs_testlib.reporter import get_reporter +from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC +from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG, MORPH_BLOCK_TIME from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider from frostfs_testlib.steps.network import IfUpDownHelper, IpTablesHelper from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode @@ -13,6 +17,7 @@ from frostfs_testlib.storage.controllers.disk_controller import DiskController from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass from frostfs_testlib.testing import parallel from frostfs_testlib.testing.test_control import run_optionally, wait_for_success +from frostfs_testlib.utils.datetime_utils import parse_time from frostfs_testlib.utils.failover_utils import ( wait_all_storage_nodes_returned, wait_for_host_offline, @@ -426,6 +431,79 @@ class ClusterStateController: return parallel(self._disable_date_synchronizer, self.cluster.cluster_nodes) + @reporter.step_deco("Set MaintenanceModeAllowed - {status}") + def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None: + frostfs_adm = FrostfsAdm( + shell=cluster_node.host.get_shell(), + frostfs_adm_exec_path=FROSTFS_ADM_EXEC, + config_file=FROSTFS_ADM_CONFIG_PATH, + ) + frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}") + + @reporter.step_deco("Set mode node to {status}") + def set_mode_node(self, cluster_node: ClusterNode, wallet: str, status: str, await_tick: bool = True) -> None: + rpc_endpoint = cluster_node.storage_node.get_rpc_endpoint() + control_endpoint = cluster_node.service(StorageNode).get_control_endpoint() + + frostfs_adm, frostfs_cli, frostfs_cli_remote = self._get_cli(local_shell=self.shell, cluster_node=cluster_node) + node_netinfo = NetmapParser.netinfo(frostfs_cli.netmap.netinfo(rpc_endpoint=rpc_endpoint, wallet=wallet).stdout) + + with reporter.step("If status maintenance, then check that the option is enabled"): + if node_netinfo.maintenance_mode_allowed == "false": + frostfs_adm.morph.set_config(set_key_value="MaintenanceModeAllowed=true") + + with reporter.step(f"Change the status to {status}"): + frostfs_cli_remote.control.set_status(endpoint=control_endpoint, status=status) + + if not await_tick: + return + + with reporter.step("Tick 1 epoch, and await 2 block"): + 
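# Assumption: MORPH_BLOCK_TIME is a duration string such as "8s"; waiting
+            # two block intervals lets the forced epoch settle before
+            # check_node_status below re-reads the netmap.
+            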
frostfs_adm.morph.force_new_epoch()
+            time.sleep(parse_time(MORPH_BLOCK_TIME) * 2)
+
+        self.check_node_status(status=status, wallet=wallet, cluster_node=cluster_node)
+
+    @wait_for_success(80, 8)
+    @reporter.step_deco("Check node status, expected status - {status}")
+    def check_node_status(self, status: str, wallet: str, cluster_node: ClusterNode):
+        frostfs_cli = FrostfsCli(
+            shell=self.shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG
+        )
+        netmap = NetmapParser.snapshot_all_nodes(
+            frostfs_cli.netmap.snapshot(rpc_endpoint=cluster_node.storage_node.get_rpc_endpoint(), wallet=wallet).stdout
+        )
+        netmap = [node for node in netmap if cluster_node.host_ip == node.node]
+        if status == "offline":
+            assert not netmap, f"Node {cluster_node.host_ip} is expected to be offline, but is still in the netmap"
+        else:
+            assert netmap[0].node_status == status.upper(), f"Node status is {netmap[0].node_status}, expected {status}"
+
+    def _get_cli(self, local_shell: Shell, cluster_node: ClusterNode) -> tuple[FrostfsAdm, FrostfsCli, FrostfsCli]:
+        # TODO Move to service config
+        host = cluster_node.host
+        service_config = host.get_service_config(cluster_node.storage_node.name)
+        wallet_path = service_config.attributes["wallet_path"]
+        wallet_password = service_config.attributes["wallet_password"]
+
+        shell = host.get_shell()
+        wallet_config_path = f"/tmp/{cluster_node.storage_node.name}-config.yaml"
+        wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
+        shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
+
+        frostfs_adm = FrostfsAdm(
+            shell=shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH
+        )
+        frostfs_cli = FrostfsCli(
+            shell=local_shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG
+        )
+        frostfs_cli_remote = FrostfsCli(
+            shell=shell,
+            frostfs_cli_exec_path=FROSTFS_CLI_EXEC,
+            config_file=wallet_config_path,
+        )
+        return frostfs_adm, frostfs_cli, frostfs_cli_remote
+
     def _enable_date_synchronizer(self, cluster_node: ClusterNode):
         shell = cluster_node.host.get_shell()
         shell.exec("timedatectl set-ntp true")
diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py
index d670d8e..63a3cf2 100644
--- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py
+++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py
@@ -1,5 +1,4 @@
 from dataclasses import dataclass
-from enum import Enum
 from typing import Optional
 
 from frostfs_testlib.testing.readable import HumanReadableEnum
@@ -28,10 +27,16 @@ class StorageObjectInfo(ObjectRef):
     locks: Optional[list[LockObjectInfo]] = None
 
 
+class ModeNode(HumanReadableEnum):
+    MAINTENANCE: str = "maintenance"
+    ONLINE: str = "online"
+    OFFLINE: str = "offline"
+
+
 @dataclass
 class NodeNetmapInfo:
     node_id: str = None
-    node_status: str = None
+    node_status: ModeNode = None
     node_data_ips: list[str] = None
     cluster_name: str = None
     continent: str = None
@@ -53,3 +58,19 @@ class Interfaces(HumanReadableEnum):
     MGMT: str = "mgmt"
     INTERNAL_0: str = "internal0"
     INTERNAL_1: str = "internal1"
+
+
+@dataclass
+class NodeNetInfo:
+    epoch: str = None
+    network_magic: str = None
+    time_per_block: str = None
+    container_fee: str = None
+    epoch_duration: str = None
+    inner_ring_candidate_fee: str = None
+    maximum_object_size: str = None
+    withdrawal_fee: str = None
+    homomorphic_hashing_disabled: str = None
+    maintenance_mode_allowed: str = None
+    eigen_trust_alpha: str = None
+    
eigen_trust_iterations: str = None From ed8f90dfc02e30ae6bf1be124b3f346ab2bce50f Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Mon, 20 Nov 2023 15:53:30 +0300 Subject: [PATCH 170/363] Change output time format to unix Signed-off-by: Dmitriy Zayakin --- .../storage/controllers/cluster_state_controller.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 27fa034..825f2ac 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -410,8 +410,8 @@ class ClusterStateController: @reporter.step_deco("Set node time to {in_date}") def change_node_date(self, node: ClusterNode, in_date: datetime) -> None: shell = node.host.get_shell() - shell.exec(f"hwclock --set --date='{in_date}'") - shell.exec("hwclock --hctosys") + shell.exec(f"date -s @{time.mktime(in_date.timetuple())}") + shell.exec("hwclock --systohc") node_time = self.get_node_date(node) with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a minute"): assert (self.get_node_date(node) - in_date) < datetime.timedelta(minutes=1) @@ -421,8 +421,8 @@ class ClusterStateController: shell = node.host.get_shell() now_time = datetime.datetime.now(datetime.timezone.utc) with reporter.step(f"Set {now_time} time"): - shell.exec(f"hwclock --set --date='{now_time}'") - shell.exec("hwclock --hctosys") + shell.exec(f"date -s @{time.mktime(now_time.timetuple())}") + shell.exec("hwclock --systohc") @reporter.step_deco("Change the synchronizer status to {status}") def set_sync_date_all_nodes(self, status: str): From 9ab4def44f5b9a81f960cce2d1a3b46664b5ecb6 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 20 Nov 2023 17:39:15 +0300 Subject: [PATCH 171/363] Store k6 output and add socket info collection Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/__init__.py | 3 +- src/frostfs_testlib/load/interfaces/loader.py | 14 +++++ .../scenario_runner.py} | 18 ++----- src/frostfs_testlib/load/k6.py | 54 +++++++------------ src/frostfs_testlib/load/loaders.py | 2 +- src/frostfs_testlib/load/runners.py | 53 +++++++++--------- .../controllers/background_load_controller.py | 26 +++------ .../controllers/cluster_state_controller.py | 10 ++-- src/frostfs_testlib/utils/failover_utils.py | 29 ++++++---- 9 files changed, 99 insertions(+), 110 deletions(-) create mode 100644 src/frostfs_testlib/load/interfaces/loader.py rename src/frostfs_testlib/load/{interfaces.py => interfaces/scenario_runner.py} (79%) diff --git a/src/frostfs_testlib/load/__init__.py b/src/frostfs_testlib/load/__init__.py index ca2f120..8477ee4 100644 --- a/src/frostfs_testlib/load/__init__.py +++ b/src/frostfs_testlib/load/__init__.py @@ -1,4 +1,5 @@ -from frostfs_testlib.load.interfaces import Loader, ScenarioRunner +from frostfs_testlib.load.interfaces.loader import Loader +from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner from frostfs_testlib.load.load_config import ( EndpointSelectionStrategy, K6ProcessAllocationStrategy, diff --git a/src/frostfs_testlib/load/interfaces/loader.py b/src/frostfs_testlib/load/interfaces/loader.py new file mode 100644 index 0000000..2c818d9 --- /dev/null +++ b/src/frostfs_testlib/load/interfaces/loader.py @@ -0,0 +1,14 @@ +from abc import ABC, abstractmethod + +from frostfs_testlib.shell.interfaces import Shell + + +class 
Loader(ABC): + @abstractmethod + def get_shell(self) -> Shell: + """Get shell for the loader""" + + @property + @abstractmethod + def ip(self): + """Get address of the loader""" diff --git a/src/frostfs_testlib/load/interfaces.py b/src/frostfs_testlib/load/interfaces/scenario_runner.py similarity index 79% rename from src/frostfs_testlib/load/interfaces.py rename to src/frostfs_testlib/load/interfaces/scenario_runner.py index 394fff7..45c1317 100644 --- a/src/frostfs_testlib/load/interfaces.py +++ b/src/frostfs_testlib/load/interfaces/scenario_runner.py @@ -1,20 +1,8 @@ from abc import ABC, abstractmethod +from frostfs_testlib.load.k6 import K6 from frostfs_testlib.load.load_config import LoadParams -from frostfs_testlib.shell.interfaces import Shell from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo - - -class Loader(ABC): - @abstractmethod - def get_shell(self) -> Shell: - """Get shell for the loader""" - - @property - @abstractmethod - def ip(self): - """Get address of the loader""" class ScenarioRunner(ABC): @@ -32,6 +20,10 @@ class ScenarioRunner(ABC): def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): """Init K6 instances""" + @abstractmethod + def get_k6_instances(self) -> list[K6]: + """Get K6 instances""" + @abstractmethod def start(self): """Start K6 instances""" diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index e46221e..3dedd53 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -8,13 +8,8 @@ from time import sleep from typing import Any from urllib.parse import urlparse -from frostfs_testlib.load.interfaces import Loader -from frostfs_testlib.load.load_config import ( - K6ProcessAllocationStrategy, - LoadParams, - LoadScenario, - LoadType, -) +from frostfs_testlib.load.interfaces.loader import Loader +from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario, LoadType from frostfs_testlib.processes.remote_process import RemoteProcess from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.common import STORAGE_USER_NAME @@ -59,6 +54,7 @@ class K6: self.loader: Loader = loader self.shell: Shell = shell self.wallet = wallet + self.preset_output: str = "" self.summary_json: str = os.path.join( self.load_params.working_dir, f"{self.load_params.load_id}_{self.load_params.scenario.value}_summary.json", @@ -101,10 +97,10 @@ class K6: command = " ".join(command_args) result = self.shell.exec(command) - assert ( - result.return_code == EXIT_RESULT_CODE - ), f"Return code of preset is not zero: {result.stdout}" - return result.stdout.strip("\n") + assert result.return_code == EXIT_RESULT_CODE, f"Return code of preset is not zero: {result.stdout}" + + self.preset_output = result.stdout.strip("\n") + return self.preset_output @reporter.step_deco("Generate K6 command") def _generate_env_variables(self) -> str: @@ -113,31 +109,21 @@ class K6: env_vars[f"{self.load_params.load_type.value.upper()}_ENDPOINTS"] = ",".join(self.endpoints) env_vars["SUMMARY_JSON"] = self.summary_json - reporter.attach( - "\n".join(f"{param}: {value}" for param, value in env_vars.items()), "K6 ENV variables" - ) - return " ".join( - [f"-e {param}='{value}'" for param, value in env_vars.items() if value is not None] - ) + reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "K6 ENV variables") + return " ".join([f"-e {param}='{value}'" for param, 
value in env_vars.items() if value is not None]) def start(self) -> None: - with reporter.step( - f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}" - ): + with reporter.step(f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}"): self._start_time = int(datetime.utcnow().timestamp()) command = ( f"{self._k6_dir}/k6 run {self._generate_env_variables()} " f"{self._k6_dir}/scenarios/{self.load_params.scenario.value}.js" ) user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None - self._k6_process = RemoteProcess.create( - command, self.shell, self.load_params.working_dir, user - ) + self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, user) def wait_until_finished(self, soft_timeout: int = 0) -> None: - with reporter.step( - f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}" - ): + with reporter.step(f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}"): if self.load_params.scenario == LoadScenario.VERIFY: timeout = self.load_params.verify_time or 0 else: @@ -180,9 +166,11 @@ class K6: while timeout > 0: if not self._k6_process.running(): return - remaining_time_hours = f"{timeout//3600}h" if timeout//3600 != 0 else "" - remaining_time_minutes = f"{timeout//60%60}m" if timeout//60%60 != 0 else "" - logger.info(f"K6 is running. Remaining time {remaining_time_hours}{remaining_time_minutes}{timeout%60}s. Next check after {wait_interval} seconds...") + remaining_time_hours = f"{timeout//3600}h" if timeout // 3600 != 0 else "" + remaining_time_minutes = f"{timeout//60%60}m" if timeout // 60 % 60 != 0 else "" + logger.info( + f"K6 is running. Remaining time {remaining_time_hours}{remaining_time_minutes}{timeout%60}s. Next check after {wait_interval} seconds..." 
+ ) sleep(wait_interval) timeout -= min(timeout, wait_interval) wait_interval = max( @@ -198,9 +186,7 @@ class K6: raise TimeoutError(f"Expected K6 to finish after {original_timeout} sec.") def get_results(self) -> Any: - with reporter.step( - f"Get load results from loader {self.loader.ip} on endpoints {self.endpoints}" - ): + with reporter.step(f"Get load results from loader {self.loader.ip} on endpoints {self.endpoints}"): self.__log_output() if not self.summary_json: @@ -231,9 +217,7 @@ class K6: return False @reporter.step_deco("Wait until K6 process end") - @wait_for_success( - K6_STOP_SIGNAL_TIMEOUT, 15, False, False, "Can not stop K6 process within timeout" - ) + @wait_for_success(K6_STOP_SIGNAL_TIMEOUT, 15, False, False, "Can not stop K6 process within timeout") def _wait_until_process_end(self): return self._k6_process.running() diff --git a/src/frostfs_testlib/load/loaders.py b/src/frostfs_testlib/load/loaders.py index 9e92155..1e0e97f 100644 --- a/src/frostfs_testlib/load/loaders.py +++ b/src/frostfs_testlib/load/loaders.py @@ -1,4 +1,4 @@ -from frostfs_testlib.load.interfaces import Loader +from frostfs_testlib.load.interfaces.loader import Loader from frostfs_testlib.resources.load_params import ( LOAD_NODE_SSH_PASSWORD, LOAD_NODE_SSH_PRIVATE_KEY_PASSPHRASE, diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index 982cfcc..ea5a374 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -10,7 +10,8 @@ from urllib.parse import urlparse import yaml from frostfs_testlib.cli.frostfs_authmate.authmate import FrostfsAuthmate -from frostfs_testlib.load.interfaces import Loader, ScenarioRunner +from frostfs_testlib.load.interfaces.loader import Loader +from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner from frostfs_testlib.load.k6 import K6 from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadType from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader @@ -50,6 +51,9 @@ class RunnerBase(ScenarioRunner): return any([future.result() for future in futures]) + def get_k6_instances(self): + return self.k6_instances + class DefaultRunner(RunnerBase): loaders: list[Loader] @@ -391,6 +395,7 @@ class LocalRunner(RunnerBase): return results + class S3LocalRunner(LocalRunner): endpoints: list[str] k6_dir: str @@ -404,7 +409,8 @@ class S3LocalRunner(LocalRunner): @reporter.step_deco("Resolve containers in preset") def _resolve_containers_in_preset(self, k6_instance: K6): k6_instance.shell.exec( - f"sudo {self.k6_dir}/scenarios/preset/resolve_containers_in_preset.py --endpoint {k6_instance.endpoints[0]} --preset_file {k6_instance.load_params.preset.pregen_json}") + f"sudo {self.k6_dir}/scenarios/preset/resolve_containers_in_preset.py --endpoint {k6_instance.endpoints[0]} --preset_file {k6_instance.load_params.preset.pregen_json}" + ) @reporter.step_deco("Init k6 instances") def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): @@ -426,9 +432,9 @@ class S3LocalRunner(LocalRunner): # If we chmod /home/ folder we can no longer ssh to the node # !! IMPORTANT !! 
if (
-                load_params.working_dir
-                and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}"
-                and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}/"
+            load_params.working_dir
+            and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}"
+            and not load_params.working_dir == f"/home/{LOAD_NODE_SSH_USER}/"
         ):
             shell.exec(f"sudo chmod -R 777 {load_params.working_dir}")
 
@@ -444,30 +450,25 @@ class S3LocalRunner(LocalRunner):
     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
     @reporter.step_deco("Preparation steps")
     def prepare(
-            self,
-            load_params: LoadParams,
-            cluster_nodes: list[ClusterNode],
-            nodes_under_load: list[ClusterNode],
-            k6_dir: str,
+        self,
+        load_params: LoadParams,
+        cluster_nodes: list[ClusterNode],
+        nodes_under_load: list[ClusterNode],
+        k6_dir: str,
     ):
         self.k6_dir = k6_dir
         with reporter.step("Init s3 client on loaders"):
             storage_node = nodes_under_load[0].service(StorageNode)
-            s3_public_keys = [
-                node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes
-            ]
+            s3_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes]
             grpc_peer = storage_node.get_rpc_endpoint()
 
         parallel(self.prepare_node, nodes_under_load, k6_dir, load_params, s3_public_keys, grpc_peer)
 
     @reporter.step_deco("Prepare node {cluster_node}")
-    def prepare_node(self,
-                 cluster_node: ClusterNode,
-                 k6_dir: str,
-                 load_params: LoadParams,
-                 s3_public_keys: list[str],
-                 grpc_peer: str):
-        LocalRunner.prepare_node(self,cluster_node, k6_dir, load_params)
+    def prepare_node(
+        self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams, s3_public_keys: list[str], grpc_peer: str
+    ):
+        LocalRunner.prepare_node(self, cluster_node, k6_dir, load_params)
         self.endpoints = cluster_node.s3_gate.get_all_endpoints()
         shell = cluster_node.host.get_shell()
 
@@ -497,9 +498,9 @@ class S3LocalRunner(LocalRunner):
             wallet_password=self.wallet.password,
         ).stdout
         aws_access_key_id = str(
-            re.search(
-                r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output
-            ).group("aws_access_key_id")
+            re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group(
+                "aws_access_key_id"
+            )
         )
         aws_secret_access_key = str(
             re.search(
                 r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output
             ).group("aws_secret_access_key")
         )
 
         configure_input = [
             InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id),
-            InteractiveInput(
-                prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key
-            ),
+            InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key),
             InteractiveInput(prompt_pattern=r".*", input=""),
             InteractiveInput(prompt_pattern=r".*", input=""),
         ]
-        shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input))
\ No newline at end of file
+        shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input))
diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py
index a18a603..8ecada8 100644
--- a/src/frostfs_testlib/storage/controllers/background_load_controller.py
+++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py
@@ -2,13 +2,8 @@ import copy
 from typing import Optional
 
 import frostfs_testlib.resources.optionals as optionals
-from frostfs_testlib.load.interfaces import ScenarioRunner
-from frostfs_testlib.load.load_config import (
-    EndpointSelectionStrategy,
-    LoadParams,
-    LoadScenario,
-    LoadType,
-)
+from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner
+from 
frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType from frostfs_testlib.load.load_report import LoadReport from frostfs_testlib.load.load_verifiers import LoadVerifier from frostfs_testlib.reporter import get_reporter @@ -56,9 +51,7 @@ class BackgroundLoadController: raise RuntimeError("endpoint_selection_strategy should not be None") @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED, []) - def _get_endpoints( - self, load_type: LoadType, endpoint_selection_strategy: EndpointSelectionStrategy - ): + def _get_endpoints(self, load_type: LoadType, endpoint_selection_strategy: EndpointSelectionStrategy): all_endpoints = { LoadType.gRPC: { EndpointSelectionStrategy.ALL: list( @@ -85,10 +78,7 @@ class BackgroundLoadController: ) ), EndpointSelectionStrategy.FIRST: list( - set( - node_under_load.service(S3Gate).get_endpoint() - for node_under_load in self.nodes_under_load - ) + set(node_under_load.service(S3Gate).get_endpoint() for node_under_load in self.nodes_under_load) ), }, } @@ -98,12 +88,8 @@ class BackgroundLoadController: @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @reporter.step_deco("Prepare load instances") def prepare(self): - self.endpoints = self._get_endpoints( - self.load_params.load_type, self.load_params.endpoint_selection_strategy - ) - self.runner.prepare( - self.load_params, self.cluster_nodes, self.nodes_under_load, self.k6_dir - ) + self.endpoints = self._get_endpoints(self.load_params.load_type, self.load_params.endpoint_selection_strategy) + self.runner.prepare(self.load_params, self.cluster_nodes, self.nodes_under_load, self.k6_dir) self.runner.init_k6_instances(self.load_params, self.endpoints, self.k6_dir) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 825f2ac..000bdd8 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -109,12 +109,14 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start host of node {node}") - def start_node_host(self, node: ClusterNode): + def start_node_host(self, node: ClusterNode, tree_healthcheck: bool = True): with reporter.step(f"Start host {node.host.config.address}"): node.host.start_host() wait_for_host_online(self.shell, node.storage_node) + self.stopped_nodes.remove(node) wait_for_node_online(node.storage_node) - self.stopped_nodes.remove(node) + if tree_healthcheck: + self.wait_tree_healthcheck() @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Start stopped hosts") @@ -364,7 +366,7 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step_deco("Hard reboot host {node} via magic SysRq option") - def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True): + def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True, tree_healthcheck: bool = True): shell = node.host.get_shell() shell.exec('sudo sh -c "echo 1 > /proc/sys/kernel/sysrq"') @@ -381,6 +383,8 @@ class ClusterStateController: time.sleep(10) wait_for_host_online(self.shell, node.storage_node) wait_for_node_online(node.storage_node) + if tree_healthcheck: + self.wait_tree_healthcheck() @reporter.step_deco("Down {interface} to {nodes}") def down_interface(self, nodes: 
list[ClusterNode], interface: str): diff --git a/src/frostfs_testlib/utils/failover_utils.py b/src/frostfs_testlib/utils/failover_utils.py index 8c6062f..27cd181 100644 --- a/src/frostfs_testlib/utils/failover_utils.py +++ b/src/frostfs_testlib/utils/failover_utils.py @@ -12,6 +12,7 @@ from frostfs_testlib.steps.node_management import storage_node_healthcheck from frostfs_testlib.steps.storage_policy import get_nodes_with_object from frostfs_testlib.storage.cluster import Cluster, ClusterNode, NodeBase, StorageNode from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain +from frostfs_testlib.testing.parallel import parallel from frostfs_testlib.testing.test_control import retry, wait_for_success from frostfs_testlib.utils.datetime_utils import parse_time @@ -26,12 +27,17 @@ def ping_host(shell: Shell, host: Host): return shell.exec(f"ping {host.config.address} -c 1", options).return_code +# TODO: Move to ClusterStateController @reporter.step_deco("Wait for storage nodes returned to cluster") def wait_all_storage_nodes_returned(shell: Shell, cluster: Cluster) -> None: - for node in cluster.services(StorageNode): - with reporter.step(f"Run health check for storage at '{node}'"): - wait_for_host_online(shell, node) - wait_for_node_online(node) + nodes = cluster.services(StorageNode) + parallel(_wait_for_storage_node, nodes, shell=shell) + + +@reporter.step_deco("Run health check for storage at '{node}'") +def _wait_for_storage_node(node: StorageNode, shell: Shell) -> None: + wait_for_host_online(shell, node) + wait_for_node_online(node) @retry(max_attempts=60, sleep_interval=5, expected_result=0) @@ -64,10 +70,17 @@ def wait_for_node_online(node: StorageNode): except Exception as err: logger.warning(f"Node healthcheck fails with error {err}") return False + finally: + gather_socket_info(node) return health_check.health_status == "READY" and health_check.network_status == "ONLINE" +@reporter.step_deco("Gather socket info for {node}") +def gather_socket_info(node: StorageNode): + node.host.get_shell().exec("ss -tuln | grep 8080", CommandOptions(check=False)) + + @reporter.step_deco("Check and return status of given service") def service_status(service: str, shell: Shell) -> str: return shell.exec(f"sudo systemctl is-active {service}").stdout.rstrip() @@ -139,9 +152,7 @@ def multiple_restart( service_name = node.service(service_type).name for _ in range(count): node.host.restart_service(service_name) - logger.info( - f"Restart {service_systemctl_name}; sleep {sleep_interval} seconds and continue" - ) + logger.info(f"Restart {service_systemctl_name}; sleep {sleep_interval} seconds and continue") sleep(sleep_interval) @@ -164,9 +175,7 @@ def check_services_status(service_list: list[str], expected_status: str, shell: @reporter.step_deco("Wait for active status of passed service") @wait_for_success(60, 5) -def wait_service_in_desired_state( - service: str, shell: Shell, expected_status: Optional[str] = "active" -): +def wait_service_in_desired_state(service: str, shell: Shell, expected_status: Optional[str] = "active"): real_status = service_status(service=service, shell=shell) assert ( expected_status == real_status From 253bb3b1d81eb66b3e145bd777e08c99e3377ee5 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 22 Nov 2023 17:10:09 +0300 Subject: [PATCH 172/363] [126] small healthcheck and stop start hosts rework Signed-off-by: Andrey Berezin --- .../healthcheck/basic_healthcheck.py | 67 ++++++++++++++--- src/frostfs_testlib/healthcheck/interfaces.py | 10 +-- 
.../controllers/cluster_state_controller.py | 74 +++++++++++++------ src/frostfs_testlib/utils/failover_utils.py | 66 +---------------- 4 files changed, 112 insertions(+), 105 deletions(-) diff --git a/src/frostfs_testlib/healthcheck/basic_healthcheck.py b/src/frostfs_testlib/healthcheck/basic_healthcheck.py index 9c1d151..6f21534 100644 --- a/src/frostfs_testlib/healthcheck/basic_healthcheck.py +++ b/src/frostfs_testlib/healthcheck/basic_healthcheck.py @@ -1,22 +1,65 @@ +from typing import Callable + from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli from frostfs_testlib.healthcheck.interfaces import Healthcheck from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC +from frostfs_testlib.shell import CommandOptions from frostfs_testlib.steps.node_management import storage_node_healthcheck from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.testing.test_control import wait_for_success reporter = get_reporter() class BasicHealthcheck(Healthcheck): - @reporter.step_deco("Perform healthcheck for {cluster_node}") - def perform(self, cluster_node: ClusterNode): - result = self.storage_healthcheck(cluster_node) - if result: - raise AssertionError(result) + def _perform(self, cluster_node: ClusterNode, checks: dict[Callable, dict]): + issues: list[str] = [] + for check, kwargs in checks.items(): + issue = check(cluster_node, **kwargs) + if issue: + issues.append(issue) + + assert not issues, "Issues found:\n" + "\n".join(issues) + + @wait_for_success(900, 30) + def full_healthcheck(self, cluster_node: ClusterNode): + checks = { + self.storage_healthcheck: {}, + self._tree_healthcheck: {}, + } + + with reporter.step(f"Perform full healthcheck for {cluster_node}"): + self._perform(cluster_node, checks) + + @wait_for_success(900, 30) + def startup_healthcheck(self, cluster_node: ClusterNode): + checks = { + self.storage_healthcheck: {}, + self._tree_healthcheck: {}, + } + + with reporter.step(f"Perform startup healthcheck on {cluster_node}"): + self._perform(cluster_node, checks) + + @wait_for_success(900, 30) + def storage_healthcheck(self, cluster_node: ClusterNode) -> str | None: + checks = { + self._storage_healthcheck: {}, + } + + with reporter.step(f"Perform storage healthcheck on {cluster_node}"): + self._perform(cluster_node, checks) + + @reporter.step_deco("Storage healthcheck on {cluster_node}") + def _storage_healthcheck(self, cluster_node: ClusterNode) -> str | None: + result = storage_node_healthcheck(cluster_node.storage_node) + self._gather_socket_info(cluster_node) + if result.health_status != "READY" or result.network_status != "ONLINE": + return f"Node {cluster_node} is not healthy. Health={result.health_status}. Network={result.network_status}" @reporter.step_deco("Tree healthcheck on {cluster_node}") - def tree_healthcheck(self, cluster_node: ClusterNode) -> str | None: + def _tree_healthcheck(self, cluster_node: ClusterNode) -> str | None: host = cluster_node.host service_config = host.get_service_config(cluster_node.storage_node.name) wallet_path = service_config.attributes["wallet_path"] @@ -34,10 +77,10 @@ class BasicHealthcheck(Healthcheck): ) result = remote_cli.tree.healthcheck(rpc_endpoint="127.0.0.1:8080") if result.return_code != 0: - return f"Error during tree healthcheck (rc={result.return_code}): {result.stdout}. \n Stderr: {result.stderr}" + return ( + f"Error during tree healthcheck (rc={result.return_code}): {result.stdout}. 
\n Stderr: {result.stderr}" + ) - @reporter.step_deco("Storage healthcheck on {cluster_node}") - def storage_healthcheck(self, cluster_node: ClusterNode) -> str | None: - result = storage_node_healthcheck(cluster_node.storage_node) - if result.health_status != "READY" or result.network_status != "ONLINE": - return f"Node {cluster_node} is not healthy. Health={result.health_status}. Network={result.network_status}" + @reporter.step_deco("Gather socket info for {cluster_node}") + def _gather_socket_info(self, cluster_node: ClusterNode): + cluster_node.host.get_shell().exec("ss -tuln | grep 8080", CommandOptions(check=False)) diff --git a/src/frostfs_testlib/healthcheck/interfaces.py b/src/frostfs_testlib/healthcheck/interfaces.py index a036a82..83fa021 100644 --- a/src/frostfs_testlib/healthcheck/interfaces.py +++ b/src/frostfs_testlib/healthcheck/interfaces.py @@ -5,13 +5,13 @@ from frostfs_testlib.storage.cluster import ClusterNode class Healthcheck(ABC): @abstractmethod - def perform(self, cluster_node: ClusterNode): - """Perform healthcheck on the target cluster node""" + def full_healthcheck(self, cluster_node: ClusterNode): + """Perform full healthcheck on the target cluster node""" @abstractmethod - def tree_healthcheck(self, cluster_node: ClusterNode): - """Check tree sync status on target cluster node""" + def startup_healthcheck(self, cluster_node: ClusterNode): + """Perform healthcheck required on startup of target cluster node""" @abstractmethod def storage_healthcheck(self, cluster_node: ClusterNode): - """Perform storage node healthcheck on target cluster node""" + """Perform storage service healthcheck on target cluster node""" diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 000bdd8..7020671 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -1,4 +1,5 @@ import datetime +import logging import time from typing import TypeVar @@ -6,6 +7,7 @@ import frostfs_testlib.resources.optionals as optionals from frostfs_testlib.cli import FrostfsAdm, FrostfsCli from frostfs_testlib.cli.netmap_parser import NetmapParser from frostfs_testlib.healthcheck.interfaces import Healthcheck +from frostfs_testlib.hosting.interfaces import HostStatus from frostfs_testlib.plugins import load_all from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC @@ -16,16 +18,11 @@ from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, Storag from frostfs_testlib.storage.controllers.disk_controller import DiskController from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass from frostfs_testlib.testing import parallel -from frostfs_testlib.testing.test_control import run_optionally, wait_for_success +from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for_success from frostfs_testlib.utils.datetime_utils import parse_time -from frostfs_testlib.utils.failover_utils import ( - wait_all_storage_nodes_returned, - wait_for_host_offline, - wait_for_host_online, - wait_for_node_online, -) reporter = get_reporter() +logger = logging.getLogger("NeoLogger") if_up_down_helper = IfUpDownHelper() @@ -88,7 +85,7 @@ class ClusterStateController: self.stopped_nodes.append(node) with reporter.step(f"Stop host {node.host.config.address}"): 
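# Note: _wait_for_host_offline below is the ping-based helper added at
+            # the bottom of this class in this patch; it replaces the old
+            # failover_utils.wait_for_host_offline.
            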
node.host.stop_host(mode=mode)
-        wait_for_host_offline(self.shell, node.storage_node)
+        self._wait_for_host_offline(node)
 
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
     @reporter.step_deco("Shutdown whole cluster")
@@ -105,18 +102,17 @@ class ClusterStateController:
             node.host.stop_host(mode=mode)
 
         for node in nodes:
-            wait_for_host_offline(self.shell, node.storage_node)
+            self._wait_for_host_offline(node)
 
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
     @reporter.step_deco("Start host of node {node}")
-    def start_node_host(self, node: ClusterNode, tree_healthcheck: bool = True):
+    def start_node_host(self, node: ClusterNode, startup_healthcheck: bool = True):
         with reporter.step(f"Start host {node.host.config.address}"):
             node.host.start_host()
-            wait_for_host_online(self.shell, node.storage_node)
+            self._wait_for_host_online(node)
             self.stopped_nodes.remove(node)
-            wait_for_node_online(node.storage_node)
-            if tree_healthcheck:
-                self.wait_tree_healthcheck()
+            if startup_healthcheck:
+                self.wait_startup_healthcheck()
 
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
     @reporter.step_deco("Start stopped hosts")
@@ -131,6 +127,9 @@ class ClusterStateController:
             self.stopped_services.difference_update(self._get_stopped_by_node(node))
         self.stopped_nodes = []
 
+        with reporter.step("Wait for all nodes to go online"):
+            parallel(self._wait_for_host_online, self.cluster.cluster_nodes)
+
         self.wait_after_storage_startup()
 
@@ -183,16 +182,15 @@ class ClusterStateController:
         if online_s3gates:
             parallel(self.wait_s3gate, online_s3gates)
 
-    @wait_for_success(600, 60)
-    def wait_tree_healthcheck(self):
+    @reporter.step_deco("Wait for cluster startup healthcheck")
+    def wait_startup_healthcheck(self):
         nodes = self.cluster.nodes(self._get_online(StorageNode))
-        parallel(self.healthcheck.tree_healthcheck, nodes)
+        parallel(self.healthcheck.startup_healthcheck, nodes)
 
     @reporter.step_deco("Wait for storage reconnection to the system")
     def wait_after_storage_startup(self):
-        wait_all_storage_nodes_returned(self.shell, self.cluster)
+        self.wait_startup_healthcheck()
         self.wait_s3gates()
-        self.wait_tree_healthcheck()
 
     @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
     @reporter.step_deco("Hard reboot host {node} via magic SysRq option")
-    def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True, tree_healthcheck: bool = True):
+    def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True, startup_healthcheck: bool = True):
         shell = node.host.get_shell()
         shell.exec('sudo sh -c "echo 1 > /proc/sys/kernel/sysrq"')
@@ -381,10 +379,9 @@ class ClusterStateController:
         # Let the things to be settled
         # A little wait here to prevent ssh stuck during panic
         time.sleep(10)
-        wait_for_host_online(self.shell, node.storage_node)
-        wait_for_node_online(node.storage_node)
-        if tree_healthcheck:
-            self.wait_tree_healthcheck()
+        self._wait_for_host_online(node)
+        if startup_healthcheck:
+            self.wait_startup_healthcheck()
 
     @reporter.step_deco("Down {interface} to {nodes}")
     def down_interface(self, nodes: list[ClusterNode], interface: str):
@@ -539,3 +536,32 @@ class ClusterStateController:
             if "mgmt" not in type:
                 interfaces.append(ip)
         return interfaces
+
+    @reporter.step_deco("Ping node")
+    def _ping_host(self, node: ClusterNode):
+        options = CommandOptions(check=False)
+        return 
self.shell.exec(f"ping {node.host.config.address} -c 1", options).return_code + + @retry(max_attempts=60, sleep_interval=5, expected_result=HostStatus.ONLINE) + @reporter.step_deco("Waiting for {node} to go online") + def _wait_for_host_online(self, node: ClusterNode): + try: + ping_result = self._ping_host(node) + if ping_result != 0: + return HostStatus.OFFLINE + return node.host.get_host_status() + except Exception as err: + logger.warning(f"Host ping fails with error {err}") + return HostStatus.OFFLINE + + @retry(max_attempts=60, sleep_interval=5, expected_result=HostStatus.OFFLINE) + @reporter.step_deco("Waiting for {node} to go offline") + def _wait_for_host_offline(self, node: ClusterNode): + try: + ping_result = self._ping_host(node) + if ping_result == 0: + return HostStatus.ONLINE + return node.host.get_host_status() + except Exception as err: + logger.warning(f"Host ping fails with error {err}") + return HostStatus.ONLINE diff --git a/src/frostfs_testlib/utils/failover_utils.py b/src/frostfs_testlib/utils/failover_utils.py index 27cd181..d4892c4 100644 --- a/src/frostfs_testlib/utils/failover_utils.py +++ b/src/frostfs_testlib/utils/failover_utils.py @@ -3,17 +3,15 @@ from dataclasses import dataclass from time import sleep from typing import Optional -from frostfs_testlib.hosting import Host from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.common import SERVICE_MAX_STARTUP_TIME -from frostfs_testlib.shell import CommandOptions, Shell +from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import neo_go_dump_keys from frostfs_testlib.steps.node_management import storage_node_healthcheck from frostfs_testlib.steps.storage_policy import get_nodes_with_object from frostfs_testlib.storage.cluster import Cluster, ClusterNode, NodeBase, StorageNode from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain -from frostfs_testlib.testing.parallel import parallel -from frostfs_testlib.testing.test_control import retry, wait_for_success +from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.utils.datetime_utils import parse_time reporter = get_reporter() @@ -21,66 +19,6 @@ reporter = get_reporter() logger = logging.getLogger("NeoLogger") -@reporter.step_deco("Ping node") -def ping_host(shell: Shell, host: Host): - options = CommandOptions(check=False) - return shell.exec(f"ping {host.config.address} -c 1", options).return_code - - -# TODO: Move to ClusterStateController -@reporter.step_deco("Wait for storage nodes returned to cluster") -def wait_all_storage_nodes_returned(shell: Shell, cluster: Cluster) -> None: - nodes = cluster.services(StorageNode) - parallel(_wait_for_storage_node, nodes, shell=shell) - - -@reporter.step_deco("Run health check for storage at '{node}'") -def _wait_for_storage_node(node: StorageNode, shell: Shell) -> None: - wait_for_host_online(shell, node) - wait_for_node_online(node) - - -@retry(max_attempts=60, sleep_interval=5, expected_result=0) -@reporter.step_deco("Waiting for host of {node} to go online") -def wait_for_host_online(shell: Shell, node: StorageNode): - try: - # TODO: Quick solution for now, should be replaced by lib interactions - return ping_host(shell, node.host) - except Exception as err: - logger.warning(f"Host ping fails with error {err}") - return 1 - - -@retry(max_attempts=60, sleep_interval=5, expected_result=1) -@reporter.step_deco("Waiting for host of {node} to go offline") -def wait_for_host_offline(shell: Shell, node: StorageNode): 
- try: - # TODO: Quick solution for now, should be replaced by lib interactions - return ping_host(shell, node.host) - except Exception as err: - logger.warning(f"Host ping fails with error {err}") - return 0 - - -@retry(max_attempts=20, sleep_interval=30, expected_result=True) -@reporter.step_deco("Waiting for node {node} to go online") -def wait_for_node_online(node: StorageNode): - try: - health_check = storage_node_healthcheck(node) - except Exception as err: - logger.warning(f"Node healthcheck fails with error {err}") - return False - finally: - gather_socket_info(node) - - return health_check.health_status == "READY" and health_check.network_status == "ONLINE" - - -@reporter.step_deco("Gather socket info for {node}") -def gather_socket_info(node: StorageNode): - node.host.get_shell().exec("ss -tuln | grep 8080", CommandOptions(check=False)) - - @reporter.step_deco("Check and return status of given service") def service_status(service: str, shell: Shell) -> str: return shell.exec(f"sudo systemctl is-active {service}").stdout.rstrip() From f072f88673dfddcbf5064b49bbe1b123957685fb Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 22 Nov 2023 19:54:39 +0300 Subject: [PATCH 173/363] [#127] Change service registration Signed-off-by: Andrey Berezin --- pyproject.toml | 6 ++++++ src/frostfs_testlib/plugins/__init__.py | 2 +- src/frostfs_testlib/storage/__init__.py | 15 --------------- 3 files changed, 7 insertions(+), 16 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 34a37e3..7d3e5b0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -50,6 +50,12 @@ basic = "frostfs_testlib.healthcheck.basic_healthcheck:BasicHealthcheck" [project.entry-points."frostfs.testlib.csc_managers"] config = "frostfs_testlib.storage.controllers.state_managers.config_state_manager:ConfigStateManager" +[project.entry-points."frostfs.testlib.services"] +s = "frostfs_testlib.storage.dataclasses.frostfs_services:StorageNode" +s3-gate = "frostfs_testlib.storage.dataclasses.frostfs_services:S3Gate" +http-gate = "frostfs_testlib.storage.dataclasses.frostfs_services:HTTPGate" +morph-chain = "frostfs_testlib.storage.dataclasses.frostfs_services:MorphChain" +ir = "frostfs_testlib.storage.dataclasses.frostfs_services:InnerRing" [tool.isort] profile = "black" diff --git a/src/frostfs_testlib/plugins/__init__.py b/src/frostfs_testlib/plugins/__init__.py index 79de340..26b2441 100644 --- a/src/frostfs_testlib/plugins/__init__.py +++ b/src/frostfs_testlib/plugins/__init__.py @@ -23,7 +23,7 @@ def load_all(group: str) -> Any: """Loads all plugins using entry point specification. Args: - plugin_group: Name of plugin group. + group: Name of plugin group. Returns: Classes from specified group. 
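
For context, the entry points declared above replace the hard-coded registry removed in the
next file diff: service classes are now discovered at runtime through the entry-point
mechanism that the testlib's load_all() helper presumably wraps. A minimal sketch of that
lookup, assuming Python 3.10+ (the selectable entry_points(group=...) API from
importlib.metadata); the discover_services name is hypothetical:

    from importlib.metadata import entry_points

    def discover_services() -> dict[str, type]:
        # Resolve every class registered under the group declared in
        # pyproject.toml above, keyed by entry point name
        # ("s", "s3-gate", "http-gate", "morph-chain", "ir").
        return {ep.name: ep.load() for ep in entry_points(group="frostfs.testlib.services")}
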
diff --git a/src/frostfs_testlib/storage/__init__.py b/src/frostfs_testlib/storage/__init__.py index 3562d25..cbbef84 100644 --- a/src/frostfs_testlib/storage/__init__.py +++ b/src/frostfs_testlib/storage/__init__.py @@ -1,22 +1,7 @@ -from frostfs_testlib.storage.constants import _FrostfsServicesNames -from frostfs_testlib.storage.dataclasses.frostfs_services import ( - HTTPGate, - InnerRing, - MorphChain, - S3Gate, - StorageNode, -) from frostfs_testlib.storage.service_registry import ServiceRegistry __class_registry = ServiceRegistry() -# Register default public services -__class_registry.register_service(_FrostfsServicesNames.STORAGE, StorageNode) -__class_registry.register_service(_FrostfsServicesNames.INNER_RING, InnerRing) -__class_registry.register_service(_FrostfsServicesNames.MORPH_CHAIN, MorphChain) -__class_registry.register_service(_FrostfsServicesNames.S3_GATE, S3Gate) -__class_registry.register_service(_FrostfsServicesNames.HTTP_GATE, HTTPGate) - def get_service_registry() -> ServiceRegistry: """Returns registry with registered classes related to cluster and cluster nodes. From d1ba7eb66181a7335b0e0b5cd8a1271f4b078015 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Thu, 23 Nov 2023 08:03:31 +0300 Subject: [PATCH 174/363] Change local timeout Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/s3/aws_cli_client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 320d74b..59ee740 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -17,7 +17,7 @@ from frostfs_testlib.utils.cli_utils import _configure_aws_cli reporter = get_reporter() logger = logging.getLogger("NeoLogger") -command_options = CommandOptions(timeout=240) +command_options = CommandOptions(timeout=480) class AwsCliClient(S3ClientWrapper): From c17f0f6173bfc04b623dc0385c425573a59e78ef Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Fri, 24 Nov 2023 19:46:35 +0300 Subject: [PATCH 175/363] [#130] Add service healthcheck and allow to skip version check for some binaries Signed-off-by: Andrey Berezin --- .../healthcheck/basic_healthcheck.py | 23 ++++++++++- src/frostfs_testlib/healthcheck/interfaces.py | 4 ++ src/frostfs_testlib/storage/cluster.py | 20 +++++++++- src/frostfs_testlib/utils/failover_utils.py | 7 ++-- src/frostfs_testlib/utils/version_utils.py | 39 ++++++++++--------- 5 files changed, 69 insertions(+), 24 deletions(-) diff --git a/src/frostfs_testlib/healthcheck/basic_healthcheck.py b/src/frostfs_testlib/healthcheck/basic_healthcheck.py index 6f21534..4cb3a48 100644 --- a/src/frostfs_testlib/healthcheck/basic_healthcheck.py +++ b/src/frostfs_testlib/healthcheck/basic_healthcheck.py @@ -6,8 +6,9 @@ from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC from frostfs_testlib.shell import CommandOptions from frostfs_testlib.steps.node_management import storage_node_healthcheck -from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.storage.cluster import ClusterNode, ServiceClass from frostfs_testlib.testing.test_control import wait_for_success +from frostfs_testlib.utils.failover_utils import check_services_status reporter = get_reporter() @@ -51,6 +52,26 @@ class BasicHealthcheck(Healthcheck): with reporter.step(f"Perform storage healthcheck on {cluster_node}"): self._perform(cluster_node, checks) + @wait_for_success(120, 5) + def services_healthcheck(self, 
cluster_node: ClusterNode): + svcs_to_check = cluster_node.services + checks = { + check_services_status: { + "service_list": svcs_to_check, + "expected_status": "active", + }, + self._check_services: {"services": svcs_to_check}, + } + + with reporter.step(f"Perform service healthcheck on {cluster_node}"): + self._perform(cluster_node, checks) + + def _check_services(self, cluster_node: ClusterNode, services: list[ServiceClass]): + for svc in services: + result = svc.service_healthcheck() + if result == False: + return f"Service {svc.get_service_systemctl_name()} healthcheck failed on node {cluster_node}." + @reporter.step_deco("Storage healthcheck on {cluster_node}") def _storage_healthcheck(self, cluster_node: ClusterNode) -> str | None: result = storage_node_healthcheck(cluster_node.storage_node) diff --git a/src/frostfs_testlib/healthcheck/interfaces.py b/src/frostfs_testlib/healthcheck/interfaces.py index 83fa021..c665b8a 100644 --- a/src/frostfs_testlib/healthcheck/interfaces.py +++ b/src/frostfs_testlib/healthcheck/interfaces.py @@ -15,3 +15,7 @@ class Healthcheck(ABC): @abstractmethod def storage_healthcheck(self, cluster_node: ClusterNode): """Perform storage service healthcheck on target cluster node""" + + @abstractmethod + def services_healthcheck(self, cluster_node: ClusterNode): + """Perform service status check on target cluster node""" diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index b8c32ca..02601ac 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -116,8 +116,24 @@ class ClusterNode: self.host, ) - def get_list_of_services(self) -> list[str]: - return [config.attributes[ConfigAttributes.SERVICE_NAME] for config in self.host.config.services] + @property + def services(self) -> list[NodeBase]: + svcs: list[NodeBase] = [] + svcs_names_on_node = [svc.name for svc in self.host.config.services] + for entry in self.class_registry._class_mapping.values(): + hosting_svc_name = entry["hosting_service_name"] + pattern = f"{hosting_svc_name}{self.id:02}" + if pattern in svcs_names_on_node: + config = self.host.get_service_config(pattern) + svcs.append( + entry["cls"]( + self.id, + config.name, + self.host, + ) + ) + + return svcs def get_all_interfaces(self) -> dict[str, str]: return self.host.config.interfaces diff --git a/src/frostfs_testlib/utils/failover_utils.py b/src/frostfs_testlib/utils/failover_utils.py index d4892c4..507168e 100644 --- a/src/frostfs_testlib/utils/failover_utils.py +++ b/src/frostfs_testlib/utils/failover_utils.py @@ -11,6 +11,7 @@ from frostfs_testlib.steps.node_management import storage_node_healthcheck from frostfs_testlib.steps.storage_policy import get_nodes_with_object from frostfs_testlib.storage.cluster import Cluster, ClusterNode, NodeBase, StorageNode from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain +from frostfs_testlib.storage.dataclasses.node_base import ServiceClass from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.utils.datetime_utils import parse_time @@ -96,11 +97,11 @@ def multiple_restart( @reporter.step_deco("Get status of list of services and check expected status") @wait_for_success(60, 5) -def check_services_status(service_list: list[str], expected_status: str, shell: Shell): +def check_services_status(cluster_node: ClusterNode, service_list: list[ServiceClass], expected_status: str): cmd = "" for service in service_list: - cmd += f' sudo systemctl status {service} --lines=0 
| grep "Active:";' - result = shell.exec(cmd).stdout.rstrip() + cmd += f' sudo systemctl status {service.get_service_systemctl_name()} --lines=0 | grep "Active:";' + result = cluster_node.host.get_shell().exec(cmd).stdout.rstrip() statuses = list() for line in result.split("\n"): status_substring = line.split() diff --git a/src/frostfs_testlib/utils/version_utils.py b/src/frostfs_testlib/utils/version_utils.py index 68f8578..42bde6d 100644 --- a/src/frostfs_testlib/utils/version_utils.py +++ b/src/frostfs_testlib/utils/version_utils.py @@ -3,12 +3,7 @@ import re from frostfs_testlib.cli import FrostfsAdm, FrostfsCli from frostfs_testlib.hosting import Hosting -from frostfs_testlib.resources.cli import ( - FROSTFS_ADM_EXEC, - FROSTFS_AUTHMATE_EXEC, - FROSTFS_CLI_EXEC, - NEOGO_EXECUTABLE, -) +from frostfs_testlib.resources.cli import FROSTFS_ADM_EXEC, FROSTFS_AUTHMATE_EXEC, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell @@ -44,36 +39,44 @@ def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]: binary_path_by_name = {} # Maps binary name to executable path for service_config in host.config.services: exec_path = service_config.attributes.get("exec_path") + requires_check = service_config.attributes.get("requires_version_check", "true") if exec_path: - binary_path_by_name[service_config.name] = exec_path + binary_path_by_name[service_config.name] = { + "exec_path": exec_path, + "check": requires_check.lower() == "true", + } for cli_config in host.config.clis: - binary_path_by_name[cli_config.name] = cli_config.exec_path + requires_check = cli_config.attributes.get("requires_version_check", "true") + binary_path_by_name[cli_config.name] = { + "exec_path": cli_config.exec_path, + "check": requires_check.lower() == "true", + } shell = host.get_shell() versions_at_host = {} - for binary_name, binary_path in binary_path_by_name.items(): + for binary_name, binary in binary_path_by_name.items(): try: + binary_path = binary["exec_path"] result = shell.exec(f"{binary_path} --version") - versions_at_host[binary_name] = _parse_version(result.stdout) + versions_at_host[binary_name] = {"version": _parse_version(result.stdout), "check": binary["check"]} except Exception as exc: logger.error(f"Cannot get version for {binary_path} because of\n{exc}") - versions_at_host[binary_name] = "Unknown" + versions_at_host[binary_name] = {"version": "Unknown", "check": binary["check"]} versions_by_host[host.config.address] = versions_at_host # Consolidate versions across all hosts versions = {} for host, binary_versions in versions_by_host.items(): - for name, version in binary_versions.items(): - captured_version = versions.get(name) + for name, binary in binary_versions.items(): + captured_version = versions.get(name, {}).get("version") + version = binary["version"] if captured_version: - assert ( - captured_version == version - ), f"Binary {name} has inconsistent version on host {host}" + assert captured_version == version, f"Binary {name} has inconsistent version on host {host}" else: - versions[name] = version + versions[name] = {"version": version, "check": binary["check"]} return versions def _parse_version(version_output: str) -> str: version = re.search(r"version[:\s]*v?(.+)", version_output, re.IGNORECASE) - return version.group(1).strip() if version else "Unknown" + return version.group(1).strip() if version else version_output From 47414eb86630224f1cc9a19178307e921fad45c3 Mon Sep 17 00:00:00 2001 
From: Yaroslava Lukoyanova Date: Fri, 24 Nov 2023 16:32:26 +0300 Subject: [PATCH 176/363] Support of AWS profiles --- src/frostfs_testlib/s3/aws_cli_client.py | 103 ++++++++++++----------- src/frostfs_testlib/s3/boto3_client.py | 4 +- src/frostfs_testlib/s3/interfaces.py | 2 +- 3 files changed, 55 insertions(+), 54 deletions(-) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 59ee740..059e949 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -29,14 +29,15 @@ class AwsCliClient(S3ClientWrapper): s3gate_endpoint: str @reporter.step_deco("Configure S3 client (aws cli)") - def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None: + def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str='default') -> None: self.s3gate_endpoint = s3gate_endpoint + self.profile = profile self.local_shell = LocalShell() try: - _configure_aws_cli("aws configure", access_key_id, secret_access_key) - self.local_shell.exec(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS}") + _configure_aws_cli(f"aws configure --profile {profile}", access_key_id, secret_access_key) + self.local_shell.exec(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS} --profile {profile}") self.local_shell.exec( - f"aws configure set retry_mode {RETRY_MODE}", + f"aws configure set retry_mode {RETRY_MODE} --profile {profile}", ) except Exception as err: raise RuntimeError("Error while configuring AwsCliClient") from err @@ -67,7 +68,7 @@ class AwsCliClient(S3ClientWrapper): object_lock = " --no-object-lock-enabled-for-bucket" cmd = ( f"aws {self.common_flags} s3api create-bucket --bucket {bucket} " - f"{object_lock} --endpoint {self.s3gate_endpoint}" + f"{object_lock} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) if acl: cmd += f" --acl {acl}" @@ -86,20 +87,20 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("List buckets S3") def list_buckets(self) -> list[str]: - cmd = f"aws {self.common_flags} s3api list-buckets --endpoint {self.s3gate_endpoint}" + cmd = f"aws {self.common_flags} s3api list-buckets --endpoint {self.s3gate_endpoint} --profile {self.profile}" output = self.local_shell.exec(cmd).stdout buckets_json = self._to_json(output) return [bucket["Name"] for bucket in buckets_json["Buckets"]] @reporter.step_deco("Delete bucket S3") def delete_bucket(self, bucket: str) -> None: - cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint}" + cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" self.local_shell.exec(cmd, command_options) sleep(S3_SYNC_WAIT_TIME) @reporter.step_deco("Head bucket S3") def head_bucket(self, bucket: str) -> None: - cmd = f"aws {self.common_flags} s3api head-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint}" + cmd = f"aws {self.common_flags} s3api head-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" self.local_shell.exec(cmd) @reporter.step_deco("Put bucket versioning status") @@ -107,7 +108,7 @@ class AwsCliClient(S3ClientWrapper): cmd = ( f"aws {self.common_flags} s3api put-bucket-versioning --bucket {bucket} " f"--versioning-configuration Status={status.value} " - f"--endpoint {self.s3gate_endpoint}" + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) @@ -115,7 +116,7 @@ class 
AwsCliClient(S3ClientWrapper): def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: cmd = ( f"aws {self.common_flags} s3api get-bucket-versioning --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint}" + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -126,14 +127,14 @@ class AwsCliClient(S3ClientWrapper): tags_json = {"TagSet": [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]} cmd = ( f"aws {self.common_flags} s3api put-bucket-tagging --bucket {bucket} " - f"--tagging '{json.dumps(tags_json)}' --endpoint {self.s3gate_endpoint}" + f"--tagging '{json.dumps(tags_json)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) @reporter.step_deco("Get bucket tagging") def get_bucket_tagging(self, bucket: str) -> list: cmd = ( - f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" + f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -141,7 +142,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("Get bucket acl") def get_bucket_acl(self, bucket: str) -> list: - cmd = f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" + cmd = f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Grants") @@ -149,7 +150,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("Get bucket location") def get_bucket_location(self, bucket: str) -> dict: cmd = ( - f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" + f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -157,7 +158,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("List objects S3") def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" + cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -168,7 +169,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("List objects S3 v2") def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - cmd = f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" + cmd = f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -181,7 +182,7 @@ class AwsCliClient(S3ClientWrapper): def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: cmd = ( f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint}" + f"--endpoint {self.s3gate_endpoint} 
--profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -191,7 +192,7 @@ class AwsCliClient(S3ClientWrapper): def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: cmd = ( f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint}" + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -218,7 +219,7 @@ class AwsCliClient(S3ClientWrapper): cmd = ( f"aws {self.common_flags} s3api copy-object --copy-source {copy_source} " - f"--bucket {bucket} --key {key} --endpoint {self.s3gate_endpoint}" + f"--bucket {bucket} --key {key} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) if acl: cmd += f" --acl {acl}" @@ -255,7 +256,7 @@ class AwsCliClient(S3ClientWrapper): cmd = ( f"aws {self.common_flags} s3api put-object --bucket {bucket} --key {key} " - f"--body {filepath} --endpoint {self.s3gate_endpoint}" + f"--body {filepath} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) if metadata: cmd += " --metadata" @@ -284,7 +285,7 @@ class AwsCliClient(S3ClientWrapper): version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api head-object --bucket {bucket} --key {key} " - f"{version} --endpoint {self.s3gate_endpoint}" + f"{version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -303,7 +304,7 @@ class AwsCliClient(S3ClientWrapper): version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api get-object --bucket {bucket} --key {key} " - f"{version} {file_path} --endpoint {self.s3gate_endpoint}" + f"{version} {file_path} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) if object_range: cmd += f" --range bytes={object_range[0]}-{object_range[1]}" @@ -316,7 +317,7 @@ class AwsCliClient(S3ClientWrapper): version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api get-object-acl --bucket {bucket} --key {key} " - f"{version} --endpoint {self.s3gate_endpoint}" + f"{version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -333,7 +334,7 @@ class AwsCliClient(S3ClientWrapper): ) -> list: cmd = ( f"aws {self.common_flags} s3api put-object-acl --bucket {bucket} --key {key} " - f" --endpoint {self.s3gate_endpoint}" + f" --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) if acl: cmd += f" --acl {acl}" @@ -353,7 +354,7 @@ class AwsCliClient(S3ClientWrapper): grant_write: Optional[str] = None, grant_read: Optional[str] = None, ) -> None: - cmd = f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} " f" --endpoint {self.s3gate_endpoint}" + cmd = f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} " f" --endpoint {self.s3gate_endpoint} --profile {self.profile}" if acl: cmd += f" --acl {acl}" if grant_write: @@ -372,7 +373,7 @@ class AwsCliClient(S3ClientWrapper): cmd = ( f"aws {self.common_flags} s3api delete-objects --bucket {bucket} " - f"--delete file://{file_path} --endpoint {self.s3gate_endpoint}" + f"--delete file://{file_path} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd, command_options).stdout response = self._to_json(output) @@ -384,7 +385,7 @@ class 
AwsCliClient(S3ClientWrapper): version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api delete-object --bucket {bucket} " - f"--key {key} {version} --endpoint {self.s3gate_endpoint}" + f"--key {key} {version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd, command_options).stdout sleep(S3_SYNC_WAIT_TIME) @@ -411,7 +412,7 @@ class AwsCliClient(S3ClientWrapper): cmd = ( f"aws {self.common_flags} s3api delete-objects --bucket {bucket} " - f"--delete file://{file_path} --endpoint {self.s3gate_endpoint}" + f"--delete file://{file_path} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd, command_options).stdout sleep(S3_SYNC_WAIT_TIME) @@ -442,7 +443,7 @@ class AwsCliClient(S3ClientWrapper): cmd = ( f"aws {self.common_flags} s3api get-object-attributes --bucket {bucket} " f"--key {key} {version} {parts} {part_number_str} --object-attributes {attrs} " - f"--endpoint {self.s3gate_endpoint}" + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -457,7 +458,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step_deco("Get bucket policy") def get_bucket_policy(self, bucket: str) -> dict: - cmd = f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" + cmd = f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Policy") @@ -473,13 +474,13 @@ class AwsCliClient(S3ClientWrapper): dumped_policy = json.dumps(json.dumps(policy)) cmd = ( f"aws {self.common_flags} s3api put-bucket-policy --bucket {bucket} " - f"--policy {dumped_policy} --endpoint {self.s3gate_endpoint}" + f"--policy {dumped_policy} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) @reporter.step_deco("Get bucket cors") def get_bucket_cors(self, bucket: str) -> dict: - cmd = f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" + cmd = f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("CORSRules") @@ -488,14 +489,14 @@ class AwsCliClient(S3ClientWrapper): def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: cmd = ( f"aws {self.common_flags} s3api put-bucket-cors --bucket {bucket} " - f"--cors-configuration '{json.dumps(cors_configuration)}' --endpoint {self.s3gate_endpoint}" + f"--cors-configuration '{json.dumps(cors_configuration)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) @reporter.step_deco("Delete bucket cors") def delete_bucket_cors(self, bucket: str) -> None: cmd = ( - f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint}" + f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) @@ -503,7 +504,7 @@ class AwsCliClient(S3ClientWrapper): def delete_bucket_tagging(self, bucket: str) -> None: cmd = ( f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {bucket} " - f"--endpoint 
{self.s3gate_endpoint}" + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) @@ -519,7 +520,7 @@ class AwsCliClient(S3ClientWrapper): version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api put-object-retention --bucket {bucket} --key {key} " - f"{version} --retention '{json.dumps(retention, indent=4, sort_keys=True, default=str)}' --endpoint {self.s3gate_endpoint}" + f"{version} --retention '{json.dumps(retention, indent=4, sort_keys=True, default=str)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) if bypass_governance_retention is not None: cmd += " --bypass-governance-retention" @@ -537,7 +538,7 @@ class AwsCliClient(S3ClientWrapper): legal_hold = json.dumps({"Status": legal_hold_status}) cmd = ( f"aws {self.common_flags} s3api put-object-legal-hold --bucket {bucket} --key {key} " - f"{version} --legal-hold '{legal_hold}' --endpoint {self.s3gate_endpoint}" + f"{version} --legal-hold '{legal_hold}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) @@ -547,7 +548,7 @@ class AwsCliClient(S3ClientWrapper): tagging = {"TagSet": tags} cmd = ( f"aws {self.common_flags} s3api put-object-tagging --bucket {bucket} --key {key} " - f"--tagging '{json.dumps(tagging)}' --endpoint {self.s3gate_endpoint}" + f"--tagging '{json.dumps(tagging)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) @@ -556,7 +557,7 @@ class AwsCliClient(S3ClientWrapper): version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api get-object-tagging --bucket {bucket} --key {key} " - f"{version} --endpoint {self.s3gate_endpoint}" + f"{version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -566,7 +567,7 @@ class AwsCliClient(S3ClientWrapper): def delete_object_tagging(self, bucket: str, key: str) -> None: cmd = ( f"aws {self.common_flags} s3api delete-object-tagging --bucket {bucket} " - f"--key {key} --endpoint {self.s3gate_endpoint}" + f"--key {key} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) @@ -578,7 +579,7 @@ class AwsCliClient(S3ClientWrapper): acl: Optional[str] = None, metadata: Optional[dict] = None, ) -> dict: - cmd = f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " f"--endpoint-url {self.s3gate_endpoint}" + cmd = f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" if metadata: cmd += " --metadata" for key, value in metadata.items(): @@ -598,7 +599,7 @@ class AwsCliClient(S3ClientWrapper): ) -> dict: cmd = ( f"aws {self.common_flags} s3 cp {dir_path} s3://{bucket} " - f"--endpoint-url {self.s3gate_endpoint} --recursive" + f"--endpoint-url {self.s3gate_endpoint} --recursive --profile {self.profile}" ) if metadata: cmd += " --metadata" @@ -613,7 +614,7 @@ class AwsCliClient(S3ClientWrapper): def create_multipart_upload(self, bucket: str, key: str) -> str: cmd = ( f"aws {self.common_flags} s3api create-multipart-upload --bucket {bucket} " - f"--key {key} --endpoint-url {self.s3gate_endpoint}" + f"--key {key} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -626,7 +627,7 @@ class AwsCliClient(S3ClientWrapper): def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: cmd = ( 
f"aws {self.common_flags} s3api list-multipart-uploads --bucket {bucket} " - f"--endpoint-url {self.s3gate_endpoint}" + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -636,7 +637,7 @@ class AwsCliClient(S3ClientWrapper): def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: cmd = ( f"aws {self.common_flags} s3api abort-multipart-upload --bucket {bucket} " - f"--key {key} --upload-id {upload_id} --endpoint-url {self.s3gate_endpoint}" + f"--key {key} --upload-id {upload_id} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) @@ -645,7 +646,7 @@ class AwsCliClient(S3ClientWrapper): cmd = ( f"aws {self.common_flags} s3api upload-part --bucket {bucket} --key {key} " f"--upload-id {upload_id} --part-number {part_num} --body {filepath} " - f"--endpoint-url {self.s3gate_endpoint}" + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd, command_options).stdout response = self._to_json(output) @@ -657,7 +658,7 @@ class AwsCliClient(S3ClientWrapper): cmd = ( f"aws {self.common_flags} s3api upload-part-copy --bucket {bucket} --key {key} " f"--upload-id {upload_id} --part-number {part_num} --copy-source {copy_source} " - f"--endpoint-url {self.s3gate_endpoint}" + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd, command_options).stdout response = self._to_json(output) @@ -669,7 +670,7 @@ class AwsCliClient(S3ClientWrapper): def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: cmd = ( f"aws {self.common_flags} s3api list-parts --bucket {bucket} --key {key} " - f"--upload-id {upload_id} --endpoint-url {self.s3gate_endpoint}" + f"--upload-id {upload_id} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -691,7 +692,7 @@ class AwsCliClient(S3ClientWrapper): cmd = ( f"aws {self.common_flags} s3api complete-multipart-upload --bucket {bucket} " f"--key {key} --upload-id {upload_id} --multipart-upload file://{file_path} " - f"--endpoint-url {self.s3gate_endpoint}" + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) @@ -699,7 +700,7 @@ class AwsCliClient(S3ClientWrapper): def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: cmd = ( f"aws {self.common_flags} s3api put-object-lock-configuration --bucket {bucket} " - f"--object-lock-configuration '{json.dumps(configuration)}' --endpoint-url {self.s3gate_endpoint}" + f"--object-lock-configuration '{json.dumps(configuration)}' --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout return self._to_json(output) @@ -708,7 +709,7 @@ class AwsCliClient(S3ClientWrapper): def get_object_lock_configuration(self, bucket: str): cmd = ( f"aws {self.common_flags} s3api get-object-lock-configuration --bucket {bucket} " - f"--endpoint-url {self.s3gate_endpoint}" + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index 2251efe..ba3716a 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -48,9 +48,9 @@ class 
Boto3ClientWrapper(S3ClientWrapper): @reporter.step_deco("Configure S3 client (boto3)") @report_error - def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None: + def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str='default') -> None: self.boto3_client: S3Client = None - self.session = boto3.Session() + self.session = boto3.Session(profile_name=profile) self.config = Config( retries={ "max_attempts": MAX_REQUEST_ATTEMPTS, diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index 2b6be7d..dd21823 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -33,7 +33,7 @@ ACL_COPY = [ class S3ClientWrapper(HumanReadableABC): @abstractmethod - def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str) -> None: + def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str) -> None: pass @abstractmethod From 39a17f36346d8bb672bb97676aef51db3c073154 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 28 Nov 2023 12:28:44 +0300 Subject: [PATCH 177/363] [#132] Add steps logger and refactor reporter usage Signed-off-by: Andrey Berezin --- src/frostfs_testlib/reporter/__init__.py | 5 ++ .../reporter/allure_handler.py | 4 +- src/frostfs_testlib/reporter/interfaces.py | 4 +- src/frostfs_testlib/reporter/reporter.py | 18 ++++-- src/frostfs_testlib/reporter/steps_logger.py | 56 ++++++++++++++++++ src/frostfs_testlib/testing/parallel.py | 2 +- src/frostfs_testlib/testing/test_control.py | 28 +++++++-- src/frostfs_testlib/utils/__init__.py | 4 ++ src/frostfs_testlib/utils/func_utils.py | 58 +++++++++++++++++++ 9 files changed, 163 insertions(+), 16 deletions(-) create mode 100644 src/frostfs_testlib/reporter/steps_logger.py create mode 100644 src/frostfs_testlib/utils/func_utils.py diff --git a/src/frostfs_testlib/reporter/__init__.py b/src/frostfs_testlib/reporter/__init__.py index 10e4146..e2c113c 100644 --- a/src/frostfs_testlib/reporter/__init__.py +++ b/src/frostfs_testlib/reporter/__init__.py @@ -1,6 +1,7 @@ from frostfs_testlib.reporter.allure_handler import AllureHandler from frostfs_testlib.reporter.interfaces import ReporterHandler from frostfs_testlib.reporter.reporter import Reporter +from frostfs_testlib.reporter.steps_logger import StepsLogger __reporter = Reporter() @@ -15,3 +16,7 @@ def get_reporter() -> Reporter: Singleton reporter instance. 
""" return __reporter + + +def step(title: str): + return __reporter.step(title) diff --git a/src/frostfs_testlib/reporter/allure_handler.py b/src/frostfs_testlib/reporter/allure_handler.py index fef815d..9089f98 100644 --- a/src/frostfs_testlib/reporter/allure_handler.py +++ b/src/frostfs_testlib/reporter/allure_handler.py @@ -1,5 +1,5 @@ import os -from contextlib import AbstractContextManager +from contextlib import AbstractContextManager, ContextDecorator from textwrap import shorten from typing import Any, Callable @@ -12,7 +12,7 @@ from frostfs_testlib.reporter.interfaces import ReporterHandler class AllureHandler(ReporterHandler): """Handler that stores test artifacts in Allure report.""" - def step(self, name: str) -> AbstractContextManager: + def step(self, name: str) -> AbstractContextManager | ContextDecorator: name = shorten(name, width=140, placeholder="...") return allure.step(name) diff --git a/src/frostfs_testlib/reporter/interfaces.py b/src/frostfs_testlib/reporter/interfaces.py index b47a3fb..4e24feb 100644 --- a/src/frostfs_testlib/reporter/interfaces.py +++ b/src/frostfs_testlib/reporter/interfaces.py @@ -1,5 +1,5 @@ from abc import ABC, abstractmethod -from contextlib import AbstractContextManager +from contextlib import AbstractContextManager, ContextDecorator from typing import Any, Callable @@ -7,7 +7,7 @@ class ReporterHandler(ABC): """Interface of handler that stores test artifacts in some reporting tool.""" @abstractmethod - def step(self, name: str) -> AbstractContextManager: + def step(self, name: str) -> AbstractContextManager | ContextDecorator: """Register a new step in test execution. Args: diff --git a/src/frostfs_testlib/reporter/reporter.py b/src/frostfs_testlib/reporter/reporter.py index d1c75f5..2d1a43e 100644 --- a/src/frostfs_testlib/reporter/reporter.py +++ b/src/frostfs_testlib/reporter/reporter.py @@ -5,6 +5,7 @@ from typing import Any, Callable, Optional from frostfs_testlib.plugins import load_plugin from frostfs_testlib.reporter.interfaces import ReporterHandler +from frostfs_testlib.utils.func_utils import format_by_args @contextmanager @@ -63,7 +64,8 @@ class Reporter: def wrapper(*a, **kw): resulting_func = func for handler in self.handlers: - decorator = handler.step_decorator(name) + parsed_name = format_by_args(func, name, *a, **kw) + decorator = handler.step_decorator(parsed_name) resulting_func = decorator(resulting_func) return resulting_func(*a, **kw) @@ -81,11 +83,11 @@ class Reporter: Returns: Step context. """ - if not self.handlers: - return _empty_step() - step_contexts = [handler.step(name) for handler in self.handlers] - return AggregateContextManager(step_contexts) + if not step_contexts: + step_contexts = [_empty_step()] + decorated_wrapper = self.step_deco(name) + return AggregateContextManager(step_contexts, decorated_wrapper) def attach(self, content: Any, file_name: str) -> None: """Attach specified content with given file name to the test report. 
@@ -104,9 +106,10 @@ class AggregateContextManager(AbstractContextManager):
 
     contexts: list[AbstractContextManager]
 
-    def __init__(self, contexts: list[AbstractContextManager]) -> None:
+    def __init__(self, contexts: list[AbstractContextManager], decorated_wrapper: Callable) -> None:
         super().__init__()
         self.contexts = contexts
+        self.wrapper = decorated_wrapper
 
     def __enter__(self):
         for context in self.contexts:
@@ -127,3 +130,6 @@
         # If all context agreed to suppress exception, then suppress it;
         # otherwise return None to reraise
         return True if all(suppress_decisions) else None
+
+    def __call__(self, *args: Any, **kwds: Any) -> Any:
+        return self.wrapper(*args, **kwds)
diff --git a/src/frostfs_testlib/reporter/steps_logger.py b/src/frostfs_testlib/reporter/steps_logger.py
new file mode 100644
index 0000000..4cdfb3d
--- /dev/null
+++ b/src/frostfs_testlib/reporter/steps_logger.py
@@ -0,0 +1,56 @@
+import logging
+import threading
+from contextlib import AbstractContextManager, ContextDecorator
+from functools import wraps
+from types import TracebackType
+from typing import Any, Callable
+
+from frostfs_testlib.reporter.interfaces import ReporterHandler
+
+
+class StepsLogger(ReporterHandler):
+    """Handler that prints steps to log."""
+
+    def step(self, name: str) -> AbstractContextManager | ContextDecorator:
+        return StepLoggerContext(name)
+
+    def step_decorator(self, name: str) -> Callable:
+        return StepLoggerContext(name)
+
+    def attach(self, body: Any, file_name: str) -> None:
+        pass
+
+
+class StepLoggerContext(AbstractContextManager):
+    INDENT = {}
+
+    def __init__(self, title: str):
+        self.title = title
+        self.logger = logging.getLogger("NeoLogger")
+        self.thread = threading.get_ident()
+        if self.thread not in StepLoggerContext.INDENT:
+            StepLoggerContext.INDENT[self.thread] = 1
+
+    def __enter__(self) -> Any:
+        indent = ">" * StepLoggerContext.INDENT[self.thread]
+        self.logger.info(f"[{self.thread}] {indent} {self.title}")
+        StepLoggerContext.INDENT[self.thread] += 1
+
+    def __exit__(
+        self,
+        __exc_type: type[BaseException] | None,
+        __exc_value: BaseException | None,
+        __traceback: TracebackType | None,
+    ) -> bool | None:
+
+        StepLoggerContext.INDENT[self.thread] -= 1
+        indent = "<" * StepLoggerContext.INDENT[self.thread]
+        self.logger.info(f"[{self.thread}] {indent} {self.title}")
+
+    def __call__(self, func):
+        @wraps(func)
+        def impl(*a, **kw):
+            with self:
+                return func(*a, **kw)
+
+        return impl
diff --git a/src/frostfs_testlib/testing/parallel.py b/src/frostfs_testlib/testing/parallel.py
index ebddd38..1c30cec 100644
--- a/src/frostfs_testlib/testing/parallel.py
+++ b/src/frostfs_testlib/testing/parallel.py
@@ -42,7 +42,7 @@ def parallel(
     exceptions = [future.exception() for future in futures if future.exception()]
     if exceptions:
         message = "\n".join([str(e) for e in exceptions])
-        raise RuntimeError(f"The following exceptions occured during parallel run:\n {message}")
+        raise RuntimeError(f"The following exceptions occurred during parallel run:\n{message}")
     return futures
diff --git a/src/frostfs_testlib/testing/test_control.py b/src/frostfs_testlib/testing/test_control.py
index ed74f6a..4fa6390 100644
--- a/src/frostfs_testlib/testing/test_control.py
+++ b/src/frostfs_testlib/testing/test_control.py
@@ -7,6 +7,9 @@ from typing import Any
 from _pytest.outcomes import Failed
 from pytest import fail
 
+from frostfs_testlib import reporter
+from frostfs_testlib.utils.func_utils import format_by_args
+
 logger =
logging.getLogger("NeoLogger")
 
 # TODO: we may consider deprecating some methods here and use tenacity instead
@@ -50,7 +53,7 @@ class expect_not_raises:
         return impl
 
 
-def retry(max_attempts: int, sleep_interval: int = 1, expected_result: Any = None):
+def retry(max_attempts: int, sleep_interval: int = 1, expected_result: Any = None, title: str = None):
     """
     Decorator to wait for some conditions/functions to pass successfully.
     This is useful if you don't know exact time when something should pass successfully and do not
@@ -62,8 +65,7 @@ def retry(max_attempts: int, sleep_interval: int = 1, expected_result: Any = Non
     assert max_attempts >= 1, "Cannot apply retry decorator with max_attempts < 1"
 
     def wrapper(func):
-        @wraps(func)
-        def impl(*a, **kw):
+        def call(func, *a, **kw):
             last_exception = None
             for _ in range(max_attempts):
                 try:
@@ -84,6 +86,14 @@ def retry(max_attempts: int, sleep_interval: int = 1, expected_result: Any = Non
         if last_exception is not None:
             raise last_exception
 
+        @wraps(func)
+        def impl(*a, **kw):
+            if title is not None:
+                with reporter.step(format_by_args(func, title, *a, **kw)):
+                    return call(func, *a, **kw)
+
+            return call(func, *a, **kw)
+
         return impl
 
     return wrapper
@@ -124,6 +134,7 @@ def wait_for_success(
     expected_result: Any = None,
     fail_testcase: bool = False,
     fail_message: str = "",
+    title: str = None,
 ):
     """
     Decorator to wait for some conditions/functions to pass successfully.
@@ -134,8 +145,7 @@ def wait_for_success(
     """
 
     def wrapper(func):
-        @wraps(func)
-        def impl(*a, **kw):
+        def call(func, *a, **kw):
             start = int(round(time()))
             last_exception = None
             while start + max_wait_time >= int(round(time())):
@@ -160,6 +170,14 @@ def wait_for_success(
         if last_exception is not None:
             raise last_exception
 
+        @wraps(func)
+        def impl(*a, **kw):
+            if title is not None:
+                with reporter.step(format_by_args(func, title, *a, **kw)):
+                    return call(func, *a, **kw)
+
+            return call(func, *a, **kw)
+
         return impl
 
     return wrapper
diff --git a/src/frostfs_testlib/utils/__init__.py b/src/frostfs_testlib/utils/__init__.py
index fbc4a8f..4acc5b1 100644
--- a/src/frostfs_testlib/utils/__init__.py
+++ b/src/frostfs_testlib/utils/__init__.py
@@ -1,3 +1,7 @@
+"""
+Idea of utils is to have small utility functions which are not dependent on anything.
+"""
+
 import frostfs_testlib.utils.converting_utils
 import frostfs_testlib.utils.datetime_utils
 import frostfs_testlib.utils.json_utils
diff --git a/src/frostfs_testlib/utils/func_utils.py b/src/frostfs_testlib/utils/func_utils.py
new file mode 100644
index 0000000..0e22d4a
--- /dev/null
+++ b/src/frostfs_testlib/utils/func_utils.py
@@ -0,0 +1,58 @@
+import collections
+import inspect
+import sys
+from typing import Callable
+
+
+def format_by_args(__func: Callable, __title: str, *a, **kw) -> str:
+    params = _func_parameters(__func, *a, **kw)
+    args = list(map(lambda x: _represent(x), a))
+
+    return __title.format(*args, **params)
+
+
+# These 2 functions are copied from allure_commons._allure
+# Duplicated here in order to be independent of allure and to make some adjustments.
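+# Illustrative example (editorial, not part of the original commit): for
+#     def upload(key, bucket="dev"): ...
+# a call like format_by_args(upload, "Upload {key} to {bucket}", "obj")
+# resolves the positional argument and the default value against the
+# function signature and returns "Upload obj to dev".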
+def _represent(item):
+    if isinstance(item, str):
+        return item
+    elif isinstance(item, (bytes, bytearray)):
+        return repr(type(item))
+    else:
+        return repr(item)
+
+
+def _func_parameters(func, *args, **kwargs):
+    parameters = {}
+    arg_spec = inspect.getfullargspec(func)
+    arg_order = list(arg_spec.args)
+    args_dict = dict(zip(arg_spec.args, args))
+
+    if arg_spec.defaults:
+        kwargs_defaults_dict = dict(zip(arg_spec.args[-len(arg_spec.defaults) :], arg_spec.defaults))
+        parameters.update(kwargs_defaults_dict)
+
+    if arg_spec.varargs:
+        arg_order.append(arg_spec.varargs)
+        varargs = args[len(arg_spec.args) :]
+        parameters.update({arg_spec.varargs: varargs} if varargs else {})
+
+    if arg_spec.args and arg_spec.args[0] in ["cls", "self"]:
+        args_dict.pop(arg_spec.args[0], None)
+
+    if kwargs:
+        if sys.version_info < (3, 7):
+            # Sort alphabetically as old python versions do
+            # not preserve call order for kwargs.
+            arg_order.extend(sorted(list(kwargs.keys())))
+        else:
+            # Keep py3.7 behaviour to preserve kwargs order
+            arg_order.extend(list(kwargs.keys()))
+        parameters.update(kwargs)
+
+    parameters.update(args_dict)
+
+    items = parameters.items()
+    sorted_items = sorted(map(lambda kv: (kv[0], _represent(kv[1])), items), key=lambda x: arg_order.index(x[0]))
+
+    return collections.OrderedDict(sorted_items)

From dc6b0e407fd8e65bfab85c9ce52770887de9cc20 Mon Sep 17 00:00:00 2001
From: Andrey Berezin
Date: Wed, 29 Nov 2023 15:27:17 +0300
Subject: [PATCH 178/363] [#133] Change reporter usage

Signed-off-by: Andrey Berezin
---
 .../healthcheck/basic_healthcheck.py          |  30 ++--
 src/frostfs_testlib/load/k6.py                |   7 +-
 src/frostfs_testlib/load/load_verifiers.py    |  23 +--
 src/frostfs_testlib/load/runners.py           |  32 ++--
 .../processes/remote_process.py               |  62 +++----
 src/frostfs_testlib/reporter/__init__.py      |   6 +
 .../reporter/allure_handler.py                |   7 +-
 src/frostfs_testlib/s3/aws_cli_client.py      | 151 ++++++++++--------
 src/frostfs_testlib/s3/boto3_client.py        | 145 ++++++++---------
 src/frostfs_testlib/shell/local_shell.py      |   3 +-
 src/frostfs_testlib/shell/ssh_shell.py        |   3 +-
 src/frostfs_testlib/steps/acl.py              |  19 +--
 src/frostfs_testlib/steps/cli/container.py    |  35 ++--
 src/frostfs_testlib/steps/cli/object.py       |  41 +++--
 .../steps/complex_object_actions.py           |   7 +-
 src/frostfs_testlib/steps/epoch.py            |  17 +-
 src/frostfs_testlib/steps/http/http_gate.py   |  58 +++----
 src/frostfs_testlib/steps/network.py          |  30 ++--
 src/frostfs_testlib/steps/node_management.py  |  61 +++----
 src/frostfs_testlib/steps/payment_neogo.py    |  24 ++-
 src/frostfs_testlib/steps/s3/s3_helper.py     |  86 +++-------
 src/frostfs_testlib/steps/session_token.py    |  35 ++--
 src/frostfs_testlib/steps/storage_object.py   |   9 +-
 src/frostfs_testlib/steps/storage_policy.py   |  37 ++---
 src/frostfs_testlib/steps/tombstone.py        |  21 +--
 src/frostfs_testlib/storage/cluster.py        |   4 +-
 .../configuration/service_configuration.py    |   4 +-
 .../controllers/background_load_controller.py |  20 ++-
 .../controllers/cluster_state_controller.py   |  94 ++++++-----
 .../state_managers/config_state_manager.py    |  12 +-
 .../storage/dataclasses/node_base.py          |   4 +-
 .../testing/cluster_test_base.py              |   6 +-
 src/frostfs_testlib/utils/cli_utils.py        |   7 +-
 src/frostfs_testlib/utils/env_utils.py        |   7 +-
 src/frostfs_testlib/utils/failover_utils.py   |  32 ++--
 src/frostfs_testlib/utils/file_keeper.py      |  10 +-
 src/frostfs_testlib/utils/file_utils.py       |   7 +-
 37 files changed, 478 insertions(+), 678 deletions(-)

diff --git a/src/frostfs_testlib/healthcheck/basic_healthcheck.py b/src/frostfs_testlib/healthcheck/basic_healthcheck.py
index 4cb3a48..0443e28 100644 --- a/src/frostfs_testlib/healthcheck/basic_healthcheck.py +++ b/src/frostfs_testlib/healthcheck/basic_healthcheck.py @@ -1,8 +1,8 @@ from typing import Callable +from frostfs_testlib import reporter from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli from frostfs_testlib.healthcheck.interfaces import Healthcheck -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC from frostfs_testlib.shell import CommandOptions from frostfs_testlib.steps.node_management import storage_node_healthcheck @@ -10,8 +10,6 @@ from frostfs_testlib.storage.cluster import ClusterNode, ServiceClass from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.utils.failover_utils import check_services_status -reporter = get_reporter() - class BasicHealthcheck(Healthcheck): def _perform(self, cluster_node: ClusterNode, checks: dict[Callable, dict]): @@ -23,36 +21,33 @@ class BasicHealthcheck(Healthcheck): assert not issues, "Issues found:\n" + "\n".join(issues) - @wait_for_success(900, 30) + @wait_for_success(900, 30, title="Wait for full healthcheck for {cluster_node}") def full_healthcheck(self, cluster_node: ClusterNode): checks = { self.storage_healthcheck: {}, self._tree_healthcheck: {}, } - with reporter.step(f"Perform full healthcheck for {cluster_node}"): - self._perform(cluster_node, checks) + self._perform(cluster_node, checks) - @wait_for_success(900, 30) + @wait_for_success(900, 30, title="Wait for startup healthcheck on {cluster_node}") def startup_healthcheck(self, cluster_node: ClusterNode): checks = { self.storage_healthcheck: {}, self._tree_healthcheck: {}, } - with reporter.step(f"Perform startup healthcheck on {cluster_node}"): - self._perform(cluster_node, checks) + self._perform(cluster_node, checks) - @wait_for_success(900, 30) + @wait_for_success(900, 30, title="Wait for storage healthcheck on {cluster_node}") def storage_healthcheck(self, cluster_node: ClusterNode) -> str | None: checks = { self._storage_healthcheck: {}, } - with reporter.step(f"Perform storage healthcheck on {cluster_node}"): - self._perform(cluster_node, checks) + self._perform(cluster_node, checks) - @wait_for_success(120, 5) + @wait_for_success(120, 5, title="Wait for service healthcheck on {cluster_node}") def services_healthcheck(self, cluster_node: ClusterNode): svcs_to_check = cluster_node.services checks = { @@ -63,8 +58,7 @@ class BasicHealthcheck(Healthcheck): self._check_services: {"services": svcs_to_check}, } - with reporter.step(f"Perform service healthcheck on {cluster_node}"): - self._perform(cluster_node, checks) + self._perform(cluster_node, checks) def _check_services(self, cluster_node: ClusterNode, services: list[ServiceClass]): for svc in services: @@ -72,14 +66,14 @@ class BasicHealthcheck(Healthcheck): if result == False: return f"Service {svc.get_service_systemctl_name()} healthcheck failed on node {cluster_node}." - @reporter.step_deco("Storage healthcheck on {cluster_node}") + @reporter.step("Storage healthcheck on {cluster_node}") def _storage_healthcheck(self, cluster_node: ClusterNode) -> str | None: result = storage_node_healthcheck(cluster_node.storage_node) self._gather_socket_info(cluster_node) if result.health_status != "READY" or result.network_status != "ONLINE": return f"Node {cluster_node} is not healthy. Health={result.health_status}. 
Network={result.network_status}" - @reporter.step_deco("Tree healthcheck on {cluster_node}") + @reporter.step("Tree healthcheck on {cluster_node}") def _tree_healthcheck(self, cluster_node: ClusterNode) -> str | None: host = cluster_node.host service_config = host.get_service_config(cluster_node.storage_node.name) @@ -102,6 +96,6 @@ class BasicHealthcheck(Healthcheck): f"Error during tree healthcheck (rc={result.return_code}): {result.stdout}. \n Stderr: {result.stderr}" ) - @reporter.step_deco("Gather socket info for {cluster_node}") + @reporter.step("Gather socket info for {cluster_node}") def _gather_socket_info(self, cluster_node: ClusterNode): cluster_node.host.get_shell().exec("ss -tuln | grep 8080", CommandOptions(check=False)) diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index 3dedd53..92da8e0 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -8,10 +8,10 @@ from time import sleep from typing import Any from urllib.parse import urlparse +from frostfs_testlib import reporter from frostfs_testlib.load.interfaces.loader import Loader from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario, LoadType from frostfs_testlib.processes.remote_process import RemoteProcess -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.common import STORAGE_USER_NAME from frostfs_testlib.resources.load_params import K6_STOP_SIGNAL_TIMEOUT, K6_TEARDOWN_PERIOD from frostfs_testlib.shell import Shell @@ -21,7 +21,6 @@ from frostfs_testlib.testing.test_control import wait_for_success EXIT_RESULT_CODE = 0 logger = logging.getLogger("NeoLogger") -reporter = get_reporter() @dataclass @@ -102,7 +101,7 @@ class K6: self.preset_output = result.stdout.strip("\n") return self.preset_output - @reporter.step_deco("Generate K6 command") + @reporter.step("Generate K6 command") def _generate_env_variables(self) -> str: env_vars = self.load_params.get_env_vars() @@ -216,7 +215,7 @@ class K6: return self._k6_process.running() return False - @reporter.step_deco("Wait until K6 process end") + @reporter.step("Wait until K6 process end") @wait_for_success(K6_STOP_SIGNAL_TIMEOUT, 15, False, False, "Can not stop K6 process within timeout") def _wait_until_process_end(self): return self._k6_process.running() diff --git a/src/frostfs_testlib/load/load_verifiers.py b/src/frostfs_testlib/load/load_verifiers.py index b691b02..fe39862 100644 --- a/src/frostfs_testlib/load/load_verifiers.py +++ b/src/frostfs_testlib/load/load_verifiers.py @@ -1,11 +1,6 @@ -import logging - +from frostfs_testlib import reporter from frostfs_testlib.load.load_config import LoadParams, LoadScenario from frostfs_testlib.load.load_metrics import get_metrics_object -from frostfs_testlib.reporter import get_reporter - -reporter = get_reporter() -logger = logging.getLogger("NeoLogger") class LoadVerifier: @@ -49,19 +44,11 @@ class LoadVerifier: if deleters and not delete_operations: issues.append(f"No any delete operation was performed") - if ( - write_operations - and writers - and write_errors / write_operations * 100 > self.load_params.error_threshold - ): + if write_operations and writers and write_errors / write_operations * 100 > self.load_params.error_threshold: issues.append( f"Write error rate is greater than threshold: {write_errors / write_operations * 100} > {self.load_params.error_threshold}" ) - if ( - read_operations - and readers - and read_errors / read_operations * 100 > self.load_params.error_threshold - ): 
+ if read_operations and readers and read_errors / read_operations * 100 > self.load_params.error_threshold: issues.append( f"Read error rate is greater than threshold: {read_errors / read_operations * 100} > {self.load_params.error_threshold}" ) @@ -89,9 +76,7 @@ class LoadVerifier: ) return verify_issues - def _collect_verify_issues_on_process( - self, label, load_summary, verification_summary - ) -> list[str]: + def _collect_verify_issues_on_process(self, label, load_summary, verification_summary) -> list[str]: issues = [] load_metrics = get_metrics_object(self.load_params.scenario, load_summary) diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index ea5a374..f5284d8 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -9,13 +9,13 @@ from urllib.parse import urlparse import yaml +from frostfs_testlib import reporter from frostfs_testlib.cli.frostfs_authmate.authmate import FrostfsAuthmate from frostfs_testlib.load.interfaces.loader import Loader from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner from frostfs_testlib.load.k6 import K6 from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadType from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources import optionals from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC from frostfs_testlib.resources.common import STORAGE_USER_NAME @@ -31,17 +31,15 @@ from frostfs_testlib.testing.test_control import retry from frostfs_testlib.utils import datetime_utils from frostfs_testlib.utils.file_keeper import FileKeeper -reporter = get_reporter() - class RunnerBase(ScenarioRunner): k6_instances: list[K6] - @reporter.step_deco("Run preset on loaders") + @reporter.step("Run preset on loaders") def preset(self): parallel([k6.preset for k6 in self.k6_instances]) - @reporter.step_deco("Wait until load finish") + @reporter.step("Wait until load finish") def wait_until_finish(self, soft_timeout: int = 0): parallel([k6.wait_until_finished for k6 in self.k6_instances], soft_timeout=soft_timeout) @@ -70,7 +68,7 @@ class DefaultRunner(RunnerBase): self.loaders_wallet = loaders_wallet @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Preparation steps") + @reporter.step("Preparation steps") def prepare( self, load_params: LoadParams, @@ -127,7 +125,7 @@ class DefaultRunner(RunnerBase): ] shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input)) - @reporter.step_deco("Init k6 instances") + @reporter.step("Init k6 instances") def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): self.k6_instances = [] cycled_loaders = itertools.cycle(self.loaders) @@ -271,7 +269,7 @@ class LocalRunner(RunnerBase): self.nodes_under_load = nodes_under_load @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Preparation steps") + @reporter.step("Preparation steps") def prepare( self, load_params: LoadParams, @@ -298,7 +296,7 @@ class LocalRunner(RunnerBase): return True - @reporter.step_deco("Prepare node {cluster_node}") + @reporter.step("Prepare node {cluster_node}") def prepare_node(self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams): shell = cluster_node.host.get_shell() @@ -323,7 +321,7 @@ class LocalRunner(RunnerBase): shell.exec(f'echo "{content}" | sudo tee {self.wallet.config_path}') shell.exec(f"sudo 
chmod -R 777 {self.wallet.config_path}") - @reporter.step_deco("Init k6 instances") + @reporter.step("Init k6 instances") def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): self.k6_instances = [] futures = parallel( @@ -369,12 +367,12 @@ class LocalRunner(RunnerBase): with reporter.step(f"Wait for start timeout + couple more seconds ({wait_after_start_time}) before moving on"): time.sleep(wait_after_start_time) - @reporter.step_deco("Restore passwd on {cluster_node}") + @reporter.step("Restore passwd on {cluster_node}") def restore_passwd_on_node(self, cluster_node: ClusterNode): shell = cluster_node.host.get_shell() shell.exec("sudo chattr -i /etc/passwd") - @reporter.step_deco("Lock passwd on {cluster_node}") + @reporter.step("Lock passwd on {cluster_node}") def lock_passwd_on_node(self, cluster_node: ClusterNode): shell = cluster_node.host.get_shell() shell.exec("sudo chattr +i /etc/passwd") @@ -400,19 +398,19 @@ class S3LocalRunner(LocalRunner): endpoints: list[str] k6_dir: str - @reporter.step_deco("Run preset on loaders") + @reporter.step("Run preset on loaders") def preset(self): LocalRunner.preset(self) with reporter.step(f"Resolve containers in preset"): parallel(self._resolve_containers_in_preset, self.k6_instances) - @reporter.step_deco("Resolve containers in preset") + @reporter.step("Resolve containers in preset") def _resolve_containers_in_preset(self, k6_instance: K6): k6_instance.shell.exec( f"sudo {self.k6_dir}/scenarios/preset/resolve_containers_in_preset.py --endpoint {k6_instance.endpoints[0]} --preset_file {k6_instance.load_params.preset.pregen_json}" ) - @reporter.step_deco("Init k6 instances") + @reporter.step("Init k6 instances") def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str): self.k6_instances = [] futures = parallel( @@ -448,7 +446,7 @@ class S3LocalRunner(LocalRunner): ) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Preparation steps") + @reporter.step("Preparation steps") def prepare( self, load_params: LoadParams, @@ -464,7 +462,7 @@ class S3LocalRunner(LocalRunner): parallel(self.prepare_node, nodes_under_load, k6_dir, load_params, s3_public_keys, grpc_peer) - @reporter.step_deco("Prepare node {cluster_node}") + @reporter.step("Prepare node {cluster_node}") def prepare_node( self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams, s3_public_keys: list[str], grpc_peer: str ): diff --git a/src/frostfs_testlib/processes/remote_process.py b/src/frostfs_testlib/processes/remote_process.py index d92d77a..1252b97 100644 --- a/src/frostfs_testlib/processes/remote_process.py +++ b/src/frostfs_testlib/processes/remote_process.py @@ -8,18 +8,14 @@ from tenacity import retry from tenacity.stop import stop_after_attempt from tenacity.wait import wait_fixed -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.shell import Shell from frostfs_testlib.shell.command_inspectors import SuInspector from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions -reporter = get_reporter() - class RemoteProcess: - def __init__( - self, cmd: str, process_dir: str, shell: Shell, cmd_inspector: Optional[CommandInspector] - ): + def __init__(self, cmd: str, process_dir: str, shell: Shell, cmd_inspector: Optional[CommandInspector]): self.process_dir = process_dir self.cmd = cmd self.stdout_last_line_number = 0 @@ -32,10 +28,8 @@ class RemoteProcess: self.cmd_inspectors: 
list[CommandInspector] = [cmd_inspector] if cmd_inspector else [] @classmethod - @reporter.step_deco("Create remote process") - def create( - cls, command: str, shell: Shell, working_dir: str = "/tmp", user: Optional[str] = None - ) -> RemoteProcess: + @reporter.step("Create remote process") + def create(cls, command: str, shell: Shell, working_dir: str = "/tmp", user: Optional[str] = None) -> RemoteProcess: """ Create a process on a remote host. @@ -68,7 +62,7 @@ class RemoteProcess: remote_process.pid = remote_process._get_pid() return remote_process - @reporter.step_deco("Get process stdout") + @reporter.step("Get process stdout") def stdout(self, full: bool = False) -> str: """ Method to get process stdout, either fresh info or full. @@ -100,7 +94,7 @@ class RemoteProcess: return resulted_stdout return "" - @reporter.step_deco("Get process stderr") + @reporter.step("Get process stderr") def stderr(self, full: bool = False) -> str: """ Method to get process stderr, either fresh info or full. @@ -131,7 +125,7 @@ class RemoteProcess: return resulted_stderr return "" - @reporter.step_deco("Get process rc") + @reporter.step("Get process rc") def rc(self) -> Optional[int]: if self.proc_rc is not None: return self.proc_rc @@ -148,11 +142,11 @@ class RemoteProcess: self.proc_rc = int(terminal.stdout) return self.proc_rc - @reporter.step_deco("Check if process is running") + @reporter.step("Check if process is running") def running(self) -> bool: return self.rc() is None - @reporter.step_deco("Send signal to process") + @reporter.step("Send signal to process") def send_signal(self, signal: int) -> None: kill_res = self.shell.exec( f"kill -{signal} {self.pid}", @@ -161,27 +155,23 @@ class RemoteProcess: if "No such process" in kill_res.stderr: return if kill_res.return_code: - raise AssertionError( - f"Signal {signal} not sent. Return code of kill: {kill_res.return_code}" - ) + raise AssertionError(f"Signal {signal} not sent. 
Return code of kill: {kill_res.return_code}")
 
-    @reporter.step_deco("Stop process")
+    @reporter.step("Stop process")
     def stop(self) -> None:
         self.send_signal(15)
 
-    @reporter.step_deco("Kill process")
+    @reporter.step("Kill process")
     def kill(self) -> None:
         self.send_signal(9)
 
-    @reporter.step_deco("Clear process directory")
+    @reporter.step("Clear process directory")
     def clear(self) -> None:
         if self.process_dir == "/":
             raise AssertionError(f"Invalid path to delete: {self.process_dir}")
 
-        self.shell.exec(
-            f"rm -rf {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)
-        )
+        self.shell.exec(f"rm -rf {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors))
 
-    @reporter.step_deco("Start remote process")
+    @reporter.step("Start remote process")
     def _start_process(self) -> None:
         self.shell.exec(
             f"nohup {self.process_dir}/command.sh </dev/null &>{self.process_dir}/stdout &",
             CommandOptions(extra_inspectors=self.cmd_inspectors),
         )
 
-    @reporter.step_deco("Create process directory")
+    @reporter.step("Create process directory")
     def _create_process_dir(self) -> None:
-        self.shell.exec(
-            f"mkdir -p {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)
-        )
-        self.shell.exec(
-            f"chmod 777 {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)
-        )
-        terminal = self.shell.exec(
-            f"realpath {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors)
-        )
+        self.shell.exec(f"mkdir -p {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors))
+        self.shell.exec(f"chmod 777 {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors))
+        terminal = self.shell.exec(f"realpath {self.process_dir}", CommandOptions(extra_inspectors=self.cmd_inspectors))
         self.process_dir = terminal.stdout.strip()
 
-    @reporter.step_deco("Get pid")
+    @reporter.step("Get pid")
     @retry(wait=wait_fixed(10), stop=stop_after_attempt(5), reraise=True)
     def _get_pid(self) -> str:
-        terminal = self.shell.exec(
-            f"cat {self.process_dir}/pid", CommandOptions(extra_inspectors=self.cmd_inspectors)
-        )
+        terminal = self.shell.exec(f"cat {self.process_dir}/pid", CommandOptions(extra_inspectors=self.cmd_inspectors))
         assert terminal.stdout, f"invalid pid: {terminal.stdout}"
         return terminal.stdout.strip()
 
-    @reporter.step_deco("Generate command script")
+    @reporter.step("Generate command script")
     def _generate_command_script(self, command: str) -> None:
         command = command.replace('"', '\\"').replace("\\", "\\\\")
         script = (
diff --git a/src/frostfs_testlib/reporter/__init__.py b/src/frostfs_testlib/reporter/__init__.py
index e2c113c..848c175 100644
--- a/src/frostfs_testlib/reporter/__init__.py
+++ b/src/frostfs_testlib/reporter/__init__.py
@@ -1,3 +1,5 @@
+from typing import Any
+
 from frostfs_testlib.reporter.allure_handler import AllureHandler
 from frostfs_testlib.reporter.interfaces import ReporterHandler
 from frostfs_testlib.reporter.reporter import Reporter
@@ -20,3 +22,7 @@ def get_reporter() -> Reporter:
 
 def step(title: str):
     return __reporter.step(title)
+
+
+def attach(content: Any, file_name: str):
+    return __reporter.attach(content, file_name)
diff --git a/src/frostfs_testlib/reporter/allure_handler.py b/src/frostfs_testlib/reporter/allure_handler.py
index 9089f98..ef63638 100644
--- a/src/frostfs_testlib/reporter/allure_handler.py
+++ b/src/frostfs_testlib/reporter/allure_handler.py
@@ -21,9 +21,14 @@ class AllureHandler(ReporterHandler):
     def attach(self, body: Any, file_name: str) -> None:
         attachment_name, extension = os.path.splitext(file_name)
+        if extension.startswith("."):
+            extension = extension[1:]
         attachment_type = self._resolve_attachment_type(extension)
 
-        allure.attach(body, attachment_name, attachment_type, extension)
+        if 
os.path.exists(body): + allure.attach.file(body, file_name, attachment_type, extension) + else: + allure.attach(body, attachment_name, attachment_type, extension) def _resolve_attachment_type(self, extension: str) -> attachment_type: """Try to find matching Allure attachment type by extension. diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 059e949..e4f2bb2 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -6,7 +6,7 @@ from datetime import datetime from time import sleep from typing import Literal, Optional, Union -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.shell import CommandOptions @@ -15,7 +15,6 @@ from frostfs_testlib.shell.local_shell import LocalShell # TODO: Refactor this code to use shell instead of _cmd_run from frostfs_testlib.utils.cli_utils import _configure_aws_cli -reporter = get_reporter() logger = logging.getLogger("NeoLogger") command_options = CommandOptions(timeout=480) @@ -28,8 +27,10 @@ class AwsCliClient(S3ClientWrapper): common_flags = "--no-verify-ssl --no-paginate" s3gate_endpoint: str - @reporter.step_deco("Configure S3 client (aws cli)") - def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str='default') -> None: + @reporter.step("Configure S3 client (aws cli)") + def __init__( + self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default" + ) -> None: self.s3gate_endpoint = s3gate_endpoint self.profile = profile self.local_shell = LocalShell() @@ -42,11 +43,11 @@ class AwsCliClient(S3ClientWrapper): except Exception as err: raise RuntimeError("Error while configuring AwsCliClient") from err - @reporter.step_deco("Set endpoint S3 to {s3gate_endpoint}") + @reporter.step("Set endpoint S3 to {s3gate_endpoint}") def set_endpoint(self, s3gate_endpoint: str): self.s3gate_endpoint = s3gate_endpoint - @reporter.step_deco("Create bucket S3") + @reporter.step("Create bucket S3") def create_bucket( self, bucket: Optional[str] = None, @@ -85,25 +86,25 @@ class AwsCliClient(S3ClientWrapper): return bucket - @reporter.step_deco("List buckets S3") + @reporter.step("List buckets S3") def list_buckets(self) -> list[str]: cmd = f"aws {self.common_flags} s3api list-buckets --endpoint {self.s3gate_endpoint} --profile {self.profile}" output = self.local_shell.exec(cmd).stdout buckets_json = self._to_json(output) return [bucket["Name"] for bucket in buckets_json["Buckets"]] - @reporter.step_deco("Delete bucket S3") + @reporter.step("Delete bucket S3") def delete_bucket(self, bucket: str) -> None: cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" self.local_shell.exec(cmd, command_options) sleep(S3_SYNC_WAIT_TIME) - @reporter.step_deco("Head bucket S3") + @reporter.step("Head bucket S3") def head_bucket(self, bucket: str) -> None: cmd = f"aws {self.common_flags} s3api head-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" self.local_shell.exec(cmd) - @reporter.step_deco("Put bucket versioning status") + @reporter.step("Put bucket versioning status") def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> 
None: cmd = ( f"aws {self.common_flags} s3api put-bucket-versioning --bucket {bucket} " @@ -112,7 +113,7 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Get bucket versioning status") + @reporter.step("Get bucket versioning status") def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: cmd = ( f"aws {self.common_flags} s3api get-bucket-versioning --bucket {bucket} " @@ -122,7 +123,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response.get("Status") - @reporter.step_deco("Put bucket tagging") + @reporter.step("Put bucket tagging") def put_bucket_tagging(self, bucket: str, tags: list) -> None: tags_json = {"TagSet": [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]} cmd = ( @@ -131,34 +132,42 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Get bucket tagging") + @reporter.step("Get bucket tagging") def get_bucket_tagging(self, bucket: str) -> list: cmd = ( - f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("TagSet") - @reporter.step_deco("Get bucket acl") + @reporter.step("Get bucket acl") def get_bucket_acl(self, bucket: str) -> list: - cmd = f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + cmd = ( + f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Grants") - @reporter.step_deco("Get bucket location") + @reporter.step("Get bucket location") def get_bucket_location(self, bucket: str) -> dict: cmd = ( - f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("LocationConstraint") - @reporter.step_deco("List objects S3") + @reporter.step("List objects S3") def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + cmd = ( + f"aws {self.common_flags} s3api list-objects --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -167,9 +176,12 @@ class AwsCliClient(S3ClientWrapper): return response if full_output else obj_list - @reporter.step_deco("List objects S3 v2") + @reporter.step("List objects S3 v2") def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - cmd = f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + cmd = ( + f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile 
{self.profile}" + ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -178,7 +190,7 @@ class AwsCliClient(S3ClientWrapper): return response if full_output else obj_list - @reporter.step_deco("List objects versions S3") + @reporter.step("List objects versions S3") def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: cmd = ( f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " @@ -188,7 +200,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response if full_output else response.get("Versions", []) - @reporter.step_deco("List objects delete markers S3") + @reporter.step("List objects delete markers S3") def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: cmd = ( f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " @@ -198,7 +210,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response if full_output else response.get("DeleteMarkers", []) - @reporter.step_deco("Copy object S3") + @reporter.step("Copy object S3") def copy_object( self, source_bucket: str, @@ -236,7 +248,7 @@ class AwsCliClient(S3ClientWrapper): self.local_shell.exec(cmd, command_options) return key - @reporter.step_deco("Put object S3") + @reporter.step("Put object S3") def put_object( self, bucket: str, @@ -280,7 +292,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response.get("VersionId") - @reporter.step_deco("Head object S3") + @reporter.step("Head object S3") def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: version = f" --version-id {version_id}" if version_id else "" cmd = ( @@ -291,7 +303,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response - @reporter.step_deco("Get object S3") + @reporter.step("Get object S3") def get_object( self, bucket: str, @@ -312,7 +324,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response if full_output else file_path - @reporter.step_deco("Get object ACL") + @reporter.step("Get object ACL") def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: version = f" --version-id {version_id}" if version_id else "" cmd = ( @@ -323,7 +335,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response.get("Grants") - @reporter.step_deco("Put object ACL") + @reporter.step("Put object ACL") def put_object_acl( self, bucket: str, @@ -346,7 +358,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response.get("Grants") - @reporter.step_deco("Put bucket ACL") + @reporter.step("Put bucket ACL") def put_bucket_acl( self, bucket: str, @@ -354,7 +366,10 @@ class AwsCliClient(S3ClientWrapper): grant_write: Optional[str] = None, grant_read: Optional[str] = None, ) -> None: - cmd = f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} " f" --endpoint {self.s3gate_endpoint} --profile {self.profile}" + cmd = ( + f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} " + f" --endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) if acl: cmd += f" --acl {acl}" if grant_write: @@ -363,7 +378,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --grant-read {grant_read}" self.local_shell.exec(cmd) - @reporter.step_deco("Delete objects S3") + @reporter.step("Delete objects S3") def delete_objects(self, bucket: str, keys: list[str]) -> dict: file_path = 
os.path.join(os.getcwd(), ASSETS_DIR, "delete.json") delete_structure = json.dumps(_make_objs_dict(keys)) @@ -380,7 +395,7 @@ class AwsCliClient(S3ClientWrapper): sleep(S3_SYNC_WAIT_TIME) return response - @reporter.step_deco("Delete object S3") + @reporter.step("Delete object S3") def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: version = f" --version-id {version_id}" if version_id else "" cmd = ( @@ -391,7 +406,7 @@ class AwsCliClient(S3ClientWrapper): sleep(S3_SYNC_WAIT_TIME) return self._to_json(output) - @reporter.step_deco("Delete object versions S3") + @reporter.step("Delete object versions S3") def delete_object_versions(self, bucket: str, object_versions: list) -> dict: # Build deletion list in S3 format delete_list = { @@ -418,13 +433,13 @@ class AwsCliClient(S3ClientWrapper): sleep(S3_SYNC_WAIT_TIME) return self._to_json(output) - @reporter.step_deco("Delete object versions S3 without delete markers") + @reporter.step("Delete object versions S3 without delete markers") def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: # Delete objects without creating delete markers for object_version in object_versions: self.delete_object(bucket=bucket, key=object_version["Key"], version_id=object_version["VersionId"]) - @reporter.step_deco("Get object attributes") + @reporter.step("Get object attributes") def get_object_attributes( self, bucket: str, @@ -456,14 +471,17 @@ class AwsCliClient(S3ClientWrapper): else: return response.get(attributes[0]) - @reporter.step_deco("Get bucket policy") + @reporter.step("Get bucket policy") def get_bucket_policy(self, bucket: str) -> dict: - cmd = f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + cmd = ( + f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("Policy") - @reporter.step_deco("Put bucket policy") + @reporter.step("Put bucket policy") def put_bucket_policy(self, bucket: str, policy: dict) -> None: # Leaving it as is was in test repo. 
Double dumps to escape resulting string # Example: @@ -478,14 +496,17 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Get bucket cors") + @reporter.step("Get bucket cors") def get_bucket_cors(self, bucket: str) -> dict: - cmd = f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + cmd = ( + f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) return response.get("CORSRules") - @reporter.step_deco("Put bucket cors") + @reporter.step("Put bucket cors") def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: cmd = ( f"aws {self.common_flags} s3api put-bucket-cors --bucket {bucket} " @@ -493,14 +514,15 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Delete bucket cors") + @reporter.step("Delete bucket cors") def delete_bucket_cors(self, bucket: str) -> None: cmd = ( - f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) - @reporter.step_deco("Delete bucket tagging") + @reporter.step("Delete bucket tagging") def delete_bucket_tagging(self, bucket: str) -> None: cmd = ( f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {bucket} " @@ -508,7 +530,7 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Put object retention") + @reporter.step("Put object retention") def put_object_retention( self, bucket: str, @@ -526,7 +548,7 @@ class AwsCliClient(S3ClientWrapper): cmd += " --bypass-governance-retention" self.local_shell.exec(cmd) - @reporter.step_deco("Put object legal hold") + @reporter.step("Put object legal hold") def put_object_legal_hold( self, bucket: str, @@ -542,7 +564,7 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Put object tagging") + @reporter.step("Put object tagging") def put_object_tagging(self, bucket: str, key: str, tags: list) -> None: tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tagging = {"TagSet": tags} @@ -552,7 +574,7 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Get object tagging") + @reporter.step("Get object tagging") def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: version = f" --version-id {version_id}" if version_id else "" cmd = ( @@ -563,7 +585,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response.get("TagSet") - @reporter.step_deco("Delete object tagging") + @reporter.step("Delete object tagging") def delete_object_tagging(self, bucket: str, key: str) -> None: cmd = ( f"aws {self.common_flags} s3api delete-object-tagging --bucket {bucket} " @@ -571,7 +593,7 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Sync directory S3") + @reporter.step("Sync directory S3") def sync( self, bucket: str, @@ -579,7 +601,10 @@ class AwsCliClient(S3ClientWrapper): acl: Optional[str] = None, metadata: Optional[dict] = None, ) -> dict: - cmd = f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " 
f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + cmd = ( + f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) if metadata: cmd += " --metadata" for key, value in metadata.items(): @@ -589,7 +614,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd, command_options).stdout return self._to_json(output) - @reporter.step_deco("CP directory S3") + @reporter.step("CP directory S3") def cp( self, bucket: str, @@ -610,7 +635,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd, command_options).stdout return self._to_json(output) - @reporter.step_deco("Create multipart upload S3") + @reporter.step("Create multipart upload S3") def create_multipart_upload(self, bucket: str, key: str) -> str: cmd = ( f"aws {self.common_flags} s3api create-multipart-upload --bucket {bucket} " @@ -623,7 +648,7 @@ class AwsCliClient(S3ClientWrapper): return response["UploadId"] - @reporter.step_deco("List multipart uploads S3") + @reporter.step("List multipart uploads S3") def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: cmd = ( f"aws {self.common_flags} s3api list-multipart-uploads --bucket {bucket} " @@ -633,7 +658,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response.get("Uploads") - @reporter.step_deco("Abort multipart upload S3") + @reporter.step("Abort multipart upload S3") def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: cmd = ( f"aws {self.common_flags} s3api abort-multipart-upload --bucket {bucket} " @@ -641,7 +666,7 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Upload part S3") + @reporter.step("Upload part S3") def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: cmd = ( f"aws {self.common_flags} s3api upload-part --bucket {bucket} --key {key} " @@ -653,7 +678,7 @@ class AwsCliClient(S3ClientWrapper): assert response.get("ETag"), f"Expected ETag in response:\n{response}" return response["ETag"] - @reporter.step_deco("Upload copy part S3") + @reporter.step("Upload copy part S3") def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: cmd = ( f"aws {self.common_flags} s3api upload-part-copy --bucket {bucket} --key {key} " @@ -666,7 +691,7 @@ class AwsCliClient(S3ClientWrapper): return response["CopyPartResult"]["ETag"] - @reporter.step_deco("List parts S3") + @reporter.step("List parts S3") def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: cmd = ( f"aws {self.common_flags} s3api list-parts --bucket {bucket} --key {key} " @@ -679,7 +704,7 @@ class AwsCliClient(S3ClientWrapper): return response["Parts"] - @reporter.step_deco("Complete multipart upload S3") + @reporter.step("Complete multipart upload S3") def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: file_path = os.path.join(os.getcwd(), ASSETS_DIR, "parts.json") parts_dict = {"Parts": [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]} @@ -696,7 +721,7 @@ class AwsCliClient(S3ClientWrapper): ) self.local_shell.exec(cmd) - @reporter.step_deco("Put object lock configuration") + @reporter.step("Put object lock configuration") def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: cmd = ( f"aws {self.common_flags} s3api put-object-lock-configuration --bucket {bucket} " 
@@ -705,7 +730,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout return self._to_json(output) - @reporter.step_deco("Get object lock configuration") + @reporter.step("Get object lock configuration") def get_object_lock_configuration(self, bucket: str): cmd = ( f"aws {self.common_flags} s3api get-object-lock-configuration --bucket {bucket} " diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index ba3716a..bdb177e 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -13,17 +13,11 @@ from botocore.config import Config from botocore.exceptions import ClientError from mypy_boto3_s3 import S3Client -from frostfs_testlib.reporter import get_reporter -from frostfs_testlib.resources.common import ( - ASSETS_DIR, - MAX_REQUEST_ATTEMPTS, - RETRY_MODE, - S3_SYNC_WAIT_TIME, -) +from frostfs_testlib import reporter +from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.utils.cli_utils import log_command_execution -reporter = get_reporter() logger = logging.getLogger("NeoLogger") # Disable warnings on self-signed certificate which the @@ -46,9 +40,11 @@ def report_error(func): class Boto3ClientWrapper(S3ClientWrapper): __repr_name__: str = "Boto3 client" - @reporter.step_deco("Configure S3 client (boto3)") + @reporter.step("Configure S3 client (boto3)") @report_error - def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str='default') -> None: + def __init__( + self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default" + ) -> None: self.boto3_client: S3Client = None self.session = boto3.Session(profile_name=profile) self.config = Config( @@ -62,7 +58,7 @@ class Boto3ClientWrapper(S3ClientWrapper): self.s3gate_endpoint: str = "" self.set_endpoint(s3gate_endpoint) - @reporter.step_deco("Set endpoint S3 to {s3gate_endpoint}") + @reporter.step("Set endpoint S3 to {s3gate_endpoint}") def set_endpoint(self, s3gate_endpoint: str): if self.s3gate_endpoint == s3gate_endpoint: return @@ -90,7 +86,7 @@ class Boto3ClientWrapper(S3ClientWrapper): return result # BUCKET METHODS # - @reporter.step_deco("Create bucket S3") + @reporter.step("Create bucket S3") @report_error def create_bucket( self, @@ -118,16 +114,14 @@ class Boto3ClientWrapper(S3ClientWrapper): elif grant_full_control: params.update({"GrantFullControl": grant_full_control}) if location_constraint: - params.update( - {"CreateBucketConfiguration": {"LocationConstraint": location_constraint}} - ) + params.update({"CreateBucketConfiguration": {"LocationConstraint": location_constraint}}) s3_bucket = self.boto3_client.create_bucket(**params) log_command_execution(f"Created S3 bucket {bucket}", s3_bucket) sleep(S3_SYNC_WAIT_TIME) return bucket - @reporter.step_deco("List buckets S3") + @reporter.step("List buckets S3") @report_error def list_buckets(self) -> list[str]: found_buckets = [] @@ -140,20 +134,20 @@ class Boto3ClientWrapper(S3ClientWrapper): return found_buckets - @reporter.step_deco("Delete bucket S3") + @reporter.step("Delete bucket S3") @report_error def delete_bucket(self, bucket: str) -> None: response = self.boto3_client.delete_bucket(Bucket=bucket) log_command_execution("S3 Delete bucket result", response) sleep(S3_SYNC_WAIT_TIME) - @reporter.step_deco("Head bucket S3") + 
@reporter.step("Head bucket S3") @report_error def head_bucket(self, bucket: str) -> None: response = self.boto3_client.head_bucket(Bucket=bucket) log_command_execution("S3 Head bucket result", response) - @reporter.step_deco("Put bucket versioning status") + @reporter.step("Put bucket versioning status") @report_error def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: response = self.boto3_client.put_bucket_versioning( @@ -161,7 +155,7 @@ class Boto3ClientWrapper(S3ClientWrapper): ) log_command_execution("S3 Set bucket versioning to", response) - @reporter.step_deco("Get bucket versioning status") + @reporter.step("Get bucket versioning status") @report_error def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: response = self.boto3_client.get_bucket_versioning(Bucket=bucket) @@ -169,7 +163,7 @@ class Boto3ClientWrapper(S3ClientWrapper): log_command_execution("S3 Got bucket versioning status", response) return status - @reporter.step_deco("Put bucket tagging") + @reporter.step("Put bucket tagging") @report_error def put_bucket_tagging(self, bucket: str, tags: list) -> None: tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] @@ -177,27 +171,27 @@ class Boto3ClientWrapper(S3ClientWrapper): response = self.boto3_client.put_bucket_tagging(Bucket=bucket, Tagging=tagging) log_command_execution("S3 Put bucket tagging", response) - @reporter.step_deco("Get bucket tagging") + @reporter.step("Get bucket tagging") @report_error def get_bucket_tagging(self, bucket: str) -> list: response = self.boto3_client.get_bucket_tagging(Bucket=bucket) log_command_execution("S3 Get bucket tagging", response) return response.get("TagSet") - @reporter.step_deco("Get bucket acl") + @reporter.step("Get bucket acl") @report_error def get_bucket_acl(self, bucket: str) -> list: response = self.boto3_client.get_bucket_acl(Bucket=bucket) log_command_execution("S3 Get bucket acl", response) return response.get("Grants") - @reporter.step_deco("Delete bucket tagging") + @reporter.step("Delete bucket tagging") @report_error def delete_bucket_tagging(self, bucket: str) -> None: response = self.boto3_client.delete_bucket_tagging(Bucket=bucket) log_command_execution("S3 Delete bucket tagging", response) - @reporter.step_deco("Put bucket ACL") + @reporter.step("Put bucket ACL") @report_error def put_bucket_acl( self, @@ -214,60 +208,56 @@ class Boto3ClientWrapper(S3ClientWrapper): response = self.boto3_client.put_bucket_acl(**params) log_command_execution("S3 ACL bucket result", response) - @reporter.step_deco("Put object lock configuration") + @reporter.step("Put object lock configuration") @report_error def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: - response = self.boto3_client.put_object_lock_configuration( - Bucket=bucket, ObjectLockConfiguration=configuration - ) + response = self.boto3_client.put_object_lock_configuration(Bucket=bucket, ObjectLockConfiguration=configuration) log_command_execution("S3 put_object_lock_configuration result", response) return response - @reporter.step_deco("Get object lock configuration") + @reporter.step("Get object lock configuration") @report_error def get_object_lock_configuration(self, bucket: str) -> dict: response = self.boto3_client.get_object_lock_configuration(Bucket=bucket) log_command_execution("S3 get_object_lock_configuration result", response) return response.get("ObjectLockConfiguration") - @reporter.step_deco("Get bucket policy") + @reporter.step("Get 
bucket policy") @report_error def get_bucket_policy(self, bucket: str) -> str: response = self.boto3_client.get_bucket_policy(Bucket=bucket) log_command_execution("S3 get_bucket_policy result", response) return response.get("Policy") - @reporter.step_deco("Put bucket policy") + @reporter.step("Put bucket policy") @report_error def put_bucket_policy(self, bucket: str, policy: dict) -> None: response = self.boto3_client.put_bucket_policy(Bucket=bucket, Policy=json.dumps(policy)) log_command_execution("S3 put_bucket_policy result", response) return response - @reporter.step_deco("Get bucket cors") + @reporter.step("Get bucket cors") @report_error def get_bucket_cors(self, bucket: str) -> dict: response = self.boto3_client.get_bucket_cors(Bucket=bucket) log_command_execution("S3 get_bucket_cors result", response) return response.get("CORSRules") - @reporter.step_deco("Get bucket location") + @reporter.step("Get bucket location") @report_error def get_bucket_location(self, bucket: str) -> str: response = self.boto3_client.get_bucket_location(Bucket=bucket) log_command_execution("S3 get_bucket_location result", response) return response.get("LocationConstraint") - @reporter.step_deco("Put bucket cors") + @reporter.step("Put bucket cors") @report_error def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: - response = self.boto3_client.put_bucket_cors( - Bucket=bucket, CORSConfiguration=cors_configuration - ) + response = self.boto3_client.put_bucket_cors(Bucket=bucket, CORSConfiguration=cors_configuration) log_command_execution("S3 put_bucket_cors result", response) return response - @reporter.step_deco("Delete bucket cors") + @reporter.step("Delete bucket cors") @report_error def delete_bucket_cors(self, bucket: str) -> None: response = self.boto3_client.delete_bucket_cors(Bucket=bucket) @@ -276,7 +266,7 @@ class Boto3ClientWrapper(S3ClientWrapper): # END OF BUCKET METHODS # # OBJECT METHODS # - @reporter.step_deco("List objects S3 v2") + @reporter.step("List objects S3 v2") @report_error def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: response = self.boto3_client.list_objects_v2(Bucket=bucket) @@ -287,7 +277,7 @@ class Boto3ClientWrapper(S3ClientWrapper): return response if full_output else obj_list - @reporter.step_deco("List objects S3") + @reporter.step("List objects S3") @report_error def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: response = self.boto3_client.list_objects(Bucket=bucket) @@ -298,21 +288,21 @@ class Boto3ClientWrapper(S3ClientWrapper): return response if full_output else obj_list - @reporter.step_deco("List objects versions S3") + @reporter.step("List objects versions S3") @report_error def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: response = self.boto3_client.list_object_versions(Bucket=bucket) log_command_execution("S3 List objects versions result", response) return response if full_output else response.get("Versions", []) - @reporter.step_deco("List objects delete markers S3") + @reporter.step("List objects delete markers S3") @report_error def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: response = self.boto3_client.list_object_versions(Bucket=bucket) log_command_execution("S3 List objects delete markers result", response) return response if full_output else response.get("DeleteMarkers", []) - @reporter.step_deco("Put object S3") + @reporter.step("Put object S3") @report_error def put_object( self, @@ 
-343,7 +333,7 @@ class Boto3ClientWrapper(S3ClientWrapper): log_command_execution("S3 Put object result", response) return response.get("VersionId") - @reporter.step_deco("Head object S3") + @reporter.step("Head object S3") @report_error def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: params = { @@ -355,7 +345,7 @@ class Boto3ClientWrapper(S3ClientWrapper): log_command_execution("S3 Head object result", response) return response - @reporter.step_deco("Delete object S3") + @reporter.step("Delete object S3") @report_error def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: params = { @@ -368,7 +358,7 @@ class Boto3ClientWrapper(S3ClientWrapper): sleep(S3_SYNC_WAIT_TIME) return response - @reporter.step_deco("Delete objects S3") + @reporter.step("Delete objects S3") @report_error def delete_objects(self, bucket: str, keys: list[str]) -> dict: response = self.boto3_client.delete_objects(Bucket=bucket, Delete=_make_objs_dict(keys)) @@ -379,7 +369,7 @@ class Boto3ClientWrapper(S3ClientWrapper): sleep(S3_SYNC_WAIT_TIME) return response - @reporter.step_deco("Delete object versions S3") + @reporter.step("Delete object versions S3") @report_error def delete_object_versions(self, bucket: str, object_versions: list) -> dict: # Build deletion list in S3 format @@ -396,7 +386,7 @@ class Boto3ClientWrapper(S3ClientWrapper): log_command_execution("S3 Delete objects result", response) return response - @reporter.step_deco("Delete object versions S3 without delete markers") + @reporter.step("Delete object versions S3 without delete markers") @report_error def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: # Delete objects without creating delete markers @@ -406,7 +396,7 @@ class Boto3ClientWrapper(S3ClientWrapper): ) log_command_execution("S3 Delete object result", response) - @reporter.step_deco("Put object ACL") + @reporter.step("Put object ACL") @report_error def put_object_acl( self, @@ -419,7 +409,7 @@ class Boto3ClientWrapper(S3ClientWrapper): # pytest.skip("Method put_object_acl is not supported by boto3 client") raise NotImplementedError("Unsupported for boto3 client") - @reporter.step_deco("Get object ACL") + @reporter.step("Get object ACL") @report_error def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: params = { @@ -431,7 +421,7 @@ class Boto3ClientWrapper(S3ClientWrapper): log_command_execution("S3 ACL objects result", response) return response.get("Grants") - @reporter.step_deco("Copy object S3") + @reporter.step("Copy object S3") @report_error def copy_object( self, @@ -460,7 +450,7 @@ class Boto3ClientWrapper(S3ClientWrapper): log_command_execution("S3 Copy objects result", response) return key - @reporter.step_deco("Get object S3") + @reporter.step("Get object S3") @report_error def get_object( self, @@ -478,8 +468,7 @@ class Boto3ClientWrapper(S3ClientWrapper): params = { self._to_s3_param(param): value for param, value in {**locals(), **{"Range": range_str}}.items() - if param not in ["self", "object_range", "full_output", "range_str", "filename"] - and value is not None + if param not in ["self", "object_range", "full_output", "range_str", "filename"] and value is not None } response = self.boto3_client.get_object(**params) log_command_execution("S3 Get objects result", response) @@ -491,7 +480,7 @@ class Boto3ClientWrapper(S3ClientWrapper): chunk = response["Body"].read(1024) return response if full_output else filename - 
@reporter.step_deco("Create multipart upload S3") + @reporter.step("Create multipart upload S3") @report_error def create_multipart_upload(self, bucket: str, key: str) -> str: response = self.boto3_client.create_multipart_upload(Bucket=bucket, Key=key) @@ -500,7 +489,7 @@ class Boto3ClientWrapper(S3ClientWrapper): return response["UploadId"] - @reporter.step_deco("List multipart uploads S3") + @reporter.step("List multipart uploads S3") @report_error def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: response = self.boto3_client.list_multipart_uploads(Bucket=bucket) @@ -508,19 +497,15 @@ class Boto3ClientWrapper(S3ClientWrapper): return response.get("Uploads") - @reporter.step_deco("Abort multipart upload S3") + @reporter.step("Abort multipart upload S3") @report_error def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: - response = self.boto3_client.abort_multipart_upload( - Bucket=bucket, Key=key, UploadId=upload_id - ) + response = self.boto3_client.abort_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id) log_command_execution("S3 Abort multipart upload", response) - @reporter.step_deco("Upload part S3") + @reporter.step("Upload part S3") @report_error - def upload_part( - self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str - ) -> str: + def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: with open(filepath, "rb") as put_file: body = put_file.read() @@ -536,11 +521,9 @@ class Boto3ClientWrapper(S3ClientWrapper): return response["ETag"] - @reporter.step_deco("Upload copy part S3") + @reporter.step("Upload copy part S3") @report_error - def upload_part_copy( - self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str - ) -> str: + def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: response = self.boto3_client.upload_part_copy( UploadId=upload_id, Bucket=bucket, @@ -549,13 +532,11 @@ class Boto3ClientWrapper(S3ClientWrapper): CopySource=copy_source, ) log_command_execution("S3 Upload copy part", response) - assert response.get("CopyPartResult", []).get( - "ETag" - ), f"Expected ETag in response:\n{response}" + assert response.get("CopyPartResult", []).get("ETag"), f"Expected ETag in response:\n{response}" return response["CopyPartResult"]["ETag"] - @reporter.step_deco("List parts S3") + @reporter.step("List parts S3") @report_error def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: response = self.boto3_client.list_parts(UploadId=upload_id, Bucket=bucket, Key=key) @@ -564,7 +545,7 @@ class Boto3ClientWrapper(S3ClientWrapper): return response["Parts"] - @reporter.step_deco("Complete multipart upload S3") + @reporter.step("Complete multipart upload S3") @report_error def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts] @@ -573,7 +554,7 @@ class Boto3ClientWrapper(S3ClientWrapper): ) log_command_execution("S3 Complete multipart upload", response) - @reporter.step_deco("Put object retention") + @reporter.step("Put object retention") @report_error def put_object_retention( self, @@ -591,7 +572,7 @@ class Boto3ClientWrapper(S3ClientWrapper): response = self.boto3_client.put_object_retention(**params) log_command_execution("S3 Put object retention ", response) - @reporter.step_deco("Put object legal hold") + @reporter.step("Put object legal hold") 
@report_error def put_object_legal_hold( self, @@ -609,7 +590,7 @@ class Boto3ClientWrapper(S3ClientWrapper): response = self.boto3_client.put_object_legal_hold(**params) log_command_execution("S3 Put object legal hold ", response) - @reporter.step_deco("Put object tagging") + @reporter.step("Put object tagging") @report_error def put_object_tagging(self, bucket: str, key: str, tags: list) -> None: tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] @@ -617,7 +598,7 @@ class Boto3ClientWrapper(S3ClientWrapper): response = self.boto3_client.put_object_tagging(Bucket=bucket, Key=key, Tagging=tagging) log_command_execution("S3 Put object tagging", response) - @reporter.step_deco("Get object tagging") + @reporter.step("Get object tagging") @report_error def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: params = { @@ -629,13 +610,13 @@ class Boto3ClientWrapper(S3ClientWrapper): log_command_execution("S3 Get object tagging", response) return response.get("TagSet") - @reporter.step_deco("Delete object tagging") + @reporter.step("Delete object tagging") @report_error def delete_object_tagging(self, bucket: str, key: str) -> None: response = self.boto3_client.delete_object_tagging(Bucket=bucket, Key=key) log_command_execution("S3 Delete object tagging", response) - @reporter.step_deco("Get object attributes") + @reporter.step("Get object attributes") @report_error def get_object_attributes( self, @@ -650,7 +631,7 @@ class Boto3ClientWrapper(S3ClientWrapper): logger.warning("Method get_object_attributes is not supported by boto3 client") return {} - @reporter.step_deco("Sync directory S3") + @reporter.step("Sync directory S3") @report_error def sync( self, @@ -661,7 +642,7 @@ class Boto3ClientWrapper(S3ClientWrapper): ) -> dict: raise NotImplementedError("Sync is not supported for boto3 client") - @reporter.step_deco("CP directory S3") + @reporter.step("CP directory S3") @report_error def cp( self, diff --git a/src/frostfs_testlib/shell/local_shell.py b/src/frostfs_testlib/shell/local_shell.py index 26c7e9b..acf01ff 100644 --- a/src/frostfs_testlib/shell/local_shell.py +++ b/src/frostfs_testlib/shell/local_shell.py @@ -6,11 +6,10 @@ from typing import IO, Optional import pexpect -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell logger = logging.getLogger("frostfs.testlib.shell") -reporter = get_reporter() class LocalShell(Shell): diff --git a/src/frostfs_testlib/shell/ssh_shell.py b/src/frostfs_testlib/shell/ssh_shell.py index 6b12f81..a7e6e1d 100644 --- a/src/frostfs_testlib/shell/ssh_shell.py +++ b/src/frostfs_testlib/shell/ssh_shell.py @@ -9,11 +9,10 @@ from typing import ClassVar, Optional, Tuple from paramiko import AutoAddPolicy, Channel, ECDSAKey, Ed25519Key, PKey, RSAKey, SSHClient, SSHException, ssh_exception from paramiko.ssh_exception import AuthenticationException -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell, SshCredentials logger = logging.getLogger("frostfs.testlib.shell") -reporter = get_reporter() class SshConnectionProvider: diff --git a/src/frostfs_testlib/steps/acl.py b/src/frostfs_testlib/steps/acl.py index 0ef101b..e97e4ee 100644 --- a/src/frostfs_testlib/steps/acl.py +++ b/src/frostfs_testlib/steps/acl.py @@ -8,8 +8,8 @@ from 
typing import List, Optional, Union import base58 +from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsCli -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell @@ -22,11 +22,10 @@ from frostfs_testlib.storage.dataclasses.acl import ( ) from frostfs_testlib.utils import wallet_utils -reporter = get_reporter() logger = logging.getLogger("NeoLogger") -@reporter.step_deco("Get extended ACL") +@reporter.step("Get extended ACL") def get_eacl(wallet_path: str, cid: str, shell: Shell, endpoint: str) -> Optional[str]: cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) try: @@ -40,7 +39,7 @@ def get_eacl(wallet_path: str, cid: str, shell: Shell, endpoint: str) -> Optiona return result.stdout -@reporter.step_deco("Set extended ACL") +@reporter.step("Set extended ACL") def set_eacl( wallet_path: str, cid: str, @@ -165,24 +164,20 @@ def eacl_rules(access: str, verbs: list, user: str) -> list[str]: return rules -def sign_bearer( - shell: Shell, wallet_path: str, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool -) -> None: - frostfscli = FrostfsCli( - shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG - ) +def sign_bearer(shell: Shell, wallet_path: str, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool) -> None: + frostfscli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG) frostfscli.util.sign_bearer_token( wallet=wallet_path, from_file=eacl_rules_file_from, to_file=eacl_rules_file_to, json=json ) -@reporter.step_deco("Wait for eACL cache expired") +@reporter.step("Wait for eACL cache expired") def wait_for_cache_expired(): sleep(FROSTFS_CONTRACT_CACHE_TIMEOUT) return -@reporter.step_deco("Return bearer token in base64 to caller") +@reporter.step("Return bearer token in base64 to caller") def bearer_token_base64_from_file( bearer_path: str, ) -> str: diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py index 74f445a..be96138 100644 --- a/src/frostfs_testlib/steps/cli/container.py +++ b/src/frostfs_testlib/steps/cli/container.py @@ -5,8 +5,8 @@ from dataclasses import dataclass from time import sleep from typing import Optional, Union +from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsCli -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell @@ -17,7 +17,6 @@ from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.utils import json_utils from frostfs_testlib.utils.file_utils import generate_file, get_file_hash -reporter = get_reporter() logger = logging.getLogger("NeoLogger") @@ -47,7 +46,7 @@ class StorageContainer: def get_wallet_config_path(self) -> str: return self.storage_container_info.wallet_file.config_path - @reporter.step_deco("Generate new object and put in container") + @reporter.step("Generate new object and put in container") def generate_object( self, size: int, @@ -103,7 +102,7 @@ SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X" REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X" -@reporter.step_deco("Create Container") +@reporter.step("Create 
Container") def create_container( wallet: str, shell: Shell, @@ -178,9 +177,7 @@ def wait_for_container_creation( return logger.info(f"There is no {cid} in {containers} yet; sleep {sleep_interval} and continue") sleep(sleep_interval) - raise RuntimeError( - f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting" - ) + raise RuntimeError(f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting") def wait_for_container_deletion( @@ -198,7 +195,7 @@ def wait_for_container_deletion( raise AssertionError(f"Expected container deleted during {attempts * sleep_interval} sec.") -@reporter.step_deco("List Containers") +@reporter.step("List Containers") def list_containers( wallet: str, shell: Shell, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT ) -> list[str]: @@ -219,7 +216,7 @@ def list_containers( return result.stdout.split() -@reporter.step_deco("List Objects in container") +@reporter.step("List Objects in container") def list_objects( wallet: str, shell: Shell, @@ -240,14 +237,12 @@ def list_objects( (list): list of containers """ cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) - result = cli.container.list_objects( - rpc_endpoint=endpoint, wallet=wallet, cid=container_id, timeout=timeout - ) + result = cli.container.list_objects(rpc_endpoint=endpoint, wallet=wallet, cid=container_id, timeout=timeout) logger.info(f"Container objects: \n{result}") return result.stdout.split() -@reporter.step_deco("Get Container") +@reporter.step("Get Container") def get_container( wallet: str, cid: str, @@ -271,9 +266,7 @@ def get_container( """ cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) - result = cli.container.get( - rpc_endpoint=endpoint, wallet=wallet, cid=cid, json_mode=json_mode, timeout=timeout - ) + result = cli.container.get(rpc_endpoint=endpoint, wallet=wallet, cid=cid, json_mode=json_mode, timeout=timeout) if not json_mode: return result.stdout @@ -287,7 +280,7 @@ def get_container( return container_info -@reporter.step_deco("Delete Container") +@reporter.step("Delete Container") # TODO: make the error message about a non-found container more user-friendly def delete_container( wallet: str, @@ -350,7 +343,7 @@ def _parse_cid(output: str) -> str: return splitted[1] -@reporter.step_deco("Search container by name") +@reporter.step("Search container by name") def search_container_by_name(wallet: str, name: str, shell: Shell, endpoint: str): list_cids = list_containers(wallet, shell, endpoint) for cid in list_cids: @@ -360,7 +353,7 @@ def search_container_by_name(wallet: str, name: str, shell: Shell, endpoint: str return None -@reporter.step_deco("Search for nodes with a container") +@reporter.step("Search for nodes with a container") def search_nodes_with_container( wallet: str, cid: str, @@ -370,9 +363,7 @@ def search_nodes_with_container( timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, ) -> list[ClusterNode]: cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) - result = cli.container.search_node( - rpc_endpoint=endpoint, wallet=wallet, cid=cid, timeout=timeout - ) + result = cli.container.search_node(rpc_endpoint=endpoint, wallet=wallet, cid=cid, timeout=timeout) pattern = r"[0-9]+(?:\.[0-9]+){3}" nodes_ip = list(set(re.findall(pattern, result.stdout))) diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index 9c7c694..803524a 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ 
-5,9 +5,9 @@ import re import uuid from typing import Any, Optional +from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsCli from frostfs_testlib.cli.neogo import NeoGo -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell @@ -16,10 +16,9 @@ from frostfs_testlib.utils import json_utils from frostfs_testlib.utils.cli_utils import parse_cmd_table, parse_netmap_output logger = logging.getLogger("NeoLogger") -reporter = get_reporter() -@reporter.step_deco("Get object from random node") +@reporter.step("Get object from random node") def get_object_from_random_node( wallet: str, cid: str, @@ -70,7 +69,7 @@ def get_object_from_random_node( ) -@reporter.step_deco("Get object from {endpoint}") +@reporter.step("Get object from {endpoint}") def get_object( wallet: str, cid: str, @@ -126,7 +125,7 @@ def get_object( return file_path -@reporter.step_deco("Get Range Hash from {endpoint}") +@reporter.step("Get Range Hash from {endpoint}") def get_range_hash( wallet: str, cid: str, @@ -176,7 +175,7 @@ def get_range_hash( return result.stdout.split(":")[1].strip() -@reporter.step_deco("Put object to random node") +@reporter.step("Put object to random node") def put_object_to_random_node( wallet: str, path: str, @@ -235,7 +234,7 @@ def put_object_to_random_node( ) -@reporter.step_deco("Put object at {endpoint} in container {cid}") +@reporter.step("Put object at {endpoint} in container {cid}") def put_object( wallet: str, path: str, @@ -296,7 +295,7 @@ def put_object( return oid.strip() -@reporter.step_deco("Delete object {cid}/{oid} from {endpoint}") +@reporter.step("Delete object {cid}/{oid} from {endpoint}") def delete_object( wallet: str, cid: str, @@ -344,7 +343,7 @@ def delete_object( return tombstone.strip() -@reporter.step_deco("Get Range") +@reporter.step("Get Range") def get_range( wallet: str, cid: str, @@ -397,7 +396,7 @@ def get_range( return range_file_path, content -@reporter.step_deco("Lock Object") +@reporter.step("Lock Object") def lock_object( wallet: str, cid: str, @@ -458,7 +457,7 @@ def lock_object( return oid.strip() -@reporter.step_deco("Search object") +@reporter.step("Search object") def search_object( wallet: str, cid: str, @@ -503,9 +502,7 @@ def search_object( cid=cid, bearer=bearer, xhdr=xhdr, - filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] - if filters - else None, + filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] if filters else None, session=session, phy=phy, root=root, @@ -517,19 +514,17 @@ def search_object( if expected_objects_list: if sorted(found_objects) == sorted(expected_objects_list): logger.info( - f"Found objects list '{found_objects}' " - f"is equal for expected list '{expected_objects_list}'" + f"Found objects list '{found_objects}' " f"is equal for expected list '{expected_objects_list}'" ) else: logger.warning( - f"Found object list {found_objects} " - f"is not equal to expected list '{expected_objects_list}'" + f"Found object list {found_objects} " f"is not equal to expected list '{expected_objects_list}'" ) return found_objects -@reporter.step_deco("Get netmap netinfo") +@reporter.step("Get netmap netinfo") def get_netmap_netinfo( wallet: str, shell: Shell, @@ -581,7 +576,7 @@ def get_netmap_netinfo( return settings -@reporter.step_deco("Head 
object") +@reporter.step("Head object") def head_object( wallet: str, cid: str, @@ -677,7 +672,7 @@ def head_object( return json_utils.decode_simple_header(decoded) -@reporter.step_deco("Run neo-go dump-keys") +@reporter.step("Run neo-go dump-keys") def neo_go_dump_keys(shell: Shell, wallet: str) -> dict: """ Run neo-go dump keys command @@ -702,7 +697,7 @@ def neo_go_dump_keys(shell: Shell, wallet: str) -> dict: return {address_id: wallet_key} -@reporter.step_deco("Run neo-go query height") +@reporter.step("Run neo-go query height") def neo_go_query_height(shell: Shell, endpoint: str) -> dict: """ Run neo-go query height command @@ -734,7 +729,7 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict: } -@reporter.step_deco("Search object nodes") +@reporter.step("Search object nodes") def get_object_nodes( cluster: Cluster, wallet: str, diff --git a/src/frostfs_testlib/steps/complex_object_actions.py b/src/frostfs_testlib/steps/complex_object_actions.py index 54e5fc2..a67dd4c 100644 --- a/src/frostfs_testlib/steps/complex_object_actions.py +++ b/src/frostfs_testlib/steps/complex_object_actions.py @@ -12,7 +12,7 @@ import logging from typing import Optional, Tuple -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell @@ -20,7 +20,6 @@ from frostfs_testlib.steps.cli.object import head_object from frostfs_testlib.storage.cluster import Cluster, StorageNode from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo -reporter = get_reporter() logger = logging.getLogger("NeoLogger") @@ -113,7 +112,7 @@ def get_complex_object_split_ranges( return ranges -@reporter.step_deco("Get Link Object") +@reporter.step("Get Link Object") def get_link_object( wallet: str, cid: str, @@ -166,7 +165,7 @@ def get_link_object( return None -@reporter.step_deco("Get Last Object") +@reporter.step("Get Last Object") def get_last_object( wallet: str, cid: str, diff --git a/src/frostfs_testlib/steps/epoch.py b/src/frostfs_testlib/steps/epoch.py index a589569..5a43ba3 100644 --- a/src/frostfs_testlib/steps/epoch.py +++ b/src/frostfs_testlib/steps/epoch.py @@ -2,8 +2,8 @@ import logging from time import sleep from typing import Optional +from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsAdm, FrostfsCli, NeoGo -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import ( CLI_DEFAULT_TIMEOUT, FROSTFS_ADM_CONFIG_PATH, @@ -19,11 +19,10 @@ from frostfs_testlib.storage.dataclasses.frostfs_services import InnerRing, Morp from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.utils import datetime_utils, wallet_utils -reporter = get_reporter() logger = logging.getLogger("NeoLogger") -@reporter.step_deco("Get epochs from nodes") +@reporter.step("Get epochs from nodes") def get_epochs_from_nodes(shell: Shell, cluster: Cluster) -> dict[str, int]: """ Get current epochs on each node. 
@@ -41,10 +40,8 @@ def get_epochs_from_nodes(shell: Shell, cluster: Cluster) -> dict[str, int]: return epochs_by_node -@reporter.step_deco("Ensure fresh epoch") -def ensure_fresh_epoch( - shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None -) -> int: +@reporter.step("Ensure fresh epoch") +def ensure_fresh_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None) -> int: # ensure new fresh epoch to avoid epoch switch during test session alive_node = alive_node if alive_node else cluster.services(StorageNode)[0] current_epoch = get_epoch(shell, cluster, alive_node) @@ -54,7 +51,7 @@ def ensure_fresh_epoch( return epoch -@reporter.step_deco("Wait up to {timeout} seconds for nodes on cluster to align epochs") +@reporter.step("Wait up to {timeout} seconds for nodes on cluster to align epochs") def wait_for_epochs_align(shell: Shell, cluster: Cluster, timeout=60): @wait_for_success(timeout, 5, None, True) def check_epochs(): @@ -64,7 +61,7 @@ def wait_for_epochs_align(shell: Shell, cluster: Cluster, timeout=60): check_epochs() -@reporter.step_deco("Get Epoch") +@reporter.step("Get Epoch") def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None): alive_node = alive_node if alive_node else cluster.services(StorageNode)[0] endpoint = alive_node.get_rpc_endpoint() @@ -77,7 +74,7 @@ def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] return int(epoch.stdout) -@reporter.step_deco("Tick Epoch") +@reporter.step("Tick Epoch") def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None): """ Tick epoch using frostfs-adm or NeoGo if frostfs-adm is not available (DevEnv) diff --git a/src/frostfs_testlib/steps/http/http_gate.py b/src/frostfs_testlib/steps/http/http_gate.py index 2b70d6c..a8c9899 100644 --- a/src/frostfs_testlib/steps/http/http_gate.py +++ b/src/frostfs_testlib/steps/http/http_gate.py @@ -10,7 +10,7 @@ from urllib.parse import quote_plus import requests -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.resources.common import SIMPLE_OBJECT_SIZE from frostfs_testlib.s3.aws_cli_client import command_options from frostfs_testlib.shell import Shell @@ -21,15 +21,13 @@ from frostfs_testlib.storage.cluster import StorageNode from frostfs_testlib.testing.test_control import retry from frostfs_testlib.utils.file_utils import get_file_hash -reporter = get_reporter() - logger = logging.getLogger("NeoLogger") ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/") local_shell = LocalShell() -@reporter.step_deco("Get via HTTP Gate") +@reporter.step("Get via HTTP Gate") def get_via_http_gate( cid: str, oid: str, @@ -53,9 +51,7 @@ def get_via_http_gate( else: request = f"{endpoint}{request_path}" - resp = requests.get( - request, headers={"Host": http_hostname}, stream=True, timeout=timeout, verify=False - ) + resp = requests.get(request, headers={"Host": http_hostname}, stream=True, timeout=timeout, verify=False) if not resp.ok: raise Exception( @@ -75,10 +71,8 @@ def get_via_http_gate( return file_path -@reporter.step_deco("Get via Zip HTTP Gate") -def get_via_zip_http_gate( - cid: str, prefix: str, endpoint: str, http_hostname: str, timeout: Optional[int] = 300 -): +@reporter.step("Get via Zip HTTP Gate") +def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str, http_hostname: str, timeout: Optional[int] = 300): """ This function gets given object from HTTP gate cid: container id to get object from @@ -111,7 
+105,7 @@ def get_via_zip_http_gate( return os.path.join(os.getcwd(), ASSETS_DIR, prefix) -@reporter.step_deco("Get via HTTP Gate by attribute") +@reporter.step("Get via HTTP Gate by attribute") def get_via_http_gate_by_attribute( cid: str, attribute: dict, @@ -136,9 +130,7 @@ def get_via_http_gate_by_attribute( else: request = f"{endpoint}{request_path}" - resp = requests.get( - request, stream=True, timeout=timeout, verify=False, headers={"Host": http_hostname} - ) + resp = requests.get(request, stream=True, timeout=timeout, verify=False, headers={"Host": http_hostname}) if not resp.ok: raise Exception( @@ -159,7 +151,7 @@ def get_via_http_gate_by_attribute( # TODO: pass http_hostname as a header -@reporter.step_deco("Upload via HTTP Gate") +@reporter.step("Upload via HTTP Gate") def upload_via_http_gate( cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300 ) -> str: @@ -173,9 +165,7 @@ def upload_via_http_gate( request = f"{endpoint}/upload/{cid}" files = {"upload_file": open(path, "rb")} body = {"filename": path} - resp = requests.post( - request, files=files, data=body, headers=headers, timeout=timeout, verify=False - ) + resp = requests.post(request, files=files, data=body, headers=headers, timeout=timeout, verify=False) if not resp.ok: raise Exception( @@ -193,7 +183,7 @@ def upload_via_http_gate( return resp.json().get("object_id") -@reporter.step_deco("Check is the passed object large") +@reporter.step("Check if the passed object is large") def is_object_large(filepath: str) -> bool: """ This function check passed file size and return True if file_size > SIMPLE_OBJECT_SIZE @@ -208,7 +198,7 @@ def is_object_large(filepath: str) -> bool: # TODO: pass http_hostname as a header -@reporter.step_deco("Upload via HTTP Gate using Curl") +@reporter.step("Upload via HTTP Gate using Curl") def upload_via_http_gate_curl( cid: str, filepath: str, @@ -256,7 +246,7 @@ def upload_via_http_gate_curl( @retry(max_attempts=3, sleep_interval=1) -@reporter.step_deco("Get via HTTP Gate using Curl") +@reporter.step("Get via HTTP Gate using Curl") def get_via_http_curl(cid: str, oid: str, endpoint: str, http_hostname: str) -> str: """ This function gets given object from HTTP gate using curl utility.
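Note the decorator order in the hunk just above: @retry is stacked on top of @reporter.step, so the retry wrapper re-invokes the step-wrapped function and each attempt should surface as its own reported step (an inference from the ordering, not something the patch states). The stacking, with the signature taken verbatim from the hunk:

    from frostfs_testlib import reporter
    from frostfs_testlib.testing.test_control import retry

    @retry(max_attempts=3, sleep_interval=1)
    @reporter.step("Get via HTTP Gate using Curl")
    def get_via_http_curl(cid: str, oid: str, endpoint: str, http_hostname: str) -> str:
        ...  # body as in the diff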
@@ -280,7 +270,7 @@ def _attach_allure_step(request: str, status_code: int, req_type="GET"): reporter.attach(command_attachment, f"{req_type} Request") -@reporter.step_deco("Try to get object and expect error") +@reporter.step("Try to get object and expect error") def try_to_get_object_and_expect_error( cid: str, oid: str, @@ -296,7 +286,7 @@ def try_to_get_object_and_expect_error( assert match, f"Expected {err} to match {error_pattern}" -@reporter.step_deco("Verify object can be get using HTTP header attribute") +@reporter.step("Verify object can be get using HTTP header attribute") def get_object_by_attr_and_verify_hashes( oid: str, file_name: str, @@ -305,9 +295,7 @@ def get_object_by_attr_and_verify_hashes( endpoint: str, http_hostname: str, ) -> None: - got_file_path_http = get_via_http_gate( - cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname - ) + got_file_path_http = get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname) got_file_path_http_attr = get_via_http_gate_by_attribute( cid=cid, attribute=attrs, endpoint=endpoint, http_hostname=http_hostname ) @@ -348,9 +336,7 @@ def verify_object_hash( shell=shell, endpoint=random_node.get_rpc_endpoint(), ) - got_file_path_http = object_getter( - cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname - ) + got_file_path_http = object_getter(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname) assert_hashes_are_equal(file_name, got_file_path, got_file_path_http) @@ -359,18 +345,14 @@ def assert_hashes_are_equal(orig_file_name: str, got_file_1: str, got_file_2: st msg = "Expected hashes are equal for files {f1} and {f2}" got_file_hash_http = get_file_hash(got_file_1) assert get_file_hash(got_file_2) == got_file_hash_http, msg.format(f1=got_file_2, f2=got_file_1) - assert get_file_hash(orig_file_name) == got_file_hash_http, msg.format( - f1=orig_file_name, f2=got_file_1 - ) + assert get_file_hash(orig_file_name) == got_file_hash_http, msg.format(f1=orig_file_name, f2=got_file_1) def attr_into_header(attrs: dict) -> dict: return {f"X-Attribute-{_key}": _value for _key, _value in attrs.items()} -@reporter.step_deco( - "Convert each attribute (Key=Value) to the following format: -H 'X-Attribute-Key: Value'" -) +@reporter.step("Convert each attribute (Key=Value) to the following format: -H 'X-Attribute-Key: Value'") def attr_into_str_header_curl(attrs: dict) -> list: headers = [] for k, v in attrs.items(): @@ -379,9 +361,7 @@ def attr_into_str_header_curl(attrs: dict) -> list: return headers -@reporter.step_deco( - "Try to get object via http (pass http_request and optional attributes) and expect error" -) +@reporter.step("Try to get object via http (pass http_request and optional attributes) and expect error") def try_to_get_object_via_passed_request_and_expect_error( cid: str, oid: str, diff --git a/src/frostfs_testlib/steps/network.py b/src/frostfs_testlib/steps/network.py index a865461..64e235a 100644 --- a/src/frostfs_testlib/steps/network.py +++ b/src/frostfs_testlib/steps/network.py @@ -1,9 +1,7 @@ -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.testing.test_control import retry -reporter = get_reporter() - class IpTablesHelper: @staticmethod @@ -21,11 +19,7 @@ class IpTablesHelper: @staticmethod def restore_input_traffic_to_port(node: ClusterNode) -> None: shell = node.host.get_shell() - ports = ( - shell.exec("iptables -L --numeric | grep DROP | awk 
'{print $7}'") - .stdout.strip() - .split("\n") - ) + ports = shell.exec("iptables -L --numeric | grep DROP | awk '{print $7}'").stdout.strip().split("\n") if ports[0] == "": return for port in ports: @@ -34,11 +28,7 @@ class IpTablesHelper: @staticmethod def restore_input_traffic_to_node(node: ClusterNode) -> None: shell = node.host.get_shell() - unlock_ip = ( - shell.exec("iptables -L --numeric | grep DROP | awk '{print $4}'") - .stdout.strip() - .split("\n") - ) + unlock_ip = shell.exec("iptables -L --numeric | grep DROP | awk '{print $4}'").stdout.strip().split("\n") if unlock_ip[0] == "": return for ip in unlock_ip: @@ -47,17 +37,17 @@ class IpTablesHelper: # TODO Move class to HOST class IfUpDownHelper: - @reporter.step_deco("Down {interface} to {node}") + @reporter.step("Down {interface} to {node}") def down_interface(self, node: ClusterNode, interface: str) -> None: shell = node.host.get_shell() shell.exec(f"ifdown {interface}") - @reporter.step_deco("Up {interface} to {node}") + @reporter.step("Up {interface} to {node}") def up_interface(self, node: ClusterNode, interface: str) -> None: shell = node.host.get_shell() shell.exec(f"ifup {interface}") - @reporter.step_deco("Up all interface to {node}") + @reporter.step("Up all interface to {node}") def up_all_interface(self, node: ClusterNode) -> None: shell = node.host.get_shell() interfaces = list(node.host.config.interfaces.keys()) @@ -65,7 +55,7 @@ class IfUpDownHelper: for name_interface in interfaces: self.check_state_up(node, name_interface) - @reporter.step_deco("Down all interface to {node}") + @reporter.step("Down all interface to {node}") def down_all_interface(self, node: ClusterNode) -> None: shell = node.host.get_shell() interfaces = list(node.host.config.interfaces.keys()) @@ -73,12 +63,10 @@ class IfUpDownHelper: for name_interface in interfaces: self.check_state_down(node, name_interface) - @reporter.step_deco("Check {node} to {interface}") + @reporter.step("Check {node} to {interface}") def check_state(self, node: ClusterNode, interface: str) -> str: shell = node.host.get_shell() - return shell.exec( - f"ip link show {interface} | sed -z 's/.*state \(.*\) mode .*/\\1/'" - ).stdout.strip() + return shell.exec(f"ip link show {interface} | sed -z 's/.*state \(.*\) mode .*/\\1/'").stdout.strip() @retry(max_attempts=5, sleep_interval=5, expected_result="UP") def check_state_up(self, node: ClusterNode, interface: str) -> str: diff --git a/src/frostfs_testlib/steps/node_management.py b/src/frostfs_testlib/steps/node_management.py index d91721c..28e3820 100644 --- a/src/frostfs_testlib/steps/node_management.py +++ b/src/frostfs_testlib/steps/node_management.py @@ -6,13 +6,9 @@ from dataclasses import dataclass from time import sleep from typing import Optional +from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsAdm, FrostfsCli -from frostfs_testlib.reporter import get_reporter -from frostfs_testlib.resources.cli import ( - FROSTFS_ADM_CONFIG_PATH, - FROSTFS_ADM_EXEC, - FROSTFS_CLI_EXEC, -) +from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.shell import Shell from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align @@ -20,7 +16,6 @@ from frostfs_testlib.storage.cluster import Cluster, StorageNode from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate from frostfs_testlib.utils import datetime_utils -reporter = get_reporter() logger = 
logging.getLogger("NeoLogger") @@ -40,7 +35,7 @@ class HealthStatus: return HealthStatus(network, health) -@reporter.step_deco("Get Locode from random storage node") +@reporter.step("Get Locode from random storage node") def get_locode_from_random_node(cluster: Cluster) -> str: node = random.choice(cluster.services(StorageNode)) locode = node.get_un_locode() @@ -48,7 +43,7 @@ def get_locode_from_random_node(cluster: Cluster) -> str: return locode -@reporter.step_deco("Healthcheck for storage node {node}") +@reporter.step("Healthcheck for storage node {node}") def storage_node_healthcheck(node: StorageNode) -> HealthStatus: """ The function returns storage node's health status. @@ -62,7 +57,7 @@ def storage_node_healthcheck(node: StorageNode) -> HealthStatus: return HealthStatus.from_stdout(output) -@reporter.step_deco("Set status for {node}") +@reporter.step("Set status for {node}") def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) -> None: """ The function sets particular status for given node. @@ -75,7 +70,7 @@ def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) -> _run_control_command_with_retries(node, command, retries) -@reporter.step_deco("Get netmap snapshot") +@reporter.step("Get netmap snapshot") def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str: """ The function returns string representation of netmap snapshot. @@ -95,7 +90,7 @@ def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str: ).stdout -@reporter.step_deco("Get shard list for {node}") +@reporter.step("Get shard list for {node}") def node_shard_list(node: StorageNode) -> list[str]: """ The function returns list of shards for specified storage node. @@ -109,7 +104,7 @@ def node_shard_list(node: StorageNode) -> list[str]: return re.findall(r"Shard (.*):", output) -@reporter.step_deco("Shard set for {node}") +@reporter.step("Shard set for {node}") def node_shard_set_mode(node: StorageNode, shard: str, mode: str) -> str: """ The function sets mode for specified shard. @@ -120,7 +115,7 @@ def node_shard_set_mode(node: StorageNode, shard: str, mode: str) -> str: return _run_control_command_with_retries(node, command) -@reporter.step_deco("Drop object from {node}") +@reporter.step("Drop object from {node}") def drop_object(node: StorageNode, cid: str, oid: str) -> str: """ The function drops object from specified node. 
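Many step titles in this file are format templates, e.g. "Drop object from {node}". The placeholders are evidently filled from the decorated function's arguments by name when the step is rendered; that behavior is an assumption about the testlib reporter, since the patch itself only renames the decorator. Sketch:

    from frostfs_testlib import reporter
    from frostfs_testlib.storage.cluster import StorageNode

    @reporter.step("Drop object from {node}")  # assumption: "{node}" is resolved from the call arguments
    def drop_object(node: StorageNode, cid: str, oid: str) -> str:
        ...  # body as in the hunk below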
@@ -131,14 +126,14 @@ def drop_object(node: StorageNode, cid: str, oid: str) -> str: return _run_control_command_with_retries(node, command) -@reporter.step_deco("Delete data from host for node {node}") +@reporter.step("Delete data from host for node {node}") def delete_node_data(node: StorageNode) -> None: node.stop_service() node.host.delete_storage_node_data(node.name) time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) -@reporter.step_deco("Exclude node {node_to_exclude} from network map") +@reporter.step("Exclude node {node_to_exclude} from network map") def exclude_node_from_network_map( node_to_exclude: StorageNode, alive_node: StorageNode, @@ -154,12 +149,10 @@ def exclude_node_from_network_map( wait_for_epochs_align(shell, cluster) snapshot = get_netmap_snapshot(node=alive_node, shell=shell) - assert ( - node_netmap_key not in snapshot - ), f"Expected node with key {node_netmap_key} to be absent in network map" + assert node_netmap_key not in snapshot, f"Expected node with key {node_netmap_key} to be absent in network map" -@reporter.step_deco("Include node {node_to_include} into network map") +@reporter.step("Include node {node_to_include} into network map") def include_node_to_network_map( node_to_include: StorageNode, alive_node: StorageNode, @@ -178,37 +171,29 @@ def include_node_to_network_map( check_node_in_map(node_to_include, shell, alive_node) -@reporter.step_deco("Check node {node} in network map") -def check_node_in_map( - node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None -) -> None: +@reporter.step("Check node {node} in network map") +def check_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None: alive_node = alive_node or node node_netmap_key = node.get_wallet_public_key() logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}") snapshot = get_netmap_snapshot(alive_node, shell) - assert ( - node_netmap_key in snapshot - ), f"Expected node with key {node_netmap_key} to be in network map" + assert node_netmap_key in snapshot, f"Expected node with key {node_netmap_key} to be in network map" -@reporter.step_deco("Check node {node} NOT in network map") -def check_node_not_in_map( - node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None -) -> None: +@reporter.step("Check node {node} NOT in network map") +def check_node_not_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None: alive_node = alive_node or node node_netmap_key = node.get_wallet_public_key() logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}") snapshot = get_netmap_snapshot(alive_node, shell) - assert ( - node_netmap_key not in snapshot - ), f"Expected node with key {node_netmap_key} to be NOT in network map" + assert node_netmap_key not in snapshot, f"Expected node with key {node_netmap_key} to be NOT in network map" -@reporter.step_deco("Wait for node {node} is ready") +@reporter.step("Wait for node {node} to be ready") def wait_for_node_to_be_ready(node: StorageNode) -> None: timeout, attempts = 30, 6 for _ in range(attempts): @@ -219,12 +204,10 @@ def wait_for_node_to_be_ready(node: StorageNode) -> None: except Exception as err: logger.warning(f"Node {node} is not ready:\n{err}") sleep(timeout) - raise AssertionError( - f"Node {node} hasn't gone to the READY state after {timeout * attempts} seconds" - ) + raise AssertionError(f"Node {node} hasn't gone to the READY state after {timeout * attempts} seconds") -@reporter.step_deco("Remove nodes from network map trough cli-adm morph command") +@reporter.step("Remove nodes from network map through cli-adm morph command") def remove_nodes_from_map_morph( shell: Shell, cluster: Cluster, diff --git a/src/frostfs_testlib/steps/payment_neogo.py b/src/frostfs_testlib/steps/payment_neogo.py index 7fe0b4d..8e78cca 100644 --- a/src/frostfs_testlib/steps/payment_neogo.py +++ b/src/frostfs_testlib/steps/payment_neogo.py @@ -8,21 +8,21 @@ from typing import Optional from neo3.wallet import utils as neo3_utils from neo3.wallet import wallet as neo3_wallet +from frostfs_testlib import reporter from frostfs_testlib.cli import NeoGo -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import NEOGO_EXECUTABLE from frostfs_testlib.resources.common import FROSTFS_CONTRACT, GAS_HASH, MORPH_BLOCK_TIME from frostfs_testlib.shell import Shell from frostfs_testlib.storage.dataclasses.frostfs_services import MorphChain from frostfs_testlib.utils import converting_utils, datetime_utils, wallet_utils -reporter = get_reporter() logger = logging.getLogger("NeoLogger") EMPTY_PASSWORD = "" TX_PERSIST_TIMEOUT = 15 # seconds ASSET_POWER_SIDECHAIN = 10**12 + def get_nns_contract_hash(morph_chain: MorphChain) -> str: return morph_chain.rpc_client.get_contract_state(1)["hash"] @@ -39,6 +39,7 @@ def get_contract_hash(morph_chain: MorphChain, resolve_name: str, shell: Shell) stack_data = json.loads(out.stdout.replace("\n", ""))["stack"][0]["value"] return bytes.decode(base64.b64decode(stack_data[0]["value"])) + def transaction_accepted(morph_chain: MorphChain, tx_id: str): """ This function returns True in case of accepted TX. @@ -62,7 +63,7 @@ def transaction_accepted(morph_chain: MorphChain, tx_id: str): return False -@reporter.step_deco("Get FrostFS Balance") +@reporter.step("Get FrostFS Balance") def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_password: str = ""): """ This function returns FrostFS balance for given wallet.
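The same module-level reporter is also used inline as a context manager elsewhere in this series (see the cluster_state_controller hunks further down); only the import changes, the call style does not. An illustrative fragment lifted from that usage:

    from frostfs_testlib import reporter

    def start_node_host(self, node):
        with reporter.step(f"Start host {node.host.config.address}"):
            node.host.start_host()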
@@ -82,7 +83,8 @@ def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_ logger.error(f"failed to get wallet balance: {out}") raise out -@reporter.step_deco("Transfer Gas") + +@reporter.step("Transfer Gas") def transfer_gas( shell: Shell, amount: int, @@ -111,16 +113,10 @@ def transfer_gas( """ wallet_from_path = wallet_from_path or morph_chain.get_wallet_path() wallet_from_password = ( - wallet_from_password - if wallet_from_password is not None - else morph_chain.get_wallet_password() - ) - address_from = address_from or wallet_utils.get_last_address_from_wallet( - wallet_from_path, wallet_from_password - ) - address_to = address_to or wallet_utils.get_last_address_from_wallet( - wallet_to_path, wallet_to_password + wallet_from_password if wallet_from_password is not None else morph_chain.get_wallet_password() ) + address_from = address_from or wallet_utils.get_last_address_from_wallet(wallet_from_path, wallet_from_password) + address_to = address_to or wallet_utils.get_last_address_from_wallet(wallet_to_path, wallet_to_password) neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE) out = neogo.nep17.transfer( @@ -141,7 +137,7 @@ def transfer_gas( time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME)) -@reporter.step_deco("Get Sidechain Balance") +@reporter.step("Get Sidechain Balance") def get_sidechain_balance(morph_chain: MorphChain, address: str): resp = morph_chain.rpc_client.get_nep17_balances(address=address) logger.info(f"Got getnep17balances response: {resp}") diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py index d746337..1d7adfa 100644 --- a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3/s3_helper.py @@ -8,27 +8,23 @@ from typing import Optional from dateutil.parser import parse +from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsAuthmate -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC from frostfs_testlib.resources.common import CREDENTIALS_CREATE_TIMEOUT from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus from frostfs_testlib.shell import CommandOptions, InteractiveInput, Shell from frostfs_testlib.shell.interfaces import SshCredentials -from frostfs_testlib.steps.cli.container import ( - search_container_by_name, - search_nodes_with_container, -) +from frostfs_testlib.steps.cli.container import search_container_by_name, search_nodes_with_container from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.utils.cli_utils import _run_with_passwd -reporter = get_reporter() logger = logging.getLogger("NeoLogger") -@reporter.step_deco("Expected all objects are presented in the bucket") +@reporter.step("Expected all objects are presented in the bucket") def check_objects_in_bucket( s3_client: S3ClientWrapper, bucket: str, @@ -37,13 +33,9 @@ def check_objects_in_bucket( ) -> None: unexpected_objects = unexpected_objects or [] bucket_objects = s3_client.list_objects(bucket) - assert len(bucket_objects) == len( - expected_objects - ), f"Expected {len(expected_objects)} objects in the bucket" + assert len(bucket_objects) == len(expected_objects), f"Expected {len(expected_objects)} objects in the bucket" for bucket_object in expected_objects: - assert ( - bucket_object in bucket_objects - ), f"Expected object 
{bucket_object} in objects list {bucket_objects}" for bucket_object in unexpected_objects: assert ( @@ -51,21 +43,17 @@ def check_objects_in_bucket( ), f"Expected object {bucket_object} not in objects list {bucket_objects}" -@reporter.step_deco("Try to get object and got error") -def try_to_get_objects_and_expect_error( - s3_client: S3ClientWrapper, bucket: str, object_keys: list -) -> None: +@reporter.step("Try to get objects and expect error") +def try_to_get_objects_and_expect_error(s3_client: S3ClientWrapper, bucket: str, object_keys: list) -> None: for obj in object_keys: try: s3_client.get_object(bucket, obj) raise AssertionError(f"Object {obj} found in bucket {bucket}") except Exception as err: - assert "The specified key does not exist" in str( - err - ), f"Expected error in exception {err}" + assert "The specified key does not exist" in str(err), f"Expected error in exception {err}" -@reporter.step_deco("Set versioning status to '{status}' for bucket '{bucket}'") +@reporter.step("Set versioning status to '{status}' for bucket '{bucket}'") def set_bucket_versioning(s3_client: S3ClientWrapper, bucket: str, status: VersioningStatus): if status == VersioningStatus.UNDEFINED: return @@ -83,12 +71,8 @@ def object_key_from_file_path(full_path: str) -> str: def assert_tags( actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None ) -> None: - expected_tags = ( - [{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else [] - ) - unexpected_tags = ( - [{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else [] - ) + expected_tags = [{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else [] + unexpected_tags = [{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else [] if expected_tags == []: assert not actual_tags, f"Expected there is no tags, got {actual_tags}" assert len(expected_tags) == len(actual_tags) @@ -98,7 +82,7 @@ def assert_tags( assert tag not in actual_tags, f"Tag {tag} should not be in {actual_tags}" -@reporter.step_deco("Expected all tags are presented in object") +@reporter.step("Expected all tags are presented in object") def check_tags_by_object( s3_client: S3ClientWrapper, bucket: str, key: str, @@ -107,12 +91,10 @@ def check_tags_by_object( unexpected_tags: Optional[list] = None, ) -> None: actual_tags = s3_client.get_object_tagging(bucket, key) - assert_tags( - expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags - ) + assert_tags(expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags) -@reporter.step_deco("Expected all tags are presented in bucket") +@reporter.step("Expected all tags are presented in bucket") def check_tags_by_bucket( s3_client: S3ClientWrapper, bucket: str, @@ -120,9 +102,7 @@ def check_tags_by_bucket( unexpected_tags: Optional[list] = None, ) -> None: actual_tags = s3_client.get_bucket_tagging(bucket) - assert_tags( - expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags - ) + assert_tags(expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags) def assert_object_lock_mode( @@ -135,25 +115,19 @@ def assert_object_lock_mode( retain_period: Optional[int] = None, ): object_dict = s3_client.get_object(bucket, file_name, full_output=True) - assert (
object_dict.get("ObjectLockMode") == object_lock_mode - ), f"Expected Object Lock Mode is {object_lock_mode}" + assert object_dict.get("ObjectLockMode") == object_lock_mode, f"Expected Object Lock Mode is {object_lock_mode}" assert ( object_dict.get("ObjectLockLegalHoldStatus") == legal_hold_status ), f"Expected Object Lock Legal Hold Status is {legal_hold_status}" object_retain_date = object_dict.get("ObjectLockRetainUntilDate") - retain_date = ( - parse(object_retain_date) if isinstance(object_retain_date, str) else object_retain_date - ) + retain_date = parse(object_retain_date) if isinstance(object_retain_date, str) else object_retain_date if retain_until_date: assert retain_date.strftime("%Y-%m-%dT%H:%M:%S") == retain_until_date.strftime( "%Y-%m-%dT%H:%M:%S" ), f'Expected Object Lock Retain Until Date is {str(retain_until_date.strftime("%Y-%m-%dT%H:%M:%S"))}' elif retain_period: last_modify_date = object_dict.get("LastModified") - last_modify = ( - parse(last_modify_date) if isinstance(last_modify_date, str) else last_modify_date - ) + last_modify = parse(last_modify_date) if isinstance(last_modify_date, str) else last_modify_date assert ( retain_date - last_modify + timedelta(seconds=1) ).days == retain_period, f"Expected retention period is {retain_period} days" @@ -187,7 +161,7 @@ def assert_s3_acl(acl_grants: list, permitted_users: str): logger.error("FULL_CONTROL is given to All Users") -@reporter.step_deco("Init S3 Credentials") +@reporter.step("Init S3 Credentials") def init_s3_credentials( wallet: WalletInfo, shell: Shell, @@ -213,24 +187,18 @@ def init_s3_credentials( container_placement_policy=container_placement_policy, ).stdout aws_access_key_id = str( - re.search(r"access_key_id.*:\s.(?P\w*)", issue_secret_output).group( - "aws_access_key_id" - ) + re.search(r"access_key_id.*:\s.(?P\w*)", issue_secret_output).group("aws_access_key_id") ) aws_secret_access_key = str( - re.search( - r"secret_access_key.*:\s.(?P\w*)", issue_secret_output - ).group("aws_secret_access_key") - ) - cid = str( - re.search(r"container_id.*:\s.(?P\w*)", issue_secret_output).group( - "container_id" + re.search(r"secret_access_key.*:\s.(?P\w*)", issue_secret_output).group( + "aws_secret_access_key" ) ) + cid = str(re.search(r"container_id.*:\s.(?P\w*)", issue_secret_output).group("container_id")) return cid, aws_access_key_id, aws_secret_access_key -@reporter.step_deco("Delete bucket with all objects") +@reporter.step("Delete bucket with all objects") def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str): versioning_status = s3_client.get_bucket_versioning_status(bucket) if versioning_status == VersioningStatus.ENABLED.value: @@ -255,7 +223,7 @@ def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str): s3_client.delete_bucket(bucket) -@reporter.step_deco("Search nodes bucket") +@reporter.step("Search nodes bucket") def search_nodes_with_bucket( cluster: Cluster, bucket_name: str, @@ -264,7 +232,5 @@ def search_nodes_with_bucket( endpoint: str, ) -> list[ClusterNode]: cid = search_container_by_name(wallet=wallet, name=bucket_name, shell=shell, endpoint=endpoint) - nodes_list = search_nodes_with_container( - wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster - ) + nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster) return nodes_list diff --git a/src/frostfs_testlib/steps/session_token.py b/src/frostfs_testlib/steps/session_token.py index b82d0e2..6c87cac 100644 --- 
a/src/frostfs_testlib/steps/session_token.py +++ b/src/frostfs_testlib/steps/session_token.py @@ -7,8 +7,8 @@ from dataclasses import dataclass from enum import Enum from typing import Any, Optional +from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsCli -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell @@ -17,7 +17,6 @@ from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.readable import HumanReadableEnum from frostfs_testlib.utils import json_utils, wallet_utils -reporter = get_reporter() logger = logging.getLogger("NeoLogger") UNRELATED_KEY = "unrelated key in the session" @@ -50,7 +49,7 @@ class Lifetime: iat: int = 0 -@reporter.step_deco("Generate Session Token") +@reporter.step("Generate Session Token") def generate_session_token( owner_wallet: WalletInfo, session_wallet: WalletInfo, @@ -72,9 +71,7 @@ def generate_session_token( file_path = os.path.join(tokens_dir, str(uuid.uuid4())) - pub_key_64 = wallet_utils.get_wallet_public_key( - session_wallet.path, session_wallet.password, "base64" - ) + pub_key_64 = wallet_utils.get_wallet_public_key(session_wallet.path, session_wallet.password, "base64") lifetime = lifetime or Lifetime() @@ -99,7 +96,7 @@ def generate_session_token( return file_path -@reporter.step_deco("Generate Session Token For Container") +@reporter.step("Generate Session Token For Container") def generate_container_session_token( owner_wallet: WalletInfo, session_wallet: WalletInfo, @@ -126,11 +123,7 @@ def generate_container_session_token( "container": { "verb": verb.value, "wildcard": cid is None, - **( - {"containerID": {"value": f"{json_utils.encode_for_json(cid)}"}} - if cid is not None - else {} - ), + **({"containerID": {"value": f"{json_utils.encode_for_json(cid)}"}} if cid is not None else {}), }, } @@ -143,7 +136,7 @@ def generate_container_session_token( ) -@reporter.step_deco("Generate Session Token For Object") +@reporter.step("Generate Session Token For Object") def generate_object_session_token( owner_wallet: WalletInfo, session_wallet: WalletInfo, @@ -185,7 +178,7 @@ def generate_object_session_token( ) -@reporter.step_deco("Get signed token for container session") +@reporter.step("Get signed token for container session") def get_container_signed_token( owner_wallet: WalletInfo, user_wallet: WalletInfo, @@ -207,7 +200,7 @@ def get_container_signed_token( return sign_session_token(shell, session_token_file, owner_wallet) -@reporter.step_deco("Get signed token for object session") +@reporter.step("Get signed token for object session") def get_object_signed_token( owner_wallet: WalletInfo, user_wallet: WalletInfo, @@ -234,7 +227,7 @@ def get_object_signed_token( return sign_session_token(shell, session_token_file, owner_wallet) -@reporter.step_deco("Create Session Token") +@reporter.step("Create Session Token") def create_session_token( shell: Shell, owner: str, @@ -265,7 +258,7 @@ def create_session_token( return session_token -@reporter.step_deco("Sign Session Token") +@reporter.step("Sign Session Token") def sign_session_token(shell: Shell, session_token_file: str, wlt: WalletInfo) -> str: """ This function signs the session token by the given wallet. @@ -279,10 +272,6 @@ def sign_session_token(shell: Shell, session_token_file: str, wlt: WalletInfo) - The path to the signed token. 
""" signed_token_file = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) - frostfscli = FrostfsCli( - shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG - ) - frostfscli.util.sign_session_token( - wallet=wlt.path, from_file=session_token_file, to_file=signed_token_file - ) + frostfscli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG) + frostfscli.util.sign_session_token(wallet=wlt.path, from_file=session_token_file, to_file=signed_token_file) return signed_token_file diff --git a/src/frostfs_testlib/steps/storage_object.py b/src/frostfs_testlib/steps/storage_object.py index 7776754..ce1bb94 100644 --- a/src/frostfs_testlib/steps/storage_object.py +++ b/src/frostfs_testlib/steps/storage_object.py @@ -3,7 +3,7 @@ from time import sleep import pytest -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.resources.error_patterns import OBJECT_ALREADY_REMOVED from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import delete_object, get_object @@ -12,16 +12,13 @@ from frostfs_testlib.steps.tombstone import verify_head_tombstone from frostfs_testlib.storage.cluster import Cluster from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo -reporter = get_reporter() logger = logging.getLogger("NeoLogger") CLEANUP_TIMEOUT = 10 -@reporter.step_deco("Delete Objects") -def delete_objects( - storage_objects: list[StorageObjectInfo], shell: Shell, cluster: Cluster -) -> None: +@reporter.step("Delete Objects") +def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell, cluster: Cluster) -> None: """ Deletes given storage objects. diff --git a/src/frostfs_testlib/steps/storage_policy.py b/src/frostfs_testlib/steps/storage_policy.py index eca25d2..d2202a4 100644 --- a/src/frostfs_testlib/steps/storage_policy.py +++ b/src/frostfs_testlib/steps/storage_policy.py @@ -6,7 +6,7 @@ """ import logging -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import head_object @@ -14,14 +14,11 @@ from frostfs_testlib.steps.complex_object_actions import get_last_object from frostfs_testlib.storage.cluster import StorageNode from frostfs_testlib.utils import string_utils -reporter = get_reporter() logger = logging.getLogger("NeoLogger") -@reporter.step_deco("Get Object Copies") -def get_object_copies( - complexity: str, wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode] -) -> int: +@reporter.step("Get Object Copies") +def get_object_copies(complexity: str, wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: """ The function performs requests to all nodes of the container and finds out if they store a copy of the object. The procedure is @@ -45,10 +42,8 @@ def get_object_copies( ) -@reporter.step_deco("Get Simple Object Copies") -def get_simple_object_copies( - wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode] -) -> int: +@reporter.step("Get Simple Object Copies") +def get_simple_object_copies(wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: """ To figure out the number of a simple object copies, only direct HEAD requests should be made to the every node of the container. 
@@ -66,9 +61,7 @@ def get_simple_object_copies( copies = 0 for node in nodes: try: - response = head_object( - wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True - ) + response = head_object(wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True) if response: logger.info(f"Found object {oid} on node {node}") copies += 1 @@ -78,10 +71,8 @@ def get_simple_object_copies( return copies -@reporter.step_deco("Get Complex Object Copies") -def get_complex_object_copies( - wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode] -) -> int: +@reporter.step("Get Complex Object Copies") +def get_complex_object_copies(wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: """ To figure out the number of a complex object copies, we firstly need to retrieve its Last object. We consider that the number of @@ -102,10 +93,8 @@ def get_complex_object_copies( return get_simple_object_copies(wallet, cid, last_oid, shell, nodes) -@reporter.step_deco("Get Nodes With Object") -def get_nodes_with_object( - cid: str, oid: str, shell: Shell, nodes: list[StorageNode] -) -> list[StorageNode]: +@reporter.step("Get Nodes With Object") +def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> list[StorageNode]: """ The function returns list of nodes which store the given object. @@ -141,7 +130,7 @@ def get_nodes_with_object( return nodes_list -@reporter.step_deco("Get Nodes Without Object") +@reporter.step("Get Nodes Without Object") def get_nodes_without_object( wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode] ) -> list[StorageNode]: @@ -160,9 +149,7 @@ def get_nodes_without_object( nodes_list = [] for node in nodes: try: - res = head_object( - wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True - ) + res = head_object(wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True) if res is None: nodes_list.append(node) except Exception as err: diff --git a/src/frostfs_testlib/steps/tombstone.py b/src/frostfs_testlib/steps/tombstone.py index a46cf77..b468c93 100644 --- a/src/frostfs_testlib/steps/tombstone.py +++ b/src/frostfs_testlib/steps/tombstone.py @@ -3,18 +3,15 @@ import logging from neo3.wallet import wallet -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import head_object -reporter = get_reporter() logger = logging.getLogger("NeoLogger") -@reporter.step_deco("Verify Head Tombstone") -def verify_head_tombstone( - wallet_path: str, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str -): +@reporter.step("Verify Head Tombstone") +def verify_head_tombstone(wallet_path: str, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str): header = head_object(wallet_path, cid, oid_ts, shell=shell, endpoint=endpoint)["header"] s_oid = header["sessionToken"]["body"]["object"]["target"]["objects"] @@ -30,12 +27,6 @@ def verify_head_tombstone( assert header["ownerID"] == addr, "Tombstone Owner ID is wrong" assert header["objectType"] == "TOMBSTONE", "Header Type isn't Tombstone" - assert ( - header["sessionToken"]["body"]["object"]["verb"] == "DELETE" - ), "Header Session Type isn't DELETE" - assert ( - header["sessionToken"]["body"]["object"]["target"]["container"] == cid - ), "Header Session ID is wrong" - assert ( - oid in header["sessionToken"]["body"]["object"]["target"]["objects"] - ), "Header 
Session OID is wrong" + assert header["sessionToken"]["body"]["object"]["verb"] == "DELETE", "Header Session Type isn't DELETE" + assert header["sessionToken"]["body"]["object"]["target"]["container"] == cid, "Header Session ID is wrong" + assert oid in header["sessionToken"]["body"]["object"]["target"]["objects"], "Header Session OID is wrong" diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index 02601ac..313215a 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -4,9 +4,9 @@ import re import yaml from yarl import URL +from frostfs_testlib import reporter from frostfs_testlib.hosting import Host, Hosting from frostfs_testlib.hosting.config import ServiceConfig -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.storage import get_service_registry from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml from frostfs_testlib.storage.configuration.service_configuration import ServiceConfiguration @@ -16,8 +16,6 @@ from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces from frostfs_testlib.storage.service_registry import ServiceRegistry -reporter = get_reporter() - class ClusterNode: """ diff --git a/src/frostfs_testlib/storage/configuration/service_configuration.py b/src/frostfs_testlib/storage/configuration/service_configuration.py index 1aa7846..f7b3be7 100644 --- a/src/frostfs_testlib/storage/configuration/service_configuration.py +++ b/src/frostfs_testlib/storage/configuration/service_configuration.py @@ -4,13 +4,11 @@ from typing import Any import yaml -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.shell.interfaces import CommandOptions from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml from frostfs_testlib.storage.dataclasses.node_base import ServiceClass -reporter = get_reporter() - class ServiceConfiguration(ServiceConfigurationYml): def __init__(self, service: "ServiceClass") -> None: diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index 8ecada8..003bb6b 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -2,18 +2,16 @@ import copy from typing import Optional import frostfs_testlib.resources.optionals as optionals +from frostfs_testlib import reporter from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner from frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType from frostfs_testlib.load.load_report import LoadReport from frostfs_testlib.load.load_verifiers import LoadVerifier -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.test_control import run_optionally -reporter = get_reporter() - class BackgroundLoadController: k6_dir: str @@ -86,7 +84,7 @@ class BackgroundLoadController: return all_endpoints[load_type][endpoint_selection_strategy] @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Prepare 
load instances") + @reporter.step("Prepare load instances") def prepare(self): self.endpoints = self._get_endpoints(self.load_params.load_type, self.load_params.endpoint_selection_strategy) self.runner.prepare(self.load_params, self.cluster_nodes, self.nodes_under_load, self.k6_dir) @@ -99,7 +97,7 @@ class BackgroundLoadController: self.started = True @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Stop load") + @reporter.step("Stop load") def stop(self): self.runner.stop() @@ -108,7 +106,7 @@ class BackgroundLoadController: return self.runner.is_running @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Reset load") + @reporter.step("Reset load") def _reset_for_consequent_load(self): """This method is required if we want to run multiple loads during test run. Raise load counter by 1 and append it to load_id @@ -118,7 +116,7 @@ class BackgroundLoadController: self.load_params.set_id(f"{self.load_params.load_id}_{self.load_counter}") @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Startup load") + @reporter.step("Startup load") def startup(self): self.prepare() self.preset() @@ -129,7 +127,7 @@ class BackgroundLoadController: self.runner.preset() @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Stop and get results of load") + @reporter.step("Stop and get results of load") def teardown(self, load_report: Optional[LoadReport] = None): if not self.started: return @@ -141,7 +139,7 @@ class BackgroundLoadController: load_report.add_summaries(self.load_summaries) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Run post-load verification") + @reporter.step("Run post-load verification") def verify(self): try: load_issues = self._collect_load_issues() @@ -153,7 +151,7 @@ class BackgroundLoadController: self._reset_for_consequent_load() @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Collect load issues") + @reporter.step("Collect load issues") def _collect_load_issues(self): verifier = LoadVerifier(self.load_params) return verifier.collect_load_issues(self.load_summaries) @@ -163,7 +161,7 @@ class BackgroundLoadController: self.runner.wait_until_finish(soft_timeout) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) - @reporter.step_deco("Verify loaded objects") + @reporter.step("Verify loaded objects") def _run_verify_scenario(self) -> list[str]: self.verification_params = LoadParams( verify_clients=self.load_params.verify_clients, diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 7020671..35ab6c1 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -4,12 +4,12 @@ import time from typing import TypeVar import frostfs_testlib.resources.optionals as optionals +from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsAdm, FrostfsCli from frostfs_testlib.cli.netmap_parser import NetmapParser from frostfs_testlib.healthcheck.interfaces import Healthcheck from frostfs_testlib.hosting.interfaces import HostStatus from frostfs_testlib.plugins import load_all -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC from frostfs_testlib.resources.common import 
DEFAULT_WALLET_CONFIG, MORPH_BLOCK_TIME from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider @@ -21,7 +21,6 @@ from frostfs_testlib.testing import parallel from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for_success from frostfs_testlib.utils.datetime_utils import parse_time -reporter = get_reporter() logger = logging.getLogger("NeoLogger") if_up_down_helper = IfUpDownHelper() @@ -76,7 +75,7 @@ class ClusterStateController: return online_svc @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Stop host of node {node}") + @reporter.step("Stop host of node {node}") def stop_node_host(self, node: ClusterNode, mode: str): # Drop ssh connection for this node before shutdown provider = SshConnectionProvider() @@ -88,7 +87,7 @@ class ClusterStateController: self._wait_for_host_offline(node) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Shutdown whole cluster") + @reporter.step("Shutdown whole cluster") def shutdown_cluster(self, mode: str, reversed_order: bool = False): nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes @@ -105,7 +104,7 @@ class ClusterStateController: self._wait_for_host_offline(node) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start host of node {node}") + @reporter.step("Start host of node {node}") def start_node_host(self, node: ClusterNode, startup_healthcheck: bool = True): with reporter.step(f"Start host {node.host.config.address}"): node.host.start_host() @@ -115,7 +114,7 @@ class ClusterStateController: self.wait_startup_healthcheck() @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start stopped hosts") + @reporter.step("Start stopped hosts") def start_stopped_hosts(self, reversed_order: bool = False): if not self.stopped_nodes: return @@ -133,35 +132,35 @@ class ClusterStateController: self.wait_after_storage_startup() @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Detach disk {device} at {mountpoint} on node {node}") + @reporter.step("Detach disk {device} at {mountpoint} on node {node}") def detach_disk(self, node: StorageNode, device: str, mountpoint: str): disk_controller = self._get_disk_controller(node, device, mountpoint) self.detached_disks[disk_controller.id] = disk_controller disk_controller.detach() @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Attach disk {device} at {mountpoint} on node {node}") + @reporter.step("Attach disk {device} at {mountpoint} on node {node}") def attach_disk(self, node: StorageNode, device: str, mountpoint: str): disk_controller = self._get_disk_controller(node, device, mountpoint) disk_controller.attach() self.detached_disks.pop(disk_controller.id, None) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Restore detached disks") + @reporter.step("Restore detached disks") def restore_disks(self): for disk_controller in self.detached_disks.values(): disk_controller.attach() self.detached_disks = {} @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Stop all {service_type} services") + @reporter.step("Stop all {service_type} services") def stop_services_of_type(self, service_type: type[ServiceClass], mask: bool = True): services = self.cluster.services(service_type) self.stopped_services.update(services) parallel([service.stop_service for service in services], mask=mask) 
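For orientation, here is a minimal usage sketch of the service-type helpers above, as a failover test might call them; the ClusterStateController fixture wiring is assumed and is not part of this patch:

# Hypothetical test fragment; controller setup is assumed.
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode

def test_storage_outage(cluster_state_controller: ClusterStateController):
    # Stop every StorageNode service in parallel; the controller tracks
    # stopped services so they can be restored later.
    cluster_state_controller.stop_services_of_type(StorageNode)
    # ... exercise the cluster while storage is down ...
    # Bring the services of that type back up, again in parallel.
    cluster_state_controller.start_services_of_type(StorageNode)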
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start all {service_type} services") + @reporter.step("Start all {service_type} services") def start_services_of_type(self, service_type: type[ServiceClass]): services = self.cluster.services(service_type) parallel([service.start_service for service in services]) @@ -176,24 +175,24 @@ class ClusterStateController: result = s3gate.get_metric("frostfs_s3_gw_pool_current_nodes") assert 'address="127.0.0.1' in result.stdout, "S3Gate should connect to local storage node" - @reporter.step_deco("Wait for S3Gates reconnection to local storage") + @reporter.step("Wait for S3Gates reconnection to local storage") def wait_s3gates(self): online_s3gates = self._get_online(S3Gate) if online_s3gates: parallel(self.wait_s3gate, online_s3gates) - @reporter.step_deco("Wait for cluster startup healtcheck") + @reporter.step("Wait for cluster startup healtcheck") def wait_startup_healthcheck(self): nodes = self.cluster.nodes(self._get_online(StorageNode)) parallel(self.healthcheck.startup_healthcheck, nodes) - @reporter.step_deco("Wait for storage reconnection to the system") + @reporter.step("Wait for storage reconnection to the system") def wait_after_storage_startup(self): self.wait_startup_healthcheck() self.wait_s3gates() @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start all stopped services") + @reporter.step("Start all stopped services") def start_all_stopped_services(self): stopped_storages = self._get_stopped_by_type(StorageNode) parallel([service.start_service for service in self.stopped_services]) @@ -203,21 +202,21 @@ class ClusterStateController: self.wait_after_storage_startup() @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Stop {service_type} service on {node}") + @reporter.step("Stop {service_type} service on {node}") def stop_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass], mask: bool = True): service = node.service(service_type) service.stop_service(mask) self.stopped_services.add(service) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start {service_type} service on {node}") + @reporter.step("Start {service_type} service on {node}") def start_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass]): service = node.service(service_type) service.start_service() self.stopped_services.discard(service) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start all stopped {service_type} services") + @reporter.step("Start all stopped {service_type} services") def start_stopped_services_of_type(self, service_type: type[ServiceClass]): stopped_svc = self._get_stopped_by_type(service_type) if not stopped_svc: @@ -231,7 +230,7 @@ class ClusterStateController: # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Stop all storage services on cluster") + @reporter.step("Stop all storage services on cluster") def stop_all_storage_services(self, reversed_order: bool = False): nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes @@ -240,7 +239,7 @@ class ClusterStateController: # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Stop all S3 gates on cluster") + @reporter.step("Stop all S3 gates on cluster") def stop_all_s3_gates(self, reversed_order: bool = False): nodes = reversed(self.cluster.cluster_nodes) if reversed_order else 
self.cluster.cluster_nodes @@ -249,42 +248,42 @@ class ClusterStateController: # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Stop storage service on {node}") + @reporter.step("Stop storage service on {node}") def stop_storage_service(self, node: ClusterNode, mask: bool = True): self.stop_service_of_type(node, StorageNode, mask) # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start storage service on {node}") + @reporter.step("Start storage service on {node}") def start_storage_service(self, node: ClusterNode): self.start_service_of_type(node, StorageNode) # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start stopped storage services") + @reporter.step("Start stopped storage services") def start_stopped_storage_services(self): self.start_stopped_services_of_type(StorageNode) # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Stop s3 gate on {node}") + @reporter.step("Stop s3 gate on {node}") def stop_s3_gate(self, node: ClusterNode, mask: bool = True): self.stop_service_of_type(node, S3Gate, mask) # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start s3 gate on {node}") + @reporter.step("Start s3 gate on {node}") def start_s3_gate(self, node: ClusterNode): self.start_service_of_type(node, S3Gate) # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start stopped S3 gates") + @reporter.step("Start stopped S3 gates") def start_stopped_s3_gates(self): self.start_stopped_services_of_type(S3Gate) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Suspend {process_name} service in {node}") + @reporter.step("Suspend {process_name} service in {node}") def suspend_service(self, process_name: str, node: ClusterNode): node.host.wait_success_suspend_process(process_name) if self.suspended_services.get(process_name): @@ -293,20 +292,20 @@ class ClusterStateController: self.suspended_services[process_name] = [node] @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Resume {process_name} service in {node}") + @reporter.step("Resume {process_name} service in {node}") def resume_service(self, process_name: str, node: ClusterNode): node.host.wait_success_resume_process(process_name) if self.suspended_services.get(process_name) and node in self.suspended_services[process_name]: self.suspended_services[process_name].remove(node) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Start suspend processes services") + @reporter.step("Start suspend processes services") def resume_suspended_services(self): for process_name, list_nodes in self.suspended_services.items(): [node.host.wait_success_resume_process(process_name) for node in list_nodes] self.suspended_services = {} - @reporter.step_deco("Drop traffic to {node}, with ports - {ports}, nodes - {block_nodes}") + @reporter.step("Drop traffic to {node}, with ports - {ports}, nodes - {block_nodes}") def drop_traffic( self, mode: str, @@ -327,7 +326,7 @@ class ClusterStateController: time.sleep(wakeup_timeout) self.dropped_traffic.append(node) - @reporter.step_deco("Ping traffic") + @reporter.step("Ping traffic") def ping_traffic( self, node: ClusterNode, @@ -343,7 +342,7 @@ class ClusterStateController: return False return True - @reporter.step_deco("Start traffic to {node}") + @reporter.step("Start traffic to {node}") 
def restore_traffic( self, mode: str, @@ -358,12 +357,12 @@ class ClusterStateController: case "nodes": IpTablesHelper.restore_input_traffic_to_node(node=node) - @reporter.step_deco("Restore blocked nodes") + @reporter.step("Restore blocked nodes") def restore_all_traffic(self): parallel(self._restore_traffic_to_node, self.dropped_traffic) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step_deco("Hard reboot host {node} via magic SysRq option") + @reporter.step("Hard reboot host {node} via magic SysRq option") def panic_reboot_host(self, node: ClusterNode, wait_for_return: bool = True, startup_healthcheck: bool = True): shell = node.host.get_shell() shell.exec('sudo sh -c "echo 1 > /proc/sys/kernel/sysrq"') @@ -383,14 +382,14 @@ class ClusterStateController: if startup_healthcheck: self.wait_startup_healthcheck() - @reporter.step_deco("Down {interface} to {nodes}") + @reporter.step("Down {interface} to {nodes}") def down_interface(self, nodes: list[ClusterNode], interface: str): for node in nodes: if_up_down_helper.down_interface(node=node, interface=interface) assert if_up_down_helper.check_state(node=node, interface=interface) == "DOWN" self.nodes_with_modified_interface.append(node) - @reporter.step_deco("Up {interface} to {nodes}") + @reporter.step("Up {interface} to {nodes}") def up_interface(self, nodes: list[ClusterNode], interface: str): for node in nodes: if_up_down_helper.up_interface(node=node, interface=interface) @@ -398,17 +397,17 @@ class ClusterStateController: if node in self.nodes_with_modified_interface: self.nodes_with_modified_interface.remove(node) - @reporter.step_deco("Restore interface") + @reporter.step("Restore interface") def restore_interfaces(self): for node in self.nodes_with_modified_interface: if_up_down_helper.up_all_interface(node) - @reporter.step_deco("Get node time") + @reporter.step("Get node time") def get_node_date(self, node: ClusterNode) -> datetime: shell = node.host.get_shell() return datetime.datetime.strptime(shell.exec("hwclock -r").stdout.strip(), "%Y-%m-%d %H:%M:%S.%f%z") - @reporter.step_deco("Set node time to {in_date}") + @reporter.step("Set node time to {in_date}") def change_node_date(self, node: ClusterNode, in_date: datetime) -> None: shell = node.host.get_shell() shell.exec(f"date -s @{time.mktime(in_date.timetuple())}") @@ -417,7 +416,7 @@ class ClusterStateController: with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a minute"): assert (self.get_node_date(node) - in_date) < datetime.timedelta(minutes=1) - @reporter.step_deco(f"Restore time") + @reporter.step(f"Restore time") def restore_node_date(self, node: ClusterNode) -> None: shell = node.host.get_shell() now_time = datetime.datetime.now(datetime.timezone.utc) @@ -425,14 +424,14 @@ class ClusterStateController: shell.exec(f"date -s @{time.mktime(now_time.timetuple())}") shell.exec("hwclock --systohc") - @reporter.step_deco("Change the synchronizer status to {status}") + @reporter.step("Change the synchronizer status to {status}") def set_sync_date_all_nodes(self, status: str): if status == "active": parallel(self._enable_date_synchronizer, self.cluster.cluster_nodes) return parallel(self._disable_date_synchronizer, self.cluster.cluster_nodes) - @reporter.step_deco("Set MaintenanceModeAllowed - {status}") + @reporter.step("Set MaintenanceModeAllowed - {status}") def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None: frostfs_adm = FrostfsAdm( shell=cluster_node.host.get_shell(), @@ -441,7 
+440,7 @@ class ClusterStateController: ) frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}") - @reporter.step_deco("Set mode node to {status}") + @reporter.step("Set mode node to {status}") def set_mode_node(self, cluster_node: ClusterNode, wallet: str, status: str, await_tick: bool = True) -> None: rpc_endpoint = cluster_node.storage_node.get_rpc_endpoint() control_endpoint = cluster_node.service(StorageNode).get_control_endpoint() @@ -465,8 +464,7 @@ class ClusterStateController: self.check_node_status(status=status, wallet=wallet, cluster_node=cluster_node) - @wait_for_success(80, 8) - @reporter.step_deco("Check status node, status - {status}") + @wait_for_success(80, 8, title="Wait for storage status become {status}") def check_node_status(self, status: str, wallet: str, cluster_node: ClusterNode): frostfs_cli = FrostfsCli( shell=self.shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG @@ -537,13 +535,13 @@ class ClusterStateController: interfaces.append(ip) return interfaces - @reporter.step_deco("Ping node") + @reporter.step("Ping node") def _ping_host(self, node: ClusterNode): options = CommandOptions(check=False) return self.shell.exec(f"ping {node.host.config.address} -c 1", options).return_code @retry(max_attempts=60, sleep_interval=5, expected_result=HostStatus.ONLINE) - @reporter.step_deco("Waiting for {node} to go online") + @reporter.step("Waiting for {node} to go online") def _wait_for_host_online(self, node: ClusterNode): try: ping_result = self._ping_host(node) @@ -555,7 +553,7 @@ class ClusterStateController: return HostStatus.OFFLINE @retry(max_attempts=60, sleep_interval=5, expected_result=HostStatus.OFFLINE) - @reporter.step_deco("Waiting for {node} to go offline") + @reporter.step("Waiting for {node} to go offline") def _wait_for_host_offline(self, node: ClusterNode): try: ping_result = self._ping_host(node) diff --git a/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py b/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py index 078d483..66f72d6 100644 --- a/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py +++ b/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py @@ -1,13 +1,11 @@ from typing import Any -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController, StateManager from frostfs_testlib.storage.dataclasses.node_base import ServiceClass from frostfs_testlib.testing import parallel -reporter = get_reporter() - class ConfigStateManager(StateManager): def __init__(self, cluster_state_controller: ClusterStateController) -> None: @@ -15,7 +13,7 @@ class ConfigStateManager(StateManager): self.services_with_changed_config: set[tuple[ClusterNode, ServiceClass]] = set() self.cluster = self.csc.cluster - @reporter.step_deco("Change configuration for {service_type} on all nodes") + @reporter.step("Change configuration for {service_type} on all nodes") def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any]): services = self.cluster.services(service_type) nodes = self.cluster.nodes(services) @@ -25,7 +23,7 @@ class ConfigStateManager(StateManager): parallel([node.config(service_type).set for node in nodes], values=values) self.csc.start_services_of_type(service_type) - @reporter.step_deco("Change 
configuration for {service_type} on {node}") + @reporter.step("Change configuration for {service_type} on {node}") def set_on_node(self, node: ClusterNode, service_type: type[ServiceClass], values: dict[str, Any]): self.services_with_changed_config.add((node, service_type)) @@ -33,7 +31,7 @@ class ConfigStateManager(StateManager): node.config(service_type).set(values) self.csc.start_service_of_type(node, service_type) - @reporter.step_deco("Revert all configuration changes") + @reporter.step("Revert all configuration changes") def revert_all(self): if not self.services_with_changed_config: return @@ -44,7 +42,7 @@ class ConfigStateManager(StateManager): self.csc.start_all_stopped_services() # TODO: parallel can't have multiple parallel_items :( - @reporter.step_deco("Revert all configuration {node_and_service}") + @reporter.step("Revert all configuration {node_and_service}") def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass]): node, service_type = node_and_service self.csc.stop_service_of_type(node, service_type) diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 4b9ffc2..ace0214 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -4,16 +4,14 @@ from typing import Optional, TypedDict, TypeVar import yaml +from frostfs_testlib import reporter from frostfs_testlib.hosting.config import ServiceConfig from frostfs_testlib.hosting.interfaces import Host -from frostfs_testlib.reporter import get_reporter from frostfs_testlib.shell.interfaces import CommandResult from frostfs_testlib.storage.constants import ConfigAttributes from frostfs_testlib.testing.readable import HumanReadableABC from frostfs_testlib.utils import wallet_utils -reporter = get_reporter() - @dataclass class NodeBase(HumanReadableABC): diff --git a/src/frostfs_testlib/testing/cluster_test_base.py b/src/frostfs_testlib/testing/cluster_test_base.py index 0676813..49c6afd 100644 --- a/src/frostfs_testlib/testing/cluster_test_base.py +++ b/src/frostfs_testlib/testing/cluster_test_base.py @@ -1,7 +1,7 @@ import time from typing import Optional -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.shell import Shell from frostfs_testlib.steps import epoch @@ -9,15 +9,13 @@ from frostfs_testlib.storage.cluster import Cluster from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode from frostfs_testlib.utils import datetime_utils -reporter = get_reporter() - # To skip adding every mandatory singleton dependency to EACH test function class ClusterTestBase: shell: Shell cluster: Cluster - @reporter.step_deco("Tick {epochs_to_tick} epochs, wait {wait_block} block") + @reporter.step("Tick {epochs_to_tick} epochs, wait {wait_block} block") def tick_epochs( self, epochs_to_tick: int, diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index e1dfcd1..41d52ab 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -19,10 +19,9 @@ from typing import Dict, List, TypedDict, Union import pexpect -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetmapInfo -reporter = get_reporter() logger = logging.getLogger("NeoLogger") COLOR_GREEN = "\033[92m" 
COLOR_OFF = "\033[0m" @@ -65,9 +64,7 @@ def _configure_aws_cli(cmd: str, key_id: str, access_key: str, out_format: str = return cmd.decode() -def _attach_allure_log( - cmd: str, output: str, return_code: int, start_time: datetime, end_time: datetime -) -> None: +def _attach_allure_log(cmd: str, output: str, return_code: int, start_time: datetime, end_time: datetime) -> None: command_attachment = ( f"COMMAND: '{cmd}'\n" f"OUTPUT:\n {output}\n" diff --git a/src/frostfs_testlib/utils/env_utils.py b/src/frostfs_testlib/utils/env_utils.py index 6b4fb40..3fdebe1 100644 --- a/src/frostfs_testlib/utils/env_utils.py +++ b/src/frostfs_testlib/utils/env_utils.py @@ -1,13 +1,12 @@ import logging import re -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter -reporter = get_reporter() logger = logging.getLogger("NeoLogger") -@reporter.step_deco("Read environment.properties") +@reporter.step("Read environment.properties") def read_env_properties(file_path: str) -> dict: with open(file_path, "r") as file: raw_content = file.read() @@ -23,7 +22,7 @@ def read_env_properties(file_path: str) -> dict: return env_properties -@reporter.step_deco("Update data in environment.properties") +@reporter.step("Update data in environment.properties") def save_env_properties(file_path: str, env_data: dict) -> None: with open(file_path, "a+") as env_file: for env, env_value in env_data.items(): diff --git a/src/frostfs_testlib/utils/failover_utils.py b/src/frostfs_testlib/utils/failover_utils.py index 507168e..5c4d52f 100644 --- a/src/frostfs_testlib/utils/failover_utils.py +++ b/src/frostfs_testlib/utils/failover_utils.py @@ -3,7 +3,7 @@ from dataclasses import dataclass from time import sleep from typing import Optional -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.resources.common import SERVICE_MAX_STARTUP_TIME from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import neo_go_dump_keys @@ -15,12 +15,10 @@ from frostfs_testlib.storage.dataclasses.node_base import ServiceClass from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.utils.datetime_utils import parse_time -reporter = get_reporter() - logger = logging.getLogger("NeoLogger") -@reporter.step_deco("Check and return status of given service") +@reporter.step("Check and return status of given service") def service_status(service: str, shell: Shell) -> str: return shell.exec(f"sudo systemctl is-active {service}").stdout.rstrip() @@ -73,14 +71,14 @@ class TopCommand: ) -@reporter.step_deco("Run `top` command with specified PID") +@reporter.step("Run `top` command with specified PID") def service_status_top(service: str, shell: Shell) -> TopCommand: pid = service_pid(service, shell) output = shell.exec(f"sudo top -b -n 1 -p {pid}").stdout return TopCommand.from_stdout(output, pid) -@reporter.step_deco("Restart service n times with sleep") +@reporter.step("Restart service n times with sleep") def multiple_restart( service_type: type[NodeBase], node: ClusterNode, @@ -95,8 +93,7 @@ def multiple_restart( sleep(sleep_interval) -@reporter.step_deco("Get status of list of services and check expected status") -@wait_for_success(60, 5) +@wait_for_success(60, 5, title="Wait for services become {expected_status} on node {cluster_node}") def check_services_status(cluster_node: ClusterNode, service_list: list[ServiceClass], expected_status: str): cmd = "" for service in service_list: @@ -112,8 +109,7 @@ def 
check_services_status(cluster_node: ClusterNode, service_list: list[ServiceC ), f"Requested status={expected_status} not found in requested services={service_list}, list of statuses={result}" -@reporter.step_deco("Wait for active status of passed service") -@wait_for_success(60, 5) +@wait_for_success(60, 5, title="Wait for {service} become active") def wait_service_in_desired_state(service: str, shell: Shell, expected_status: Optional[str] = "active"): real_status = service_status(service=service, shell=shell) assert ( @@ -121,8 +117,7 @@ def wait_service_in_desired_state(service: str, shell: Shell, expected_status: O ), f"Service {service}: expected status= {expected_status}, real status {real_status}" -@reporter.step_deco("Run healthcheck against passed service") -@wait_for_success(parse_time(SERVICE_MAX_STARTUP_TIME), 1) +@wait_for_success(parse_time(SERVICE_MAX_STARTUP_TIME), 1, title="Wait for {service_type} passes healtcheck on {node}") def service_type_healthcheck( service_type: type[NodeBase], node: ClusterNode, @@ -133,26 +128,25 @@ def service_type_healthcheck( ), f"Healthcheck failed for {service.get_service_systemctl_name()}, IP={node.host_ip}" -@reporter.step_deco("Kill by process name") +@reporter.step("Kill by process name") def kill_by_service_name(service_type: type[NodeBase], node: ClusterNode): service_systemctl_name = node.service(service_type).get_service_systemctl_name() pid = service_pid(service_systemctl_name, node.host.get_shell()) node.host.get_shell().exec(f"sudo kill -9 {pid}") -@reporter.step_deco("Service {service} suspend") +@reporter.step("Suspend {service}") def suspend_service(shell: Shell, service: str): shell.exec(f"sudo kill -STOP {service_pid(service, shell)}") -@reporter.step_deco("Service {service} resume") +@reporter.step("Resume {service}") def resume_service(shell: Shell, service: str): shell.exec(f"sudo kill -CONT {service_pid(service, shell)}") -@reporter.step_deco("Retrieve service's pid") # retry mechanism cause when the task has been started recently '0' PID could be returned -@wait_for_success(10, 1) +@wait_for_success(10, 1, title="Get {service} pid") def service_pid(service: str, shell: Shell) -> int: output = shell.exec(f"systemctl show --property MainPID {service}").stdout.rstrip() splitted = output.split("=") @@ -161,7 +155,7 @@ def service_pid(service: str, shell: Shell) -> int: return PID -@reporter.step_deco("Wrapper for neo-go dump keys command") +@reporter.step("Wrapper for neo-go dump keys command") def dump_keys(shell: Shell, node: ClusterNode) -> dict: host = node.host service_config = host.get_service_config(node.service(MorphChain).name) @@ -169,7 +163,7 @@ def dump_keys(shell: Shell, node: ClusterNode) -> dict: return neo_go_dump_keys(shell=shell, wallet=wallet) -@reporter.step_deco("Wait for object replication") +@reporter.step("Wait for object replication") def wait_object_replication( cid: str, oid: str, diff --git a/src/frostfs_testlib/utils/file_keeper.py b/src/frostfs_testlib/utils/file_keeper.py index ad6836b..a5670cc 100644 --- a/src/frostfs_testlib/utils/file_keeper.py +++ b/src/frostfs_testlib/utils/file_keeper.py @@ -1,17 +1,15 @@ from concurrent.futures import ThreadPoolExecutor -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.storage.dataclasses.node_base import NodeBase -reporter = get_reporter() - class FileKeeper: """This class is responsible to make backup copy of modified file and restore when required (mostly after the test)""" files_to_restore: 
dict[NodeBase, list[str]] = {} - @reporter.step_deco("Adding {file_to_restore} from node {node} to restore list") + @reporter.step("Adding {file_to_restore} from node {node} to restore list") def add(self, node: NodeBase, file_to_restore: str): if node in self.files_to_restore and file_to_restore in self.files_to_restore[node]: # Already added @@ -26,7 +24,7 @@ class FileKeeper: shell = node.host.get_shell() shell.exec(f"cp {file_to_restore} {file_to_restore}.bak") - @reporter.step_deco("Restore files") + @reporter.step("Restore files") def restore_files(self): nodes = self.files_to_restore.keys() if not nodes: @@ -41,7 +39,7 @@ class FileKeeper: # Iterate through results for exception check if any pass - @reporter.step_deco("Restore files on node {node}") + @reporter.step("Restore files on node {node}") def _restore_files_on_node(self, node: NodeBase): shell = node.host.get_shell() for file_to_restore in self.files_to_restore[node]: diff --git a/src/frostfs_testlib/utils/file_utils.py b/src/frostfs_testlib/utils/file_utils.py index a41665e..d238106 100644 --- a/src/frostfs_testlib/utils/file_utils.py +++ b/src/frostfs_testlib/utils/file_utils.py @@ -4,10 +4,9 @@ import os import uuid from typing import Any, Optional -from frostfs_testlib.reporter import get_reporter +from frostfs_testlib import reporter from frostfs_testlib.resources.common import ASSETS_DIR -reporter = get_reporter() logger = logging.getLogger("NeoLogger") @@ -61,7 +60,7 @@ def generate_file_with_content( return file_path -@reporter.step_deco("Get File Hash") +@reporter.step("Get File Hash") def get_file_hash(file_path: str, len: Optional[int] = None, offset: Optional[int] = None) -> str: """Generates hash for the specified file. @@ -88,7 +87,7 @@ def get_file_hash(file_path: str, len: Optional[int] = None, offset: Optional[in return file_hash.hexdigest() -@reporter.step_deco("Concatenation set of files to one file") +@reporter.step("Concatenation set of files to one file") def concat_files(file_paths: list, resulting_file_path: Optional[str] = None) -> str: """Concatenates several files into a single file. 
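Taken together, the hunks above replace the per-module get_reporter() singleton with a single module-level reporter imported from the package root. A short sketch of how test code consumes it after the migration; judging by titles such as "Stop host of node {node}", placeholders in decorator titles are resolved from the decorated function's arguments:

from frostfs_testlib import reporter

@reporter.step("Upload {file_path} to container {cid}")
def upload(cid: str, file_path: str) -> str:
    # Nested steps can still be opened imperatively via the context manager.
    with reporter.step(f"Calculate hash of {file_path}"):
        ...
    return "object-id"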
From 17c1a4f14bfe14260ad8a9ad4027d1d0e589bc59 Mon Sep 17 00:00:00 2001 From: mkadilov Date: Fri, 1 Dec 2023 15:54:28 +0300 Subject: [PATCH 179/363] [#136] Added exclude_filter Added exclude_filter Signed-off-by: Mikhail Kadilov --- src/frostfs_testlib/hosting/docker_host.py | 5 +++++ src/frostfs_testlib/hosting/interfaces.py | 1 + 2 files changed, 6 insertions(+) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 0e4ea11..17146c0 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -235,6 +235,7 @@ class DockerHost(Host): since: Optional[datetime] = None, until: Optional[datetime] = None, unit: Optional[str] = None, + exclude_filter: Optional[str] = None, ) -> str: client = self._get_docker_client() filtered_logs = "" @@ -248,6 +249,10 @@ class DockerHost(Host): matches = re.findall(filter_regex, filtered_logs, re.IGNORECASE + re.MULTILINE) found = list(matches) + + if exclude_filter: + found = [match for match in found if match != exclude_filter] + if found: filtered_logs += f"{container_name}:\n{os.linesep.join(found)}" diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index 84b7911..9dd6f3c 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -287,6 +287,7 @@ class Host(ABC): since: Optional[datetime] = None, until: Optional[datetime] = None, unit: Optional[str] = None, + exclude_filter: Optional[str] = None, ) -> str: """Get logs from host filtered by regex. From e65fc359fe3c3e1019b6fb98610cd612e93fc26f Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Fri, 1 Dec 2023 14:15:12 +0300 Subject: [PATCH 180/363] [#134] Add method uptime service Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/storage/dataclasses/node_base.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index ace0214..4fc7dea 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -1,8 +1,10 @@ from abc import abstractmethod from dataclasses import dataclass +from datetime import datetime, timezone from typing import Optional, TypedDict, TypeVar import yaml +from dateutil import parser from frostfs_testlib import reporter from frostfs_testlib.hosting.config import ServiceConfig @@ -170,6 +172,15 @@ class NodeBase(HumanReadableABC): def _get_service_config(self) -> ServiceConfig: return self.host.get_service_config(self.name) + def get_service_uptime(self, service: str) -> datetime: + result = self.host.get_shell().exec( + f"systemctl show {service} --property ActiveEnterTimestamp | cut -d '=' -f 2" + ) + start_time = parser.parse(result.stdout.strip()) + current_time = datetime.now(tz=timezone.utc) + active_time = current_time - start_time + return active_time + ServiceClass = TypeVar("ServiceClass", bound=NodeBase) From 81dfc723dae06b4ef4a2d87a1f0805651e950966 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 4 Dec 2023 17:59:29 +0300 Subject: [PATCH 181/363] [#137] Ability to control remote processes id and reports for load Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/k6.py | 33 ++++--- src/frostfs_testlib/load/load_report.py | 59 ++++++------- .../processes/remote_process.py | 86 ++++++++++++++++--- .../controllers/background_load_controller.py | 34 +++++++- 
.../controllers/cluster_state_controller.py | 10 ++- 5 files changed, 159 insertions(+), 63 deletions(-) diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index 92da8e0..2ce7c75 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -34,7 +34,6 @@ class LoadResults: class K6: _k6_process: RemoteProcess - _start_time: datetime def __init__( self, @@ -61,6 +60,18 @@ class K6: self._k6_dir: str = k6_dir + command = ( + f"{self._k6_dir}/k6 run {self._generate_env_variables()} " + f"{self._k6_dir}/scenarios/{self.load_params.scenario.value}.js" + ) + user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None + process_id = ( + self.load_params.load_id + if self.load_params.scenario != LoadScenario.VERIFY + else f"{self.load_params.load_id}_verify" + ) + self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, user, process_id) + @property def process_dir(self) -> str: return self._k6_process.process_dir @@ -111,15 +122,15 @@ class K6: reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "K6 ENV variables") return " ".join([f"-e {param}='{value}'" for param, value in env_vars.items() if value is not None]) + def get_start_time(self) -> datetime: + return datetime.fromtimestamp(self._k6_process.start_time()) + + def get_end_time(self) -> datetime: + return datetime.fromtimestamp(self._k6_process.end_time()) + def start(self) -> None: with reporter.step(f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}"): - self._start_time = int(datetime.utcnow().timestamp()) - command = ( - f"{self._k6_dir}/k6 run {self._generate_env_variables()} " - f"{self._k6_dir}/scenarios/{self.load_params.scenario.value}.js" - ) - user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None - self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, user) + self._k6_process.start() def wait_until_finished(self, soft_timeout: int = 0) -> None: with reporter.step(f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}"): @@ -128,8 +139,10 @@ class K6: else: timeout = self.load_params.load_time or 0 + start_time = int(self.get_start_time().timestamp()) + current_time = int(datetime.utcnow().timestamp()) - working_time = current_time - self._start_time + working_time = current_time - start_time remaining_time = timeout - working_time setup_teardown_time = ( @@ -146,7 +159,7 @@ class K6: original_timeout = timeout timeouts = { - "K6 start time": self._start_time, + "K6 start time": start_time, "Current time": current_time, "K6 working time": working_time, "Remaining time for load": remaining_time, diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py index ad3a26d..105d852 100644 --- a/src/frostfs_testlib/load/load_report.py +++ b/src/frostfs_testlib/load/load_report.py @@ -17,11 +17,15 @@ class LoadReport: self.start_time: Optional[datetime] = None self.end_time: Optional[datetime] = None - def set_start_time(self): - self.start_time = datetime.utcnow() + def set_start_time(self, time: datetime = None): + if time is None: + time = datetime.utcnow() + self.start_time = time - def set_end_time(self): - self.end_time = datetime.utcnow() + def set_end_time(self, time: datetime = None): + if time is None: + time = datetime.utcnow() + self.end_time = time def add_summaries(self, load_summaries: dict): 
self.load_summaries_list.append(load_summaries) @@ -31,6 +35,7 @@ class LoadReport: def get_report_html(self): report_sections = [ + [self.load_params, self._get_load_id_section_html], [self.load_test, self._get_load_params_section_html], [self.load_summaries_list, self._get_totals_section_html], [self.end_time, self._get_test_time_html], @@ -44,9 +49,7 @@ class LoadReport: return html def _get_load_params_section_html(self) -> str: - params: str = yaml.safe_dump( - [self.load_test], sort_keys=False, indent=2, explicit_start=True - ) + params: str = yaml.safe_dump([self.load_test], sort_keys=False, indent=2, explicit_start=True) params = params.replace("\n", "
").replace(" ", " ") section_html = f"""

Scenario params

@@ -55,8 +58,17 @@ class LoadReport: return section_html + def _get_load_id_section_html(self) -> str: + section_html = f"""

Load ID: {self.load_params.load_id}

+
""" + + return section_html + def _get_test_time_html(self) -> str: - html = f"""

Scenario duration in UTC time (from agent)

+ if not self.start_time or not self.end_time: + return "" + + html = f"""

Scenario duration

{self.start_time} - {self.end_time}

""" @@ -97,7 +109,7 @@ class LoadReport: LoadScenario.gRPC_CAR: "open model", LoadScenario.S3_CAR: "open model", LoadScenario.LOCAL: "local fill", - LoadScenario.S3_LOCAL: "local fill" + LoadScenario.S3_LOCAL: "local fill", } return model_map[self.load_params.scenario] @@ -124,10 +136,7 @@ class LoadReport: total_errors: int = 0 for node_key, errors in errors.items(): total_errors += errors - if ( - self.load_params.k6_process_allocation_strategy - == K6ProcessAllocationStrategy.PER_ENDPOINT - ): + if self.load_params.k6_process_allocation_strategy == K6ProcessAllocationStrategy.PER_ENDPOINT: per_node_errors_html += self._row(f"At {node_key}", errors) latency_html = "" @@ -139,9 +148,7 @@ class LoadReport: for param_name, param_val in latency_dict.items(): latency_values += f"{param_name}={param_val:.2f}ms " - latency_html += self._row( - f"{operation_type} latency {node_key.split(':')[0]}", latency_values - ) + latency_html += self._row(f"{operation_type} latency {node_key.split(':')[0]}", latency_values) object_size, object_size_unit = calc_unit(self.load_params.object_size, 1) duration = self._seconds_to_formatted_duration(self.load_params.load_time) @@ -180,9 +187,7 @@ class LoadReport: write_latency = {} write_errors = {} requested_write_rate = self.load_params.write_rate - requested_write_rate_str = ( - f"{requested_write_rate}op/sec" if requested_write_rate else "" - ) + requested_write_rate_str = f"{requested_write_rate}op/sec" if requested_write_rate else "" read_operations = 0 read_op_sec = 0 @@ -197,20 +202,12 @@ class LoadReport: delete_latency = {} delete_errors = {} requested_delete_rate = self.load_params.delete_rate - requested_delete_rate_str = ( - f"{requested_delete_rate}op/sec" if requested_delete_rate else "" - ) + requested_delete_rate_str = f"{requested_delete_rate}op/sec" if requested_delete_rate else "" if self.load_params.scenario in [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]: - delete_vus = max( - self.load_params.preallocated_deleters or 0, self.load_params.max_deleters or 0 - ) - write_vus = max( - self.load_params.preallocated_writers or 0, self.load_params.max_writers or 0 - ) - read_vus = max( - self.load_params.preallocated_readers or 0, self.load_params.max_readers or 0 - ) + delete_vus = max(self.load_params.preallocated_deleters or 0, self.load_params.max_deleters or 0) + write_vus = max(self.load_params.preallocated_writers or 0, self.load_params.max_writers or 0) + read_vus = max(self.load_params.preallocated_readers or 0, self.load_params.max_readers or 0) else: write_vus = self.load_params.writers read_vus = self.load_params.readers diff --git a/src/frostfs_testlib/processes/remote_process.py b/src/frostfs_testlib/processes/remote_process.py index 1252b97..5624940 100644 --- a/src/frostfs_testlib/processes/remote_process.py +++ b/src/frostfs_testlib/processes/remote_process.py @@ -15,21 +15,33 @@ from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions class RemoteProcess: - def __init__(self, cmd: str, process_dir: str, shell: Shell, cmd_inspector: Optional[CommandInspector]): + def __init__( + self, cmd: str, process_dir: str, shell: Shell, cmd_inspector: Optional[CommandInspector], proc_id: str + ): self.process_dir = process_dir self.cmd = cmd self.stdout_last_line_number = 0 self.stderr_last_line_number = 0 self.pid: Optional[str] = None self.proc_rc: Optional[int] = None + self.proc_start_time: Optional[int] = None + self.proc_end_time: Optional[int] = None self.saved_stdout: Optional[str] = None self.saved_stderr: 
Optional[str] = None self.shell = shell + self.proc_id: str = proc_id self.cmd_inspectors: list[CommandInspector] = [cmd_inspector] if cmd_inspector else [] @classmethod @reporter.step("Create remote process") - def create(cls, command: str, shell: Shell, working_dir: str = "/tmp", user: Optional[str] = None) -> RemoteProcess: + def create( + cls, + command: str, + shell: Shell, + working_dir: str = "/tmp", + user: Optional[str] = None, + proc_id: Optional[str] = None, + ) -> RemoteProcess: """ Create a process on a remote host. @@ -40,6 +52,7 @@ class RemoteProcess: stderr: contains script errors stdout: contains script output user: user on behalf whom command will be executed + proc_id: process string identificator Args: shell: Shell instance @@ -49,19 +62,31 @@ class RemoteProcess: Returns: RemoteProcess instance for further examination """ + if proc_id is None: + proc_id = f"{uuid.uuid4()}" + cmd_inspector = SuInspector(user) if user else None remote_process = cls( cmd=command, - process_dir=os.path.join(working_dir, f"proc_{uuid.uuid4()}"), + process_dir=os.path.join(working_dir, f"proc_{proc_id}"), shell=shell, cmd_inspector=cmd_inspector, + proc_id=proc_id, ) - remote_process._create_process_dir() - remote_process._generate_command_script(command) - remote_process._start_process() - remote_process.pid = remote_process._get_pid() + return remote_process + @reporter.step("Start remote process") + def start(self): + """ + Starts a process on a remote host. + """ + + self._create_process_dir() + self._generate_command_script() + self._start_process() + self.pid = self._get_pid() + @reporter.step("Get process stdout") def stdout(self, full: bool = False) -> str: """ @@ -130,17 +155,48 @@ class RemoteProcess: if self.proc_rc is not None: return self.proc_rc + result = self._cat_proc_file("rc") + if not result: + return None + + self.proc_rc = int(result) + return self.proc_rc + + @reporter.step("Get process start time") + def start_time(self) -> Optional[int]: + if self.proc_start_time is not None: + return self.proc_start_time + + result = self._cat_proc_file("start_time") + if not result: + return None + + self.proc_start_time = int(result) + return self.proc_start_time + + @reporter.step("Get process end time") + def end_time(self) -> Optional[int]: + if self.proc_end_time is not None: + return self.proc_end_time + + result = self._cat_proc_file("end_time") + if not result: + return None + + self.proc_end_time = int(result) + return self.proc_end_time + + def _cat_proc_file(self, file: str) -> Optional[str]: terminal = self.shell.exec( - f"cat {self.process_dir}/rc", + f"cat {self.process_dir}/{file}", CommandOptions(check=False, extra_inspectors=self.cmd_inspectors, no_log=True), ) if "No such file or directory" in terminal.stderr: return None elif terminal.stderr or terminal.return_code != 0: - raise AssertionError(f"cat process rc was not successful: {terminal.stderr}") + raise AssertionError(f"cat process {file} was not successful: {terminal.stderr}") - self.proc_rc = int(terminal.stdout) - return self.proc_rc + return terminal.stdout @reporter.step("Check if process is running") def running(self) -> bool: @@ -195,17 +251,19 @@ class RemoteProcess: return terminal.stdout.strip() @reporter.step("Generate command script") - def _generate_command_script(self, command: str) -> None: - command = command.replace('"', '\\"').replace("\\", "\\\\") + def _generate_command_script(self) -> None: + command = self.cmd.replace('"', '\\"').replace("\\", "\\\\") script = ( f"#!/bin/bash\n" f"cd 
{self.process_dir}\n" + f"date +%s > {self.process_dir}/start_time\n" f"{command} &\n" f"pid=\$!\n" f"cd {self.process_dir}\n" f"echo \$pid > {self.process_dir}/pid\n" f"wait \$pid\n" - f"echo $? > {self.process_dir}/rc" + f"echo $? > {self.process_dir}/rc\n" + f"date +%s > {self.process_dir}/end_time\n" ) self.shell.exec( diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index 003bb6b..5f2ed99 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -1,4 +1,5 @@ import copy +from datetime import datetime from typing import Optional import frostfs_testlib.resources.optionals as optionals @@ -10,6 +11,7 @@ from frostfs_testlib.load.load_verifiers import LoadVerifier from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode from frostfs_testlib.storage.dataclasses.wallet import WalletInfo +from frostfs_testlib.testing.parallel import parallel from frostfs_testlib.testing.test_control import run_optionally @@ -26,6 +28,7 @@ class BackgroundLoadController: endpoints: list[str] runner: ScenarioRunner started: bool + load_reporters: list[LoadReport] def __init__( self, @@ -45,6 +48,7 @@ class BackgroundLoadController: self.loaders_wallet = loaders_wallet self.runner = runner self.started = False + self.load_reporters = [] if load_params.endpoint_selection_strategy is None: raise RuntimeError("endpoint_selection_strategy should not be None") @@ -83,12 +87,20 @@ class BackgroundLoadController: return all_endpoints[load_type][endpoint_selection_strategy] + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) + @reporter.step("Init k6 instances") + def init_k6(self): + self.endpoints = self._get_endpoints(self.load_params.load_type, self.load_params.endpoint_selection_strategy) + self.runner.init_k6_instances(self.load_params, self.endpoints, self.k6_dir) + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @reporter.step("Prepare load instances") def prepare(self): - self.endpoints = self._get_endpoints(self.load_params.load_type, self.load_params.endpoint_selection_strategy) self.runner.prepare(self.load_params, self.cluster_nodes, self.nodes_under_load, self.k6_dir) - self.runner.init_k6_instances(self.load_params, self.endpoints, self.k6_dir) + self.init_k6() + + def append_reporter(self, load_report: LoadReport): + self.load_reporters.append(load_report) @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) def start(self): @@ -128,16 +140,30 @@ class BackgroundLoadController: @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @reporter.step("Stop and get results of load") - def teardown(self, load_report: Optional[LoadReport] = None): + def teardown(self): if not self.started: return self.stop() self.load_summaries = self._get_results() self.started = False - if load_report: + + start_time = min(self._get_start_times()) + end_time = max(self._get_end_times()) + + for load_report in self.load_reporters: + load_report.set_start_time(start_time) + load_report.set_end_time(end_time) load_report.add_summaries(self.load_summaries) + def _get_start_times(self) -> list[datetime]: + futures = parallel([k6.get_start_time for k6 in self.runner.get_k6_instances()]) + return [future.result() for future in futures] + + def _get_end_times(self) -> list[datetime]: + futures = 
parallel([k6.get_end_time for k6 in self.runner.get_k6_instances()]) + return [future.result() for future in futures] + @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @reporter.step("Run post-load verification") def verify(self): diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 35ab6c1..301b636 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -540,8 +540,9 @@ class ClusterStateController: options = CommandOptions(check=False) return self.shell.exec(f"ping {node.host.config.address} -c 1", options).return_code - @retry(max_attempts=60, sleep_interval=5, expected_result=HostStatus.ONLINE) - @reporter.step("Waiting for {node} to go online") + @retry( + max_attempts=60, sleep_interval=10, expected_result=HostStatus.ONLINE, title="Waiting for {node} to go online" + ) def _wait_for_host_online(self, node: ClusterNode): try: ping_result = self._ping_host(node) @@ -552,8 +553,9 @@ class ClusterStateController: logger.warning(f"Host ping fails with error {err}") return HostStatus.OFFLINE - @retry(max_attempts=60, sleep_interval=5, expected_result=HostStatus.OFFLINE) - @reporter.step("Waiting for {node} to go offline") + @retry( + max_attempts=60, sleep_interval=10, expected_result=HostStatus.OFFLINE, title="Waiting for {node} to go offline" + ) def _wait_for_host_offline(self, node: ClusterNode): try: ping_result = self._ping_host(node) From ae566b413b04c747b27c63a044368eb1c219db92 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Thu, 30 Nov 2023 13:50:57 +0300 Subject: [PATCH 182/363] [#139] Use readers for init time calculation Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/load_config.py | 78 +++++++++++-------------- tests/test_load_config.py | 32 +++++++--- 2 files changed, 57 insertions(+), 53 deletions(-) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 735d8ec..c1c98fe 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -40,11 +40,18 @@ all_load_scenarios = [ LoadScenario.gRPC_CAR, LoadScenario.LOCAL, LoadScenario.S3_MULTIPART, - LoadScenario.S3_LOCAL + LoadScenario.S3_LOCAL, ] all_scenarios = all_load_scenarios.copy() + [LoadScenario.VERIFY] -constant_vus_scenarios = [LoadScenario.gRPC, LoadScenario.S3, LoadScenario.HTTP, LoadScenario.LOCAL, LoadScenario.S3_MULTIPART, LoadScenario.S3_LOCAL] +constant_vus_scenarios = [ + LoadScenario.gRPC, + LoadScenario.S3, + LoadScenario.HTTP, + LoadScenario.LOCAL, + LoadScenario.S3_MULTIPART, + LoadScenario.S3_LOCAL, +] constant_arrival_rate_scenarios = [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR] grpc_preset_scenarios = [ @@ -124,13 +131,9 @@ class Preset: # ------ GRPC ------ # Amount of containers which should be created - containers_count: Optional[int] = metadata_field( - grpc_preset_scenarios, "containers", None, False - ) + containers_count: Optional[int] = metadata_field(grpc_preset_scenarios, "containers", None, False) # Container placement policy for containers for gRPC - container_placement_policy: Optional[str] = metadata_field( - grpc_preset_scenarios, "policy", None, False - ) + container_placement_policy: Optional[str] = metadata_field(grpc_preset_scenarios, "policy", None, False) # ------ S3 ------ # Amount of buckets which should be created @@ -180,7 +183,14 @@ class LoadParams: awscli_url: 
Optional[str] = None # No ssl verification flag no_verify_ssl: Optional[bool] = metadata_field( - [LoadScenario.S3, LoadScenario.S3_CAR, LoadScenario.S3_MULTIPART, LoadScenario.S3_LOCAL, LoadScenario.VERIFY, LoadScenario.HTTP], + [ + LoadScenario.S3, + LoadScenario.S3_CAR, + LoadScenario.S3_MULTIPART, + LoadScenario.S3_LOCAL, + LoadScenario.VERIFY, + LoadScenario.HTTP, + ], "no-verify-ssl", "NO_VERIFY_SSL", False, @@ -198,9 +208,7 @@ class LoadParams: # Specifies the minimum duration of every single execution (i.e. iteration). # Any iterations that are shorter than this value will cause that VU to # sleep for the remainder of the time until the specified minimum duration is reached. - min_iteration_duration: Optional[str] = metadata_field( - all_load_scenarios, None, "K6_MIN_ITERATION_DURATION", False - ) + min_iteration_duration: Optional[str] = metadata_field(all_load_scenarios, None, "K6_MIN_ITERATION_DURATION", False) # Prepare/cut objects locally on client before sending prepare_locally: Optional[bool] = metadata_field( [LoadScenario.gRPC, LoadScenario.gRPC_CAR], None, "PREPARE_LOCALLY", False @@ -225,46 +233,34 @@ class LoadParams: # ------- CONSTANT ARRIVAL RATE SCENARIO PARAMS ------- # Number of iterations to start during each timeUnit period for write. - write_rate: Optional[int] = metadata_field( - constant_arrival_rate_scenarios, None, "WRITE_RATE", True, True - ) + write_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "WRITE_RATE", True, True) # Number of iterations to start during each timeUnit period for read. - read_rate: Optional[int] = metadata_field( - constant_arrival_rate_scenarios, None, "READ_RATE", True, True - ) + read_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "READ_RATE", True, True) # Number of iterations to start during each timeUnit period for delete. - delete_rate: Optional[int] = metadata_field( - constant_arrival_rate_scenarios, None, "DELETE_RATE", True, True - ) + delete_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "DELETE_RATE", True, True) # Amount of preAllocatedVUs for write operations. preallocated_writers: Optional[int] = metadata_field( constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True, True ) # Amount of maxVUs for write operations. - max_writers: Optional[int] = metadata_field( - constant_arrival_rate_scenarios, None, "MAX_WRITERS", False, True - ) + max_writers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_WRITERS", False, True) # Amount of preAllocatedVUs for read operations. preallocated_readers: Optional[int] = metadata_field( constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True, True ) # Amount of maxVUs for read operations. - max_readers: Optional[int] = metadata_field( - constant_arrival_rate_scenarios, None, "MAX_READERS", False, True - ) + max_readers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_READERS", False, True) # Amount of preAllocatedVUs for read operations. preallocated_deleters: Optional[int] = metadata_field( constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True, True ) # Amount of maxVUs for delete operations. 
- max_deleters: Optional[int] = metadata_field( - constant_arrival_rate_scenarios, None, "MAX_DELETERS", False, True - ) + max_deleters: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_DELETERS", False, True) # Multipart # Number of parts to upload in parallel @@ -272,20 +268,18 @@ class LoadParams: [LoadScenario.S3_MULTIPART], None, "WRITERS_MULTIPART", False, True ) # part size must be greater than (5 MB) - write_object_part_size: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITE_OBJ_PART_SIZE", False) + write_object_part_size: Optional[int] = metadata_field( + [LoadScenario.S3_MULTIPART], None, "WRITE_OBJ_PART_SIZE", False + ) # Period of time to apply the rate value. - time_unit: Optional[str] = metadata_field( - constant_arrival_rate_scenarios, None, "TIME_UNIT", False - ) + time_unit: Optional[str] = metadata_field(constant_arrival_rate_scenarios, None, "TIME_UNIT", False) # ------- VERIFY SCENARIO PARAMS ------- # Maximum verification time for k6 to verify objects. Default is BACKGROUND_LOAD_MAX_VERIFY_TIME (3600). verify_time: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "TIME_LIMIT", False) # Amount of Verification VU. - verify_clients: Optional[int] = metadata_field( - [LoadScenario.VERIFY], None, "CLIENTS", True, False - ) + verify_clients: Optional[int] = metadata_field([LoadScenario.VERIFY], None, "CLIENTS", True, False) # ------- LOCAL SCENARIO PARAMS ------- # Config file location (filled automatically) @@ -341,10 +335,8 @@ class LoadParams: return math.ceil(self._get_total_vus() * self.vu_init_time) def _get_total_vus(self) -> int: - vu_fields = ["writers", "preallocated_writers"] - data_fields = [ - getattr(self, field.name) or 0 for field in fields(self) if field.name in vu_fields - ] + vu_fields = ["writers", "preallocated_writers", "readers", "preallocated_readers"] + data_fields = [getattr(self, field.name) or 0 for field in fields(self) if field.name in vu_fields] return sum(data_fields) def _get_applicable_fields(self): @@ -375,9 +367,7 @@ class LoadParams: ] for field in data_fields: - actual_field_type = ( - get_args(field.type)[0] if len(get_args(field.type)) else get_args(field.type) - ) + actual_field_type = get_args(field.type)[0] if len(get_args(field.type)) else get_args(field.type) if is_dataclass(actual_field_type) and getattr(instance, field.name): fields_with_data += LoadParams._get_meta_fields(getattr(instance, field.name)) diff --git a/tests/test_load_config.py b/tests/test_load_config.py index 256a04b..926399b 100644 --- a/tests/test_load_config.py +++ b/tests/test_load_config.py @@ -12,6 +12,7 @@ from frostfs_testlib.load.load_config import ( ReadFrom, ) from frostfs_testlib.load.runners import DefaultRunner +from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.controllers.background_load_controller import BackgroundLoadController from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode @@ -53,6 +54,25 @@ class TestLoadConfig: assert repr(load_params) == expected assert f"{load_params}" == expected + def test_load_params_init_time(self): + load_params = LoadParams(load_type=LoadType.S3) + vus = 100 + + load_params.vu_init_time = BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME + # Used in time calculations + load_params.readers = vus + load_params.writers = vus + load_params.preallocated_readers = vus + load_params.preallocated_writers = vus + + # Not used in 
time calculations + load_params.deleters = vus + load_params.preallocated_deleters = vus + + expected = vus * 4 * BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME + actual = load_params.get_init_time() + assert actual == expected, "Incorrect time for get_init_time()" + def test_load_params_initially_have_all_values_none(self): load_params = LoadParams(load_type=LoadType.S3) self._check_all_values_none(load_params, ["load_type", "scenario"]) @@ -285,9 +305,7 @@ class TestLoadConfig: self._check_preset_params(load_params, expected_preset_args) self._check_env_vars(load_params, expected_env_vars) - @pytest.mark.parametrize( - "load_params, load_type", [(LoadScenario.VERIFY, LoadType.S3)], indirect=True - ) + @pytest.mark.parametrize("load_params, load_type", [(LoadScenario.VERIFY, LoadType.S3)], indirect=True) def test_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams): expected_env_vars = { "CLIENTS": 14, @@ -299,9 +317,7 @@ class TestLoadConfig: self._check_env_vars(load_params, expected_env_vars) - @pytest.mark.parametrize( - "load_params, load_type", [(LoadScenario.VERIFY, LoadType.gRPC)], indirect=True - ) + @pytest.mark.parametrize("load_params, load_type", [(LoadScenario.VERIFY, LoadType.gRPC)], indirect=True) def test_argument_parsing_for_grpc_verify_scenario(self, load_params: LoadParams): expected_env_vars = { "CLIENTS": 14, @@ -339,9 +355,7 @@ class TestLoadConfig: self._check_preset_params(load_params, expected_preset_args) self._check_env_vars(load_params, expected_env_vars) - @pytest.mark.parametrize( - "load_params, set_empty", [(LoadScenario.gRPC_CAR, True)], indirect=True - ) + @pytest.mark.parametrize("load_params, set_empty", [(LoadScenario.gRPC_CAR, True)], indirect=True) def test_empty_argument_parsing_for_grpc_car_scenario(self, load_params: LoadParams): expected_preset_args = [ "--size '0'", From 247d2fbab7d3cfb475da32d282651a351028eb4b Mon Sep 17 00:00:00 2001 From: anurindm Date: Tue, 21 Nov 2023 10:20:01 +0300 Subject: [PATCH 183/363] Added logger config path attribute to NodeBase class Signed-off-by: Dmitry Anurin --- src/frostfs_testlib/storage/constants.py | 1 + src/frostfs_testlib/storage/dataclasses/node_base.py | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 9ad24eb..b1b7995 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -6,6 +6,7 @@ class ConfigAttributes: CONFIG_DIR = "service_config_dir" CONFIG_PATH = "config_path" SHARD_CONFIG_PATH = "shard_config_path" + LOGGER_CONFIG_PATH = "logger_config_path" LOCAL_WALLET_PATH = "local_wallet_path" LOCAL_WALLET_CONFIG = "local_config_path" ENDPOINT_DATA_0 = "endpoint_data0" diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 4fc7dea..bf36665 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -120,6 +120,15 @@ class NodeBase(HumanReadableABC): ConfigAttributes.WALLET_CONFIG, ) + def get_logger_config_path(self) -> Optional[str]: + """ + Returns config path for logger located on remote host + """ + config_attributes = self.host.get_service_config(self.name) + if ConfigAttributes.LOGGER_CONFIG_PATH not in config_attributes.attributes: + return None + return self._get_attribute(ConfigAttributes.LOGGER_CONFIG_PATH) + @property def config_dir(self) -> str: return self._get_attribute(ConfigAttributes.CONFIG_DIR) From 
54d26b226c4c2099d9f894281d5901f8579b1915 Mon Sep 17 00:00:00 2001 From: mkadilov Date: Mon, 11 Dec 2023 14:20:06 +0300 Subject: [PATCH 184/363] [#140] Exec command changed Added exclusion of 'Too many requests' errors in log analyzer Signed-off-by: Mikhail Kadilov m.kadilov@yadro.com --- src/frostfs_testlib/hosting/docker_host.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 17146c0..05cd4b2 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -247,11 +247,10 @@ class DockerHost(Host): logger.info(f"Got exception while dumping logs of '{container_name}': {exc}") continue + if exclude_filter: + filtered_logs = filtered_logs.replace(exclude_filter, "") matches = re.findall(filter_regex, filtered_logs, re.IGNORECASE + re.MULTILINE) found = list(matches) - - if exclude_filter: - found = [match for match in found if match != exclude_filter] if found: filtered_logs += f"{container_name}:\n{os.linesep.join(found)}" From f1264bd47331837a30b8e56dc54612dcfbe3b534 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Tue, 12 Dec 2023 09:38:38 +0300 Subject: [PATCH 185/363] [#143] Change network utils Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/hosting/interfaces.py | 29 ++++++-- .../shell/command_inspectors.py | 2 +- src/frostfs_testlib/steps/network.py | 72 ++----------------- .../controllers/cluster_state_controller.py | 65 +++++------------ 4 files changed, 48 insertions(+), 120 deletions(-) diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index 9dd6f3c..daea6eb 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -5,6 +5,7 @@ from typing import Optional from frostfs_testlib.hosting.config import CLIConfig, HostConfig, ServiceConfig from frostfs_testlib.shell.interfaces import Shell from frostfs_testlib.testing.readable import HumanReadableEnum +from frostfs_testlib.testing.test_control import retry class HostStatus(HumanReadableEnum): @@ -25,9 +26,7 @@ class Host(ABC): def __init__(self, config: HostConfig) -> None: self._config = config - self._service_config_by_name = { - service_config.name: service_config for service_config in config.services - } + self._service_config_by_name = {service_config.name: service_config for service_config in config.services} self._cli_config_by_name = {cli_config.name: cli_config for cli_config in config.clis} @property @@ -323,9 +322,7 @@ class Host(ABC): """ @abstractmethod - def wait_for_service_to_be_in_state( - self, systemd_service_name: str, expected_state: str, timeout: int - ) -> None: + def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None: """ Waits for service to be in specified state. 
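The log-analyzer change in docker_host.py above reorders the filtering: instead of collecting regex matches and then dropping only those that are exactly equal to the exclude filter, the excluded substring (for example 'Too many requests') is now stripped from the raw log text before the regex runs, so expected errors can no longer produce matches at all. A minimal sketch of the resulting behavior, assuming only standard `re` semantics (the names here are illustrative, not part of the library):

    import re

    def find_log_issues(raw_logs: str, filter_regex: str, exclude_filter: str | None = None) -> list[str]:
        # Strip the excluded substring first, mirroring the patched order above,
        # so the regex cannot match inside fragments that contained it.
        if exclude_filter:
            raw_logs = raw_logs.replace(exclude_filter, "")
        return re.findall(filter_regex, raw_logs, re.IGNORECASE | re.MULTILINE)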
@@ -335,3 +332,23 @@ class Host(ABC): timeout: Seconds to wait """ + + def down_interface(self, interface: str) -> None: + shell = self.get_shell() + shell.exec(f"ip link set {interface} down") + + def up_interface(self, interface: str) -> None: + shell = self.get_shell() + shell.exec(f"ip link set {interface} up") + + def check_state(self, interface: str) -> str: + shell = self.get_shell() + return shell.exec(f"ip link show {interface} | sed -z 's/.*state \(.*\) mode .*/\\1/'").stdout.strip() + + @retry(max_attempts=5, sleep_interval=5, expected_result="UP") + def check_state_up(self, interface: str) -> str: + return self.check_state(interface=interface) + + @retry(max_attempts=5, sleep_interval=5, expected_result="DOWN") + def check_state_down(self, interface: str) -> str: + return self.check_state(interface=interface) diff --git a/src/frostfs_testlib/shell/command_inspectors.py b/src/frostfs_testlib/shell/command_inspectors.py index 8fe2f34..0003017 100644 --- a/src/frostfs_testlib/shell/command_inspectors.py +++ b/src/frostfs_testlib/shell/command_inspectors.py @@ -9,7 +9,7 @@ class SudoInspector(CommandInspector): def inspect(self, original_command: str, command: str) -> str: if not command.startswith("sudo"): - return f"sudo {command}" + return f"sudo -i {command}" return command diff --git a/src/frostfs_testlib/steps/network.py b/src/frostfs_testlib/steps/network.py index 64e235a..efaaf5a 100644 --- a/src/frostfs_testlib/steps/network.py +++ b/src/frostfs_testlib/steps/network.py @@ -1,77 +1,19 @@ -from frostfs_testlib import reporter +from frostfs_testlib.shell import CommandOptions from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.testing.test_control import retry -class IpTablesHelper: - @staticmethod - def drop_input_traffic_to_port(node: ClusterNode, ports: list[str]) -> None: - shell = node.host.get_shell() - for port in ports: - shell.exec(f"iptables -A INPUT -p tcp --dport {port} -j DROP") - +class IpHelper: @staticmethod def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[str]) -> None: shell = node.host.get_shell() for ip in block_ip: - shell.exec(f"iptables -A INPUT -s {ip} -j DROP") - - @staticmethod - def restore_input_traffic_to_port(node: ClusterNode) -> None: - shell = node.host.get_shell() - ports = shell.exec("iptables -L --numeric | grep DROP | awk '{print $7}'").stdout.strip().split("\n") - if ports[0] == "": - return - for port in ports: - shell.exec(f"iptables -D INPUT -p tcp --dport {port.split(':')[-1]} -j DROP") + shell.exec(f"ip route add blackhole {ip}") @staticmethod def restore_input_traffic_to_node(node: ClusterNode) -> None: shell = node.host.get_shell() - unlock_ip = shell.exec("iptables -L --numeric | grep DROP | awk '{print $4}'").stdout.strip().split("\n") - if unlock_ip[0] == "": + unlock_ip = shell.exec("ip route list | grep blackhole", CommandOptions(check=False)) + if unlock_ip.return_code != 0: return - for ip in unlock_ip: - shell.exec(f"iptables -D INPUT -s {ip} -j DROP") - - -# TODO Move class to HOST -class IfUpDownHelper: - @reporter.step("Down {interface} to {node}") - def down_interface(self, node: ClusterNode, interface: str) -> None: - shell = node.host.get_shell() - shell.exec(f"ifdown {interface}") - - @reporter.step("Up {interface} to {node}") - def up_interface(self, node: ClusterNode, interface: str) -> None: - shell = node.host.get_shell() - shell.exec(f"ifup {interface}") - - @reporter.step("Up all interface to {node}") - def up_all_interface(self, node: ClusterNode) -> None: - shell = 
node.host.get_shell() - interfaces = list(node.host.config.interfaces.keys()) - shell.exec("ifup -av") - for name_interface in interfaces: - self.check_state_up(node, name_interface) - - @reporter.step("Down all interface to {node}") - def down_all_interface(self, node: ClusterNode) -> None: - shell = node.host.get_shell() - interfaces = list(node.host.config.interfaces.keys()) - shell.exec("ifdown -av") - for name_interface in interfaces: - self.check_state_down(node, name_interface) - - @reporter.step("Check {node} to {interface}") - def check_state(self, node: ClusterNode, interface: str) -> str: - shell = node.host.get_shell() - return shell.exec(f"ip link show {interface} | sed -z 's/.*state \(.*\) mode .*/\\1/'").stdout.strip() - - @retry(max_attempts=5, sleep_interval=5, expected_result="UP") - def check_state_up(self, node: ClusterNode, interface: str) -> str: - return self.check_state(node=node, interface=interface) - - @retry(max_attempts=5, sleep_interval=5, expected_result="DOWN") - def check_state_down(self, node: ClusterNode, interface: str) -> str: - return self.check_state(node=node, interface=interface) + for ip in unlock_ip.stdout.strip().split("\n"): + shell.exec(f"ip route del blackhole {ip.split(' ')[1]}") diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 301b636..290503c 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -13,7 +13,7 @@ from frostfs_testlib.plugins import load_all from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG, MORPH_BLOCK_TIME from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider -from frostfs_testlib.steps.network import IfUpDownHelper, IpTablesHelper +from frostfs_testlib.steps.network import IpHelper from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode from frostfs_testlib.storage.controllers.disk_controller import DiskController from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass @@ -22,7 +22,6 @@ from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for from frostfs_testlib.utils.datetime_utils import parse_time logger = logging.getLogger("NeoLogger") -if_up_down_helper = IfUpDownHelper() class StateManager: @@ -305,57 +304,25 @@ class ClusterStateController: [node.host.wait_success_resume_process(process_name) for node in list_nodes] self.suspended_services = {} - @reporter.step("Drop traffic to {node}, with ports - {ports}, nodes - {block_nodes}") + @reporter.step("Drop traffic to {node}, nodes - {block_nodes}") def drop_traffic( self, - mode: str, node: ClusterNode, wakeup_timeout: int, - ports: list[str] = None, + name_interface: str, block_nodes: list[ClusterNode] = None, ) -> None: - allowed_modes = ["ports", "nodes"] - assert mode in allowed_modes - - match mode: - case "ports": - IpTablesHelper.drop_input_traffic_to_port(node, ports) - case "nodes": - list_ip = self._parse_intefaces(block_nodes) - IpTablesHelper.drop_input_traffic_to_node(node, list_ip) + list_ip = self._parse_interfaces(block_nodes, name_interface) + IpHelper.drop_input_traffic_to_node(node, list_ip) time.sleep(wakeup_timeout) self.dropped_traffic.append(node) - @reporter.step("Ping traffic") - def ping_traffic( - self, - node: 
ClusterNode, - nodes_list: list[ClusterNode], - expect_result: int, - ) -> bool: - shell = node.host.get_shell() - options = CommandOptions(check=False) - ips = self._parse_intefaces(nodes_list) - for ip in ips: - code = shell.exec(f"ping {ip} -c 1", options).return_code - if code != expect_result: - return False - return True - @reporter.step("Start traffic to {node}") def restore_traffic( self, - mode: str, node: ClusterNode, ) -> None: - allowed_modes = ["ports", "nodes"] - assert mode in allowed_modes - - match mode: - case "ports": - IpTablesHelper.restore_input_traffic_to_port(node=node) - case "nodes": - IpTablesHelper.restore_input_traffic_to_node(node=node) + IpHelper.restore_input_traffic_to_node(node=node) @reporter.step("Restore blocked nodes") def restore_all_traffic(self): @@ -385,22 +352,25 @@ class ClusterStateController: @reporter.step("Down {interface} on {nodes}") def down_interface(self, nodes: list[ClusterNode], interface: str): for node in nodes: - if_up_down_helper.down_interface(node=node, interface=interface) - assert if_up_down_helper.check_state(node=node, interface=interface) == "DOWN" + node.host.down_interface(interface=interface) + assert node.host.check_state(interface=interface) == "DOWN" self.nodes_with_modified_interface.append(node) @reporter.step("Up {interface} on {nodes}") def up_interface(self, nodes: list[ClusterNode], interface: str): for node in nodes: - if_up_down_helper.up_interface(node=node, interface=interface) - assert if_up_down_helper.check_state(node=node, interface=interface) == "UP" + node.host.up_interface(interface=interface) + assert node.host.check_state(interface=interface) == "UP" if node in self.nodes_with_modified_interface: self.nodes_with_modified_interface.remove(node) @reporter.step("Restore interfaces") def restore_interfaces(self): for node in self.nodes_with_modified_interface: - if_up_down_helper.up_all_interface(node) + dict_interfaces = node.host.config.interfaces.keys() + for name_interface in dict_interfaces: + if "mgmt" not in name_interface: + node.host.up_interface(interface=name_interface) @reporter.step("Get node time") def get_node_date(self, node: ClusterNode) -> datetime: @@ -523,15 +493,14 @@ class ClusterStateController: return disk_controller def _restore_traffic_to_node(self, node): - IpTablesHelper.restore_input_traffic_to_port(node) - IpTablesHelper.restore_input_traffic_to_node(node) + IpHelper.restore_input_traffic_to_node(node) - def _parse_intefaces(self, nodes: list[ClusterNode]): + def _parse_interfaces(self, nodes: list[ClusterNode], name_interface: str): interfaces = [] for node in nodes: dict_interfaces = node.host.config.interfaces for type, ip in dict_interfaces.items(): - if "mgmt" not in type: + if name_interface in type: interfaces.append(ip) return interfaces From be964e731f8b0ff81873ea7d39684bebee2db371 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 13 Dec 2023 13:59:37 +0300 Subject: [PATCH 186/363] [#146] Prettify verifier messages for error rates Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/load_verifiers.py | 40 +++++++++++++--------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/src/frostfs_testlib/load/load_verifiers.py b/src/frostfs_testlib/load/load_verifiers.py index fe39862..5ca92dc 100644 --- a/src/frostfs_testlib/load/load_verifiers.py +++ b/src/frostfs_testlib/load/load_verifiers.py @@ -44,22 +44,20 @@ class LoadVerifier: if deleters and not delete_operations: issues.append(f"No any delete operation was performed") - if 
write_operations and writers and write_errors / write_operations * 100 > self.load_params.error_threshold: - issues.append( - f"Write error rate is greater than threshold: {write_errors / write_operations * 100} > {self.load_params.error_threshold}" - ) - if read_operations and readers and read_errors / read_operations * 100 > self.load_params.error_threshold: - issues.append( - f"Read error rate is greater than threshold: {read_errors / read_operations * 100} > {self.load_params.error_threshold}" - ) - if ( - delete_operations - and deleters - and delete_errors / delete_operations * 100 > self.load_params.error_threshold - ): - issues.append( - f"Delete error rate is greater than threshold: {delete_errors / delete_operations * 100} > {self.load_params.error_threshold}" - ) + error_rate = self._get_error_rate(writers, write_operations, write_errors) + if error_rate > self.load_params.error_threshold: + rate_str = self._get_rate_str(error_rate) + issues.append(f"Write errors exceeded threshold: {rate_str} > {self.load_params.error_threshold}%") + + error_rate = self._get_error_rate(readers, read_operations, read_errors) + if error_rate > self.load_params.error_threshold: + rate_str = self._get_rate_str(error_rate) + issues.append(f"Read errors exceeded threshold: {rate_str} > {self.load_params.error_threshold}%") + + error_rate = self._get_error_rate(deleters, delete_operations, delete_errors) + if error_rate > self.load_params.error_threshold: + rate_str = self._get_rate_str(error_rate) + issues.append(f"Delete errors exceeded threshold: {rate_str} > {self.load_params.error_threshold}%") return issues @@ -76,6 +74,16 @@ class LoadVerifier: ) return verify_issues + def _get_error_rate(self, vus: int, operations: int, errors: int) -> float: + if not operations or not vus: + return 0 + + error_rate = errors / operations * 100 + return error_rate + + def _get_rate_str(self, rate: float, minimal: float = 0.01) -> str: + return f"{rate:.2f}%" if rate >= minimal else f"~{minimal}%" + def _collect_verify_issues_on_process(self, label, load_summary, verification_summary) -> list[str]: issues = [] From 89522b607c650fd85d19bf0d720adf9ef0f6d052 Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Mon, 11 Dec 2023 16:53:14 +0300 Subject: [PATCH 187/363] Update percent of filling --- src/frostfs_testlib/load/k6.py | 29 ++++++++++++++++++++++++- src/frostfs_testlib/load/load_config.py | 2 ++ src/frostfs_testlib/load/runners.py | 4 +++- 3 files changed, 33 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index 2ce7c75..38167d2 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -72,6 +72,19 @@ class K6: ) self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, user, process_id) + def _get_fill_percents(self): + # Last entry of `df` output is an empty line left by split, so it is dropped + fill_percents = self.shell.exec("df -H --output=source,pcent,target | grep frostfs").stdout.split("\n") + return [line.split() for line in fill_percents][:-1] + + def check_fill_percent(self) -> bool: + fill_percents = self._get_fill_percents() + percent_mean = 0 + for line in fill_percents: + percent_mean += float(line[1].split('%')[0]) + percent_mean = percent_mean / len(fill_percents) + logger.info(f"{self.loader.ip} mean fill percent is {percent_mean}") + return percent_mean >= self.load_params.fill_percent + @property def process_dir(self) -> str: return self._k6_process.process_dir @@ -132,7 +145,7 @@ class K6: with reporter.step(f"Start load from loader {self.loader.ip} 
on endpoints {self.endpoints}"): self._k6_process.start() - def wait_until_finished(self, soft_timeout: int = 0) -> None: + def wait_until_finished(self, event, soft_timeout: int = 0) -> None: with reporter.step(f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}"): if self.load_params.scenario == LoadScenario.VERIFY: timeout = self.load_params.verify_time or 0 @@ -175,9 +188,23 @@ class K6: wait_interval = min_wait_interval if self._k6_process is None: assert "No k6 instances were executed" + while timeout > 0: + if self.load_params.fill_percent is not None: + with reporter.step("Check the percentage of filling of all data disks on the node"): + if self.check_fill_percent(): + logger.info(f"Stopping load because disks are filled more than {self.load_params.fill_percent}%") + event.set() + self.stop() + return + + if event.is_set(): + self.stop() + return + if not self._k6_process.running(): return + remaining_time_hours = f"{timeout//3600}h" if timeout // 3600 != 0 else "" remaining_time_minutes = f"{timeout//60%60}m" if timeout // 60 % 60 != 0 else "" logger.info( diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index c1c98fe..df46521 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -195,6 +195,8 @@ class LoadParams: "NO_VERIFY_SSL", False, ) + # Percentage of filling of all data disks on all nodes + fill_percent: Optional[float] = None # ------- COMMON SCENARIO PARAMS ------- # Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value. diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index f5284d8..dd6d50e 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -30,6 +30,7 @@ from frostfs_testlib.testing import parallel, run_optionally from frostfs_testlib.testing.test_control import retry from frostfs_testlib.utils import datetime_utils from frostfs_testlib.utils.file_keeper import FileKeeper +from threading import Event class RunnerBase(ScenarioRunner): @@ -41,7 +42,8 @@ class RunnerBase(ScenarioRunner): @reporter.step("Wait until load finish") def wait_until_finish(self, soft_timeout: int = 0): - parallel([k6.wait_until_finished for k6 in self.k6_instances], soft_timeout=soft_timeout) + event = Event() + parallel([k6.wait_until_finished for k6 in self.k6_instances], event=event, soft_timeout=soft_timeout) @property def is_running(self): From 02f3ef6b4077c01441b7b9d063c442aaa319cc6f Mon Sep 17 00:00:00 2001 From: "d.anurin" Date: Thu, 14 Dec 2023 12:53:51 +0300 Subject: [PATCH 188/363] [#147] Provide custom environment to ssh connection Signed-off-by: Dmitry Anurin --- src/frostfs_testlib/hosting/config.py | 1 + src/frostfs_testlib/shell/command_inspectors.py | 2 +- src/frostfs_testlib/shell/ssh_shell.py | 7 +++++-- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/hosting/config.py b/src/frostfs_testlib/hosting/config.py index 88fe3e7..4ab66d7 100644 --- a/src/frostfs_testlib/hosting/config.py +++ b/src/frostfs_testlib/hosting/config.py @@ -67,6 +67,7 @@ class HostConfig: clis: list[CLIConfig] = field(default_factory=list) attributes: dict[str, str] = field(default_factory=dict) interfaces: dict[str, str] = field(default_factory=dict) + environment: dict[str, str] = field(default_factory=dict) def __post_init__(self) -> None: self.services = [ServiceConfig(**service) for service in self.services 
or []] diff --git a/src/frostfs_testlib/shell/command_inspectors.py b/src/frostfs_testlib/shell/command_inspectors.py index 0003017..8fe2f34 100644 --- a/src/frostfs_testlib/shell/command_inspectors.py +++ b/src/frostfs_testlib/shell/command_inspectors.py @@ -9,7 +9,7 @@ class SudoInspector(CommandInspector): def inspect(self, original_command: str, command: str) -> str: if not command.startswith("sudo"): - return f"sudo -i {command}" + return f"sudo {command}" return command diff --git a/src/frostfs_testlib/shell/ssh_shell.py b/src/frostfs_testlib/shell/ssh_shell.py index a7e6e1d..e718b4d 100644 --- a/src/frostfs_testlib/shell/ssh_shell.py +++ b/src/frostfs_testlib/shell/ssh_shell.py @@ -185,6 +185,7 @@ class SSHShell(Shell): private_key_passphrase: Optional[str] = None, port: str = "22", command_inspectors: Optional[list[CommandInspector]] = None, + custom_environment: Optional[dict] = None, ) -> None: super().__init__() self.connection_provider = SshConnectionProvider() @@ -196,6 +197,8 @@ class SSHShell(Shell): self.command_inspectors = command_inspectors or [] + self.environment = custom_environment + @property def _connection(self): return self.connection_provider.provide(self.host, self.port) @@ -224,7 +227,7 @@ class SSHShell(Shell): @log_command def _exec_interactive(self, command: str, options: CommandOptions) -> CommandResult: - stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout, get_pty=True) + stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout, get_pty=True, environment=self.environment) for interactive_input in options.interactive_inputs: input = interactive_input.input if not input.endswith("\n"): @@ -251,7 +254,7 @@ class SSHShell(Shell): @log_command def _exec_non_interactive(self, command: str, options: CommandOptions) -> CommandResult: try: - stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout) + stdin, stdout, stderr = self._connection.exec_command(command, timeout=options.timeout, environment=self.environment) if options.close_stdin: stdin.close() From 3d63772f4a9dd0aec84220f77336da6157fc1666 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 13 Dec 2023 18:50:06 +0300 Subject: [PATCH 189/363] [#148] Add support for custom registry during read operations Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/load_config.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index df46521..3ea66b8 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -205,8 +205,12 @@ class LoadParams: object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE", False) # For read operations, controls which set objects are read from read_from: Optional[ReadFrom] = None + # For read operations from REGISTRY, controls how long an object must live before it is used for a read operation + read_age: Optional[int] = metadata_field(all_load_scenarios, None, "READ_AGE", False) # Output registry K6 file. Filled automatically. registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE", False) + # Allows reusing a custom registry file left over from another load run + custom_registry: Optional[str] = None # Specifies the minimum duration of every single execution (i.e. iteration). 
# Any iterations that are shorter than this value will cause that VU to # sleep for the remainder of the time until the specified minimum duration is reached. @@ -294,6 +298,11 @@ class LoadParams: if self.read_from == ReadFrom.REGISTRY: self.registry_file = os.path.join(self.working_dir, f"{load_id}_registry.bolt") + + # Custom registry (if set) overrides the auto-generated registry file path + if self.custom_registry is not None: + self.registry_file = self.custom_registry + if self.read_from == ReadFrom.PRESET: self.registry_file = None From 8e739adea5d299e5dea87b5584429a738f079d85 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Fri, 15 Dec 2023 13:13:09 +0300 Subject: [PATCH 190/363] [#150] Increased the status waiting timeout Signed-off-by: Dmitriy Zayakin --- .../storage/controllers/cluster_state_controller.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 290503c..f51be78 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -476,12 +476,12 @@ class ClusterStateController: def _enable_date_synchronizer(self, cluster_node: ClusterNode): shell = cluster_node.host.get_shell() shell.exec("timedatectl set-ntp true") - cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "active", 5) + cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "active", 15) def _disable_date_synchronizer(self, cluster_node: ClusterNode): shell = cluster_node.host.get_shell() shell.exec("timedatectl set-ntp false") - cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "inactive", 5) + cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "inactive", 15) def _get_disk_controller(self, node: StorageNode, device: str, mountpoint: str) -> DiskController: disk_controller_id = DiskController.get_id(node, device) From 663c14470981bdd1445f182dd0d6dc00c2d83662 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Fri, 17 Nov 2023 16:36:24 +0300 Subject: [PATCH 191/363] Search container by name using HTTP requests --- src/frostfs_testlib/steps/cli/container.py | 13 +++++++------ src/frostfs_testlib/steps/s3/s3_helper.py | 6 ++++-- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py index be96138..b3afd88 100644 --- a/src/frostfs_testlib/steps/cli/container.py +++ b/src/frostfs_testlib/steps/cli/container.py @@ -344,12 +344,13 @@ def _parse_cid(output: str) -> str: @reporter.step("Search container by name") -def search_container_by_name(wallet: str, name: str, shell: Shell, endpoint: str): - list_cids = list_containers(wallet, shell, endpoint) - for cid in list_cids: - cont_info = get_container(wallet, cid, shell, endpoint, True) - if cont_info.get("attributes", {}).get("Name", None) == name: - return cid +def search_container_by_name(name: str, node: ClusterNode): + node_shell = node.host.get_shell() + output = node_shell.exec(f"curl -I http://127.0.0.1:8084/{name}") + pattern = r"X-Container-Id: (\S+)" + cid = re.findall(pattern, output.stdout) + if cid: + return cid[0] return None diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py 
b/src/frostfs_testlib/steps/s3/s3_helper.py index 1d7adfa..68d5379 100644 --- a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3/s3_helper.py @@ -231,6 +231,8 @@ def search_nodes_with_bucket( shell: Shell, endpoint: str, ) -> list[ClusterNode]: - cid = search_container_by_name(wallet=wallet, name=bucket_name, shell=shell, endpoint=endpoint) - nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster) + cid = search_container_by_name(name=bucket_name, node=cluster.cluster_nodes[0]) + nodes_list = search_nodes_with_container( + wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster + ) return nodes_list From 10a6efa333cf931260fae399c266100ba6e3e9f1 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 20 Dec 2023 16:02:54 +0300 Subject: [PATCH 192/363] [#151] Refactor load report Signed-off-by: Andrey Berezin --- .../load/interfaces/summarized.py | 93 ++++++ src/frostfs_testlib/load/load_metrics.py | 268 ++++++++++-------- src/frostfs_testlib/load/load_report.py | 168 ++--------- src/frostfs_testlib/load/load_verifiers.py | 70 +---- 4 files changed, 278 insertions(+), 321 deletions(-) create mode 100644 src/frostfs_testlib/load/interfaces/summarized.py diff --git a/src/frostfs_testlib/load/interfaces/summarized.py b/src/frostfs_testlib/load/interfaces/summarized.py new file mode 100644 index 0000000..bca9822 --- /dev/null +++ b/src/frostfs_testlib/load/interfaces/summarized.py @@ -0,0 +1,93 @@ +from dataclasses import dataclass, field + +from frostfs_testlib.load.load_config import LoadParams, LoadScenario +from frostfs_testlib.load.load_metrics import get_metrics_object + + +@dataclass +class SummarizedErrors: + total: int = field(default_factory=int) + percent: float = field(default_factory=float) + threshold: float = field(default_factory=float) + by_node: dict[str, int] = field(default_factory=dict) + + def calc_stats(self, operations): + self.total += sum(self.by_node.values()) + + if not operations: + return + + self.percent = self.total / operations * 100 + + +@dataclass +class SummarizedLatencies: + avg: float = field(default_factory=float) + min: float = field(default_factory=float) + max: float = field(default_factory=float) + by_node: dict[str, dict[str, int]] = field(default_factory=dict) + + def calc_stats(self): + if not self.by_node: + return + + avgs = [lt["avg"] for lt in self.by_node.values()] + self.avg = sum(avgs) / len(avgs) + + minimal = [lt["min"] for lt in self.by_node.values()] + self.min = min(minimal) + + maximum = [lt["max"] for lt in self.by_node.values()] + self.max = max(maximum) + + +@dataclass +class SummarizedStats: + threads: int = field(default_factory=int) + requested_rate: int = field(default_factory=int) + operations: int = field(default_factory=int) + rate: float = field(default_factory=float) + throughput: float = field(default_factory=float) + latencies: SummarizedLatencies = field(default_factory=SummarizedLatencies) + errors: SummarizedErrors = field(default_factory=SummarizedErrors) + passed: bool = True + + def calc_stats(self): + self.errors.calc_stats(self.operations) + self.latencies.calc_stats() + self.passed = self.errors.percent <= self.errors.threshold + + @staticmethod + def collect(load_params: LoadParams, load_summaries: dict) -> dict[str, "SummarizedStats"]: + if load_params.scenario in [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]: + delete_vus = max(load_params.preallocated_deleters or 0, load_params.max_deleters or 0) + write_vus = 
max(load_params.preallocated_writers or 0, load_params.max_writers or 0) + read_vus = max(load_params.preallocated_readers or 0, load_params.max_readers or 0) + else: + write_vus = load_params.writers + read_vus = load_params.readers + delete_vus = load_params.deleters + + summarized = { + "Write": SummarizedStats(threads=write_vus, requested_rate=load_params.write_rate), + "Read": SummarizedStats(threads=read_vus, requested_rate=load_params.read_rate), + "Delete": SummarizedStats(threads=delete_vus, requested_rate=load_params.delete_rate), + } + + for node_key, load_summary in load_summaries.items(): + metrics = get_metrics_object(load_params.scenario, load_summary) + for operation in metrics.operations: + target = summarized[operation._NAME] + if not operation.total_iterations: + continue + target.operations += operation.total_iterations + target.rate += operation.rate + target.latencies.by_node[node_key] = operation.latency + target.throughput += operation.throughput + if metrics.write.failed_iterations: + target.errors.by_node[node_key] = operation.failed_iterations + + for operation in summarized.values(): + operation.calc_stats() + + return summarized diff --git a/src/frostfs_testlib/load/load_metrics.py b/src/frostfs_testlib/load/load_metrics.py index 3f175cf..5502b5c 100644 --- a/src/frostfs_testlib/load/load_metrics.py +++ b/src/frostfs_testlib/load/load_metrics.py @@ -1,95 +1,43 @@ from abc import ABC -from typing import Any +from typing import Any, Optional from frostfs_testlib.load.load_config import LoadScenario -class MetricsBase(ABC): - _WRITE_SUCCESS = "" - _WRITE_ERRORS = "" - _WRITE_THROUGHPUT = "data_sent" - _WRITE_LATENCY = "" - - _READ_SUCCESS = "" - _READ_ERRORS = "" - _READ_LATENCY = "" - _READ_THROUGHPUT = "data_received" - - _DELETE_SUCCESS = "" - _DELETE_LATENCY = "" - _DELETE_ERRORS = "" +class OperationMetric(ABC): + _NAME = "" + _SUCCESS = "" + _ERRORS = "" + _THROUGHPUT = "" + _LATENCY = "" def __init__(self, summary) -> None: self.summary = summary self.metrics = summary["metrics"] @property - def write_total_iterations(self) -> int: - return self._get_metric(self._WRITE_SUCCESS) + self._get_metric(self._WRITE_ERRORS) + def total_iterations(self) -> int: + return self._get_metric(self._SUCCESS) + self._get_metric(self._ERRORS) @property - def write_success_iterations(self) -> int: - return self._get_metric(self._WRITE_SUCCESS) + def success_iterations(self) -> int: + return self._get_metric(self._SUCCESS) @property - def write_latency(self) -> dict: - return self._get_metric(self._WRITE_LATENCY) + def latency(self) -> dict: + return self._get_metric(self._LATENCY) @property - def write_rate(self) -> float: - return self._get_metric_rate(self._WRITE_SUCCESS) + def rate(self) -> float: + return self._get_metric_rate(self._SUCCESS) @property - def write_failed_iterations(self) -> int: - return self._get_metric(self._WRITE_ERRORS) + def failed_iterations(self) -> int: + return self._get_metric(self._ERRORS) @property - def write_throughput(self) -> float: - return self._get_metric_rate(self._WRITE_THROUGHPUT) - - @property - def read_total_iterations(self) -> int: - return self._get_metric(self._READ_SUCCESS) + self._get_metric(self._READ_ERRORS) - - @property - def read_success_iterations(self) -> int: - return self._get_metric(self._READ_SUCCESS) - - @property - def read_latency(self) -> dict: - return self._get_metric(self._READ_LATENCY) - - @property - def read_rate(self) -> int: - return self._get_metric_rate(self._READ_SUCCESS) - - @property - def 
read_failed_iterations(self) -> int: - return self._get_metric(self._READ_ERRORS) - - @property - def read_throughput(self) -> float: - return self._get_metric_rate(self._READ_THROUGHPUT) - - @property - def delete_total_iterations(self) -> int: - return self._get_metric(self._DELETE_SUCCESS) + self._get_metric(self._DELETE_ERRORS) - - @property - def delete_success_iterations(self) -> int: - return self._get_metric(self._DELETE_SUCCESS) - - @property - def delete_latency(self) -> dict: - return self._get_metric(self._DELETE_LATENCY) - - @property - def delete_failed_iterations(self) -> int: - return self._get_metric(self._DELETE_ERRORS) - - @property - def delete_rate(self) -> int: - return self._get_metric_rate(self._DELETE_SUCCESS) + def throughput(self) -> float: + return self._get_metric_rate(self._THROUGHPUT) def _get_metric(self, metric: str) -> int: metrics_method_map = { @@ -104,9 +52,7 @@ class MetricsBase(ABC): metric = self.metrics[metric] metric_type = metric["type"] if metric_type not in metrics_method_map: - raise Exception( - f"Unsupported metric type: {metric_type}, supported: {metrics_method_map.keys()}" - ) + raise Exception(f"Unsupported metric type: {metric_type}, supported: {metrics_method_map.keys()}") return metrics_method_map[metric_type](metric) @@ -119,9 +65,7 @@ class MetricsBase(ABC): metric = self.metrics[metric] metric_type = metric["type"] if metric_type not in metrics_method_map: - raise Exception( - f"Unsupported rate metric type: {metric_type}, supported: {metrics_method_map.keys()}" - ) + raise Exception(f"Unsupported rate metric type: {metric_type}, supported: {metrics_method_map.keys()}") return metrics_method_map[metric_type](metric) @@ -138,63 +82,145 @@ class MetricsBase(ABC): return metric["values"] +class WriteOperationMetric(OperationMetric): + _NAME = "Write" + _SUCCESS = "" + _ERRORS = "" + _THROUGHPUT = "data_sent" + _LATENCY = "" + + +class ReadOperationMetric(OperationMetric): + _NAME = "Read" + _SUCCESS = "" + _ERRORS = "" + _THROUGHPUT = "data_received" + _LATENCY = "" + + +class DeleteOperationMetric(OperationMetric): + _NAME = "Delete" + _SUCCESS = "" + _ERRORS = "" + _THROUGHPUT = "" + _LATENCY = "" + + +class GrpcWriteOperationMetric(WriteOperationMetric): + _SUCCESS = "frostfs_obj_put_total" + _ERRORS = "frostfs_obj_put_fails" + _LATENCY = "frostfs_obj_put_duration" + + +class GrpcReadOperationMetric(ReadOperationMetric): + _SUCCESS = "frostfs_obj_get_total" + _ERRORS = "frostfs_obj_get_fails" + _LATENCY = "frostfs_obj_get_duration" + + +class GrpcDeleteOperationMetric(DeleteOperationMetric): + _SUCCESS = "frostfs_obj_delete_total" + _ERRORS = "frostfs_obj_delete_fails" + _LATENCY = "frostfs_obj_delete_duration" + + +class S3WriteOperationMetric(WriteOperationMetric): + _SUCCESS = "aws_obj_put_total" + _ERRORS = "aws_obj_put_fails" + _LATENCY = "aws_obj_put_duration" + + +class S3ReadOperationMetric(ReadOperationMetric): + _SUCCESS = "aws_obj_get_total" + _ERRORS = "aws_obj_get_fails" + _LATENCY = "aws_obj_get_duration" + + +class S3DeleteOperationMetric(DeleteOperationMetric): + _SUCCESS = "aws_obj_delete_total" + _ERRORS = "aws_obj_delete_fails" + _LATENCY = "aws_obj_delete_duration" + + +class S3LocalWriteOperationMetric(WriteOperationMetric): + _SUCCESS = "s3local_obj_put_total" + _ERRORS = "s3local_obj_put_fails" + _LATENCY = "s3local_obj_put_duration" + + +class S3LocalReadOperationMetric(ReadOperationMetric): + _SUCCESS = "s3local_obj_get_total" + _ERRORS = "s3local_obj_get_fails" + _LATENCY = "s3local_obj_get_duration" + + 
+class LocalWriteOperationMetric(WriteOperationMetric): + _SUCCESS = "local_obj_put_total" + _ERRORS = "local_obj_put_fails" + _LATENCY = "local_obj_put_duration" + + +class LocalReadOperationMetric(ReadOperationMetric): + _SUCCESS = "local_obj_get_total" + _ERRORS = "local_obj_get_fails" + + +class LocalDeleteOperationMetric(DeleteOperationMetric): + _SUCCESS = "local_obj_delete_total" + _ERRORS = "local_obj_delete_fails" + + +class VerifyReadOperationMetric(ReadOperationMetric): + _SUCCESS = "verified_obj" + _ERRORS = "invalid_obj" + + +class MetricsBase(ABC): + def __init__(self) -> None: + self.write: Optional[WriteOperationMetric] = None + self.read: Optional[ReadOperationMetric] = None + self.delete: Optional[DeleteOperationMetric] = None + + @property + def operations(self) -> list[OperationMetric]: + return [metric for metric in [self.write, self.read, self.delete] if metric is not None] + + class GrpcMetrics(MetricsBase): - _WRITE_SUCCESS = "frostfs_obj_put_total" - _WRITE_ERRORS = "frostfs_obj_put_fails" - _WRITE_LATENCY = "frostfs_obj_put_duration" - - _READ_SUCCESS = "frostfs_obj_get_total" - _READ_ERRORS = "frostfs_obj_get_fails" - _READ_LATENCY = "frostfs_obj_get_duration" - - _DELETE_SUCCESS = "frostfs_obj_delete_total" - _DELETE_ERRORS = "frostfs_obj_delete_fails" - _DELETE_LATENCY = "frostfs_obj_delete_duration" + def __init__(self, summary) -> None: + super().__init__() + self.write = GrpcWriteOperationMetric(summary) + self.read = GrpcReadOperationMetric(summary) + self.delete = GrpcDeleteOperationMetric(summary) class S3Metrics(MetricsBase): - _WRITE_SUCCESS = "aws_obj_put_total" - _WRITE_ERRORS = "aws_obj_put_fails" - _WRITE_LATENCY = "aws_obj_put_duration" + def __init__(self, summary) -> None: + super().__init__() + self.write = S3WriteOperationMetric(summary) + self.read = S3ReadOperationMetric(summary) + self.delete = S3DeleteOperationMetric(summary) - _READ_SUCCESS = "aws_obj_get_total" - _READ_ERRORS = "aws_obj_get_fails" - _READ_LATENCY = "aws_obj_get_duration" - - _DELETE_SUCCESS = "aws_obj_delete_total" - _DELETE_ERRORS = "aws_obj_delete_fails" - _DELETE_LATENCY = "aws_obj_delete_duration" class S3LocalMetrics(MetricsBase): - _WRITE_SUCCESS = "s3local_obj_put_total" - _WRITE_ERRORS = "s3local_obj_put_fails" - _WRITE_LATENCY = "s3local_obj_put_duration" + def __init__(self, summary) -> None: + super().__init__() + self.write = S3LocalWriteOperationMetric(summary) + self.read = S3LocalReadOperationMetric(summary) - _READ_SUCCESS = "s3local_obj_get_total" - _READ_ERRORS = "s3local_obj_get_fails" - _READ_LATENCY = "s3local_obj_get_duration" class LocalMetrics(MetricsBase): - _WRITE_SUCCESS = "local_obj_put_total" - _WRITE_ERRORS = "local_obj_put_fails" - _WRITE_LATENCY = "local_obj_put_duration" - - _READ_SUCCESS = "local_obj_get_total" - _READ_ERRORS = "local_obj_get_fails" - - _DELETE_SUCCESS = "local_obj_delete_total" - _DELETE_ERRORS = "local_obj_delete_fails" + def __init__(self, summary) -> None: + super().__init__() + self.write = LocalWriteOperationMetric(summary) + self.read = LocalReadOperationMetric(summary) + self.delete = LocalDeleteOperationMetric(summary) class VerifyMetrics(MetricsBase): - _WRITE_SUCCESS = "N/A" - _WRITE_ERRORS = "N/A" - - _READ_SUCCESS = "verified_obj" - _READ_ERRORS = "invalid_obj" - - _DELETE_SUCCESS = "N/A" - _DELETE_ERRORS = "N/A" + def __init__(self, summary) -> None: + super().__init__() + self.read = VerifyReadOperationMetric(summary) def get_metrics_object(load_type: LoadScenario, summary: dict[str, Any]) -> MetricsBase: 
diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py index 105d852..22ddb54 100644 --- a/src/frostfs_testlib/load/load_report.py +++ b/src/frostfs_testlib/load/load_report.py @@ -3,8 +3,8 @@ from typing import Optional import yaml +from frostfs_testlib.load.interfaces.summarized import SummarizedStats from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario -from frostfs_testlib.load.load_metrics import get_metrics_object from frostfs_testlib.utils.converting_utils import calc_unit @@ -114,63 +114,46 @@ class LoadReport: return model_map[self.load_params.scenario] - def _get_operations_sub_section_html( - self, - operation_type: str, - total_operations: int, - requested_rate_str: str, - vus_str: str, - total_rate: float, - throughput: float, - errors: dict[str, int], - latency: dict[str, dict], - ): + def _get_operations_sub_section_html(self, operation_type: str, stats: SummarizedStats): - throughput_html = "" - if throughput > 0: - throughput, unit = calc_unit(throughput) + # calc_unit is applied unconditionally so that `throughput` and `unit` are defined even when throughput is zero + throughput, unit = calc_unit(stats.throughput) + throughput_html = "" + if stats.throughput > 0: throughput_html = self._row("Throughput", f"{throughput:.2f} {unit}/sec") per_node_errors_html = "" - total_errors = 0 - if errors: - total_errors: int = 0 - for node_key, errors in errors.items(): - total_errors += errors - if self.load_params.k6_process_allocation_strategy == K6ProcessAllocationStrategy.PER_ENDPOINT: - per_node_errors_html += self._row(f"At {node_key}", errors) + for node_key, errors in stats.errors.by_node.items(): + if self.load_params.k6_process_allocation_strategy == K6ProcessAllocationStrategy.PER_ENDPOINT: + per_node_errors_html += self._row(f"At {node_key}", errors) latency_html = "" - if latency: - for node_key, latency_dict in latency.items(): - latency_values = "N/A" - if latency_dict: - latency_values = "" - for param_name, param_val in latency_dict.items(): - latency_values += f"{param_name}={param_val:.2f}ms " + for node_key, latencies in stats.latencies.by_node.items(): + latency_values = "N/A" + if latencies: + latency_values = "" + for param_name, param_val in latencies.items(): + latency_values += f"{param_name}={param_val:.2f}ms " - latency_html += self._row(f"{operation_type} latency {node_key.split(':')[0]}", latency_values) + latency_html += self._row(f"{operation_type} latency {node_key.split(':')[0]}", latency_values) object_size, object_size_unit = calc_unit(self.load_params.object_size, 1) duration = self._seconds_to_formatted_duration(self.load_params.load_time) model = self._get_model_string() + requested_rate_str = f"{stats.requested_rate}op/sec" if stats.requested_rate else "" # write 8KB 15h49m 50op/sec 50th open model/closed model/min_iteration duration=1s - 1.636MB/s 199.57451/s - short_summary = f"{operation_type} {object_size}{object_size_unit} {duration} {requested_rate_str} {vus_str} {model} - {throughput:.2f}{unit}/s {total_rate:.2f}/s" - errors_percent = 0 - if total_operations: - errors_percent = total_errors / total_operations * 100.0 + short_summary = f"{operation_type} {object_size}{object_size_unit} {duration} {requested_rate_str} {stats.threads}th {model} - {throughput:.2f}{unit}/s {stats.rate:.2f}/s" html = f"""
{short_summary}
- {self._row("Total operations", total_operations)} - {self._row("OP/sec", f"{total_rate:.2f}")} + {self._row("Total operations", stats.operations)} + {self._row("OP/sec", f"{stats.rate:.2f}")} {throughput_html} {latency_html} {per_node_errors_html} - {self._row("Total", f"{total_errors} ({errors_percent:.2f}%)")} - {self._row("Threshold", f"{self.load_params.error_threshold:.2f}%")} + {self._row("Total", f"{stats.errors.total} ({stats.errors.percent:.2f}%)")} + {self._row("Threshold", f"{stats.errors.threshold:.2f}%")}
{short_summary}
Metrics
Errors


""" @@ -178,111 +161,12 @@ class LoadReport: def _get_totals_section_html(self): html = "" - for i, load_summaries in enumerate(self.load_summaries_list, 1): - html += f"

Load Results for load #{i}

" + for i in range(len(self.load_summaries_list)): + html += f"

Load Results for load #{i+1}

" - write_operations = 0 - write_op_sec = 0 - write_throughput = 0 - write_latency = {} - write_errors = {} - requested_write_rate = self.load_params.write_rate - requested_write_rate_str = f"{requested_write_rate}op/sec" if requested_write_rate else "" - - read_operations = 0 - read_op_sec = 0 - read_throughput = 0 - read_latency = {} - read_errors = {} - requested_read_rate = self.load_params.read_rate - requested_read_rate_str = f"{requested_read_rate}op/sec" if requested_read_rate else "" - - delete_operations = 0 - delete_op_sec = 0 - delete_latency = {} - delete_errors = {} - requested_delete_rate = self.load_params.delete_rate - requested_delete_rate_str = f"{requested_delete_rate}op/sec" if requested_delete_rate else "" - - if self.load_params.scenario in [LoadScenario.gRPC_CAR, LoadScenario.S3_CAR]: - delete_vus = max(self.load_params.preallocated_deleters or 0, self.load_params.max_deleters or 0) - write_vus = max(self.load_params.preallocated_writers or 0, self.load_params.max_writers or 0) - read_vus = max(self.load_params.preallocated_readers or 0, self.load_params.max_readers or 0) - else: - write_vus = self.load_params.writers - read_vus = self.load_params.readers - delete_vus = self.load_params.deleters - - write_vus_str = f"{write_vus}th" - read_vus_str = f"{read_vus}th" - delete_vus_str = f"{delete_vus}th" - - write_section_required = False - read_section_required = False - delete_section_required = False - - for node_key, load_summary in load_summaries.items(): - metrics = get_metrics_object(self.load_params.scenario, load_summary) - write_operations += metrics.write_total_iterations - if write_operations: - write_section_required = True - write_op_sec += metrics.write_rate - write_latency[node_key] = metrics.write_latency - write_throughput += metrics.write_throughput - if metrics.write_failed_iterations: - write_errors[node_key] = metrics.write_failed_iterations - - read_operations += metrics.read_total_iterations - if read_operations: - read_section_required = True - read_op_sec += metrics.read_rate - read_throughput += metrics.read_throughput - read_latency[node_key] = metrics.read_latency - if metrics.read_failed_iterations: - read_errors[node_key] = metrics.read_failed_iterations - - delete_operations += metrics.delete_total_iterations - if delete_operations: - delete_section_required = True - delete_op_sec += metrics.delete_rate - delete_latency[node_key] = metrics.delete_latency - if metrics.delete_failed_iterations: - delete_errors[node_key] = metrics.delete_failed_iterations - - if write_section_required: - html += self._get_operations_sub_section_html( - "Write", - write_operations, - requested_write_rate_str, - write_vus_str, - write_op_sec, - write_throughput, - write_errors, - write_latency, - ) - - if read_section_required: - html += self._get_operations_sub_section_html( - "Read", - read_operations, - requested_read_rate_str, - read_vus_str, - read_op_sec, - read_throughput, - read_errors, - read_latency, - ) - - if delete_section_required: - html += self._get_operations_sub_section_html( - "Delete", - delete_operations, - requested_delete_rate_str, - delete_vus_str, - delete_op_sec, - 0, - delete_errors, - delete_latency, - ) + summarized = SummarizedStats.collect(self.load_params, self.load_summaries_list[i]) + for operation_type, stats in summarized.items(): + if stats.operations: + html += self._get_operations_sub_section_html(operation_type, stats) return html diff --git a/src/frostfs_testlib/load/load_verifiers.py 
b/src/frostfs_testlib/load/load_verifiers.py index 5ca92dc..cbf6f64 100644 --- a/src/frostfs_testlib/load/load_verifiers.py +++ b/src/frostfs_testlib/load/load_verifiers.py @@ -1,4 +1,5 @@ from frostfs_testlib import reporter +from frostfs_testlib.load.interfaces.summarized import SummarizedStats from frostfs_testlib.load.load_config import LoadParams, LoadScenario from frostfs_testlib.load.load_metrics import get_metrics_object @@ -8,56 +9,16 @@ class LoadVerifier: self.load_params = load_params def collect_load_issues(self, load_summaries: dict[str, dict]) -> list[str]: - write_operations = 0 - write_errors = 0 - - read_operations = 0 - read_errors = 0 - - delete_operations = 0 - delete_errors = 0 - - writers = self.load_params.writers or self.load_params.preallocated_writers or 0 - readers = self.load_params.readers or self.load_params.preallocated_readers or 0 - deleters = self.load_params.deleters or self.load_params.preallocated_deleters or 0 - - for load_summary in load_summaries.values(): - metrics = get_metrics_object(self.load_params.scenario, load_summary) - - if writers: - write_operations += metrics.write_total_iterations - write_errors += metrics.write_failed_iterations - - if readers: - read_operations += metrics.read_total_iterations - read_errors += metrics.read_failed_iterations - - if deleters: - delete_operations += metrics.delete_total_iterations - delete_errors += metrics.delete_failed_iterations - + summarized = SummarizedStats.collect(self.load_params, load_summaries) issues = [] - if writers and not write_operations: - issues.append(f"No any write operation was performed") - if readers and not read_operations: - issues.append(f"No any read operation was performed") - if deleters and not delete_operations: - issues.append(f"No any delete operation was performed") - error_rate = self._get_error_rate(writers, write_operations, write_errors) - if error_rate > self.load_params.error_threshold: - rate_str = self._get_rate_str(error_rate) - issues.append(f"Write errors exceeded threshold: {rate_str} > {self.load_params.error_threshold}%") + for operation_type, stats in summarized.items(): + if stats.threads and not stats.operations: + issues.append(f"No {operation_type.lower()} operations were performed") - error_rate = self._get_error_rate(readers, read_operations, read_errors) - if error_rate > self.load_params.error_threshold: - rate_str = self._get_rate_str(error_rate) - issues.append(f"Read errors exceeded threshold: {rate_str} > {self.load_params.error_threshold}%") - - error_rate = self._get_error_rate(deleters, delete_operations, delete_errors) - if error_rate > self.load_params.error_threshold: - rate_str = self._get_rate_str(error_rate) - issues.append(f"Delete errors exceeded threshold: {rate_str} > {self.load_params.error_threshold}%") + if stats.errors.percent > stats.errors.threshold: + rate_str = self._get_rate_str(stats.errors.percent) + issues.append(f"{operation_type} errors exceeded threshold: {rate_str} > {stats.errors.threshold}%") return issues @@ -74,13 +35,6 @@ class LoadVerifier: ) return verify_issues - def _get_error_rate(self, vus: int, operations: int, errors: int) -> float: - if not operations or not vus: - return 0 - - error_rate = errors / operations * 100 - return error_rate - def _get_rate_str(self, rate: float, minimal: float = 0.01) -> str: return f"{rate:.2f}%" if rate >= minimal else f"~{minimal}%" @@ -95,13 +49,13 @@ class LoadVerifier: delete_success = 0 if deleters > 0: - delete_success = load_metrics.delete_success_iterations + 
delete_success = load_metrics.delete.success_iterations if verification_summary: verify_metrics = get_metrics_object(LoadScenario.VERIFY, verification_summary) - verified_objects = verify_metrics.read_success_iterations - invalid_objects = verify_metrics.read_failed_iterations - total_left_objects = load_metrics.write_success_iterations - delete_success + verified_objects = verify_metrics.read.success_iterations + invalid_objects = verify_metrics.read.failed_iterations + total_left_objects = load_metrics.write.success_iterations - delete_success # Due to interruptions we may see total verified objects to be less than written on writers count if abs(total_left_objects - verified_objects) > writers: From 73c362c307bc4eda76acbd4dcdaf2b37112c4592 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Fri, 22 Dec 2023 11:33:41 +0300 Subject: [PATCH 193/363] [#153] Fix stat calculation and add error threshold Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/interfaces/summarized.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/load/interfaces/summarized.py b/src/frostfs_testlib/load/interfaces/summarized.py index bca9822..a005963 100644 --- a/src/frostfs_testlib/load/interfaces/summarized.py +++ b/src/frostfs_testlib/load/interfaces/summarized.py @@ -84,7 +84,8 @@ class SummarizedStats: target.rate += operation.rate target.latencies.by_node[node_key] = operation.latency target.throughput += operation.throughput - if metrics.write.failed_iterations: + target.errors.threshold = load_params.error_threshold + if operation.failed_iterations: target.errors.by_node[node_key] = operation.failed_iterations From a4d1082ed558f7c95c2ebe3f8452720a848f681c Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Thu, 21 Dec 2023 10:47:43 +0300 Subject: [PATCH 194/363] Shards are an attribute of StorageNode class --- src/frostfs_testlib/hosting/interfaces.py | 16 ++- .../storage/dataclasses/frostfs_services.py | 26 ++++- .../storage/dataclasses/shard.py | 99 +++++++++++++++++++ 3 files changed, 135 insertions(+), 6 deletions(-) create mode 100644 src/frostfs_testlib/storage/dataclasses/shard.py diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index daea6eb..3b2d718 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -219,12 +219,22 @@ class Host(ABC): """ @abstractmethod - def delete_pilorama(self, service_name: str) -> None: + def delete_file(self, file_path: str) -> None: """ - Deletes all pilorama.db files in the node. + Deletes the file at the provided file path Args: - service_name: Name of storage node service. 
+ file_path: full path to the file to delete + + """ + + @abstractmethod + def is_file_exist(self, file_path: str) -> bool: + """ + Checks if file exist + + Args: + file_path: full path to the file to check """ diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index 6413ded..33e7894 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -3,7 +3,7 @@ import yaml from frostfs_testlib.blockchain import RPCClient from frostfs_testlib.storage.constants import ConfigAttributes from frostfs_testlib.storage.dataclasses.node_base import NodeBase - +from frostfs_testlib.storage.dataclasses.shard import Shard class InnerRing(NodeBase): """ @@ -148,6 +148,20 @@ class StorageNode(NodeBase): def get_shards_config(self) -> tuple[str, dict]: return self.get_config(self.get_shard_config_path()) + def get_shards(self) -> list[Shard]: + config = self.get_shards_config()[1] + config["storage"]["shard"].pop("default") + return [Shard.from_object(shard) for shard in config["storage"]["shard"].values()] + + def get_shards_from_env(self) -> list[Shard]: + config = self.get_shards_config()[1] + configObj = ConfigObj(StringIO(config)) + + pattern = f"{SHARD_PREFIX}\d*" + num_shards = len(set(re.findall(pattern, self.get_shards_config()))) + + return [Shard.from_config_object(configObj, shard_id) for shard_id in range(num_shards)] + def get_control_endpoint(self) -> str: return self._get_attribute(ConfigAttributes.CONTROL_ENDPOINT) @@ -157,6 +171,9 @@ class StorageNode(NodeBase): def get_data_directory(self) -> str: return self.host.get_data_directory(self.name) + def get_storage_config(self) -> str: + return self.host.get_storage_config(self.name) + def get_http_hostname(self) -> str: return self._get_attribute(ConfigAttributes.HTTP_HOSTNAME) @@ -169,8 +186,11 @@ class StorageNode(NodeBase): def delete_fstree(self): self.host.delete_fstree(self.name) - def delete_pilorama(self): - self.host.delete_pilorama(self.name) + def delete_file(self, file_path: str) -> None: + self.host.delete_file(file_path) + + def is_file_exist(self, file_path) -> bool: + return self.host.is_file_exist(file_path) def delete_metabase(self): self.host.delete_metabase(self.name) diff --git a/src/frostfs_testlib/storage/dataclasses/shard.py b/src/frostfs_testlib/storage/dataclasses/shard.py new file mode 100644 index 0000000..584138d --- /dev/null +++ b/src/frostfs_testlib/storage/dataclasses/shard.py @@ -0,0 +1,99 @@ +import json +import pathlib +import re +from dataclasses import dataclass +from io import StringIO + +import allure +import pytest +import yaml +from configobj import ConfigObj +from frostfs_testlib.cli import FrostfsCli +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT +from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG + +SHARD_PREFIX = "FROSTFS_STORAGE_SHARD_" +BLOBSTOR_PREFIX = "_BLOBSTOR_" + + +@dataclass +class Blobstor: + path: str + path_type: str + + def __eq__(self, other) -> bool: + if not isinstance(other, self.__class__): + raise RuntimeError(f"Only two {self.__class__.__name__} instances can be compared") + return self.path == other.path and self.path_type == other.path_type + + def __hash__(self): + return hash((self.path, self.path_type)) + + @staticmethod + def from_config_object(section: ConfigObj, shard_id: str, blobstor_id: str): + var_prefix = f"{SHARD_PREFIX}{shard_id}{BLOBSTOR_PREFIX}{blobstor_id}" + return 
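
Note: StorageNode.get_shards() above parses the node's YAML shard config into Shard dataclasses. A minimal usage sketch, assuming `node` is a StorageNode from an initialized cluster:

    shards = node.get_shards()
    for shard in shards:
        # Each Blobstor entry carries a path and a type; actual paths depend on deployment.
        print(shard.metabase, shard.writecache, [blobstor.path for blobstor in shard.blobstor])
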
Blobstor(section.get(f"{var_prefix}_PATH"), section.get(f"{var_prefix}_TYPE")) + + +@dataclass +class Shard: + blobstor: list[Blobstor] + metabase: str + writecache: str + pilorama: str + + def __eq__(self, other) -> bool: + if not isinstance(other, self.__class__): + raise RuntimeError(f"Only two {self.__class__.__name__} instances can be compared") + return ( + set(self.blobstor) == set(other.blobstor) + and self.metabase == other.metabase + and self.writecache == other.writecache + and self.pilorama == other.pilorama + ) + + def __hash__(self): + return hash((self.metabase, self.writecache)) + + @staticmethod + def _get_blobstor_count_from_section(config_object: ConfigObj, shard_id: int): + pattern = f"{SHARD_PREFIX}{shard_id}{BLOBSTOR_PREFIX}" + blobstors = {key[: len(pattern) + 2] for key in config_object.keys() if pattern in key} + return len(blobstors) + + @staticmethod + def from_config_object(config_object: ConfigObj, shard_id: int): + var_prefix = f"{SHARD_PREFIX}{shard_id}" + + blobstor_count = Shard._get_blobstor_count_from_section(config_object, shard_id) + blobstors = [ + Blobstor.from_config_object(config_object, shard_id, blobstor_id) for blobstor_id in range(blobstor_count) + ] + + write_cache_enabled = config_object.as_bool(f"{var_prefix}_WRITECACHE_ENABLED") + + return Shard( + blobstors, + config_object.get(f"{var_prefix}_METABASE_PATH"), + config_object.get(f"{var_prefix}_WRITECACHE_PATH") if write_cache_enabled else "", + ) + + @staticmethod + def from_object(shard): + metabase = shard["metabase"]["path"] if "path" in shard["metabase"] else shard["metabase"] + writecache = shard["writecache"]["path"] if "path" in shard["writecache"] else shard["writecache"] + + # Currently due to issue we need to check if pilorama exists in keys + # TODO: make pilorama mandatory after fix + if shard.get("pilorama"): + pilorama = shard["pilorama"]["path"] if "path" in shard["pilorama"] else shard["pilorama"] + else: + pilorama = None + + return Shard( + blobstor=[Blobstor(path=blobstor["path"], path_type=blobstor["type"]) for blobstor in shard["blobstor"]], + metabase=metabase, + writecache=writecache, + pilorama=pilorama + ) + From a3bda0b34f828ee16f577d1e3a59bf0dffe729b0 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Thu, 11 Jan 2024 13:42:02 +0300 Subject: [PATCH 195/363] [#154] Change func search container Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/steps/s3/s3_helper.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py index 68d5379..dbd3765 100644 --- a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3/s3_helper.py @@ -231,8 +231,10 @@ def search_nodes_with_bucket( shell: Shell, endpoint: str, ) -> list[ClusterNode]: - cid = search_container_by_name(name=bucket_name, cluster=cluster) - nodes_list = search_nodes_with_container( - wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster - ) + cid = None + for cluster_node in cluster.cluster_nodes: + cid = search_container_by_name(name=bucket_name, node=cluster_node) + if cid: + break + nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster) return nodes_list From d6a2cf92a262e718afd81009c236b98b396682af Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Thu, 11 Jan 2024 14:51:07 +0300 Subject: [PATCH 196/363] [#155] Change args to optionally Signed-off-by: Dmitriy Zayakin --- 
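
Note: Blobstor and Shard define __eq__ and __hash__ precisely so that shard layouts can be compared as sets, ignoring order. A small sketch with illustrative paths and type (not from a real deployment):

    from frostfs_testlib.storage.dataclasses.shard import Blobstor

    a = Blobstor(path="/srv/blob0", path_type="fstree")
    b = Blobstor(path="/srv/blob0", path_type="fstree")
    assert a == b and len({a, b}) == 1  # deduplicated via __hash__

This is what makes `set(self.blobstor) == set(other.blobstor)` inside Shard.__eq__ meaningful.
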
src/frostfs_testlib/cli/frostfs_cli/shards.py | 27 +++++++------------ 1 file changed, 10 insertions(+), 17 deletions(-) diff --git a/src/frostfs_testlib/cli/frostfs_cli/shards.py b/src/frostfs_testlib/cli/frostfs_cli/shards.py index 6b47ac2..1727249 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/shards.py +++ b/src/frostfs_testlib/cli/frostfs_cli/shards.py @@ -68,11 +68,7 @@ class FrostfsCliShards(CliCommand): return self._execute_with_password( "control shards set-mode", wallet_password, - **{ - param: value - for param, value in locals().items() - if param not in ["self", "wallet_password"] - }, + **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]}, ) def dump( @@ -105,18 +101,14 @@ class FrostfsCliShards(CliCommand): return self._execute_with_password( "control shards dump", wallet_password, - **{ - param: value - for param, value in locals().items() - if param not in ["self", "wallet_password"] - }, + **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]}, ) def list( self, endpoint: str, - wallet: str, - wallet_password: str, + wallet: Optional[str] = None, + wallet_password: Optional[str] = None, address: Optional[str] = None, json_mode: bool = False, timeout: Optional[str] = None, @@ -135,12 +127,13 @@ class FrostfsCliShards(CliCommand): Returns: Command's result. """ + if not wallet_password: + return self._execute( + "control shards list", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) return self._execute_with_password( "control shards list", wallet_password, - **{ - param: value - for param, value in locals().items() - if param not in ["self", "wallet_password"] - }, + **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]}, ) From df8d99d83cf4efd3ceb8b5c629e1f6083e92f153 Mon Sep 17 00:00:00 2001 From: Liza Date: Fri, 29 Dec 2023 01:25:13 +0300 Subject: [PATCH 197/363] [#156] load_time in the format of days, hours and minutes; new params Signed-off-by: Liza --- src/frostfs_testlib/load/load_config.py | 44 +++++++++++- tests/test_load_config.py | 89 +++++++++++++++++++++++++ 2 files changed, 131 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 3ea66b8..6f355fc 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -3,11 +3,28 @@ import os from dataclasses import dataclass, field, fields, is_dataclass from enum import Enum from types import MappingProxyType -from typing import Any, Optional, get_args +from typing import Any, Callable, Optional, get_args from frostfs_testlib.utils.converting_utils import calc_unit +def convert_time_to_seconds(time: int | str | None) -> int: + if time is None: + return None + if str(time).isdigit(): + seconds = int(time) + else: + days, hours, minutes = 0, 0, 0 + if "d" in time: + days, time = time.split("d") + if "h" in time: + hours, time = time.split("h") + if "min" in time: + minutes = time.replace("min", "") + seconds = int(days) * 86400 + int(hours) * 3600 + int(minutes) * 60 + return seconds + + class LoadType(Enum): gRPC = "grpc" S3 = "s3" @@ -76,6 +93,7 @@ def metadata_field( scenario_variable: Optional[str] = None, string_repr: Optional[bool] = True, distributed: Optional[bool] = False, + formatter: Optional[Callable] = None, ): return field( default=None, @@ -85,6 +103,7 @@ def metadata_field( "env_variable": scenario_variable, "string_repr": 
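
Note: convert_time_to_seconds above accepts either bare seconds or a "NdNhNmin" string; the unit tests added later in this patch pin the expected values. A few worked examples:

    convert_time_to_seconds(300)          # 300
    convert_time_to_seconds("1d6h")       # 108000 (86400 + 6 * 3600)
    convert_time_to_seconds("2d3h45min")  # 186300 (2 * 86400 + 3 * 3600 + 45 * 60)
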
string_repr, "distributed": distributed, + "formatter": formatter, }, ) @@ -200,7 +219,9 @@ class LoadParams: # ------- COMMON SCENARIO PARAMS ------- # Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value. - load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION", False) + load_time: Optional[int] = metadata_field( + all_load_scenarios, None, "DURATION", False, formatter=convert_time_to_seconds + ) # Object size in KB for load and preset. object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE", False) # For read operations, controls from which set get objects to read @@ -384,6 +405,25 @@ class LoadParams: return fields_with_data or [] + def _get_field_formatter(self, field_name: str) -> Callable | None: + data_fields = fields(self) + formatters = [ + field.metadata["formatter"] + for field in data_fields + if field.name == field_name and "formatter" in field.metadata and field.metadata["formatter"] != None + ] + if formatters: + return formatters[0] + + return None + + def __setattr__(self, field_name, value): + formatter = self._get_field_formatter(field_name) + if formatter: + value = formatter(value) + + super().__setattr__(field_name, value) + def __str__(self) -> str: load_type_str = self.scenario.value if self.scenario else self.load_type.value # TODO: migrate load_params defaults to testlib diff --git a/tests/test_load_config.py b/tests/test_load_config.py index 926399b..f4fa022 100644 --- a/tests/test_load_config.py +++ b/tests/test_load_config.py @@ -141,6 +141,8 @@ class TestLoadConfig: "--workers '7'", "--containers '16'", "--policy 'container_placement_policy'", + "--ignore-errors", + "--sleep '19'", ] expected_env_vars = { "DURATION": 9, @@ -151,6 +153,7 @@ class TestLoadConfig: "WRITERS": 7, "READERS": 7, "DELETERS": 8, + "READ_AGE": 8, "PREGEN_JSON": "pregen_json", "PREPARE_LOCALLY": True, } @@ -167,6 +170,8 @@ class TestLoadConfig: "--workers '7'", "--containers '16'", "--policy 'container_placement_policy'", + "--ignore-errors", + "--sleep '19'", ] expected_env_vars = { "DURATION": 9, @@ -184,6 +189,7 @@ class TestLoadConfig: "TIME_UNIT": "time_unit", "WRITE_RATE": 10, "READ_RATE": 9, + "READ_AGE": 8, "DELETE_RATE": 11, "PREPARE_LOCALLY": True, } @@ -201,6 +207,8 @@ class TestLoadConfig: "--workers '7'", "--buckets '13'", "--location 's3_location'", + "--ignore-errors", + "--sleep '19'", ] expected_env_vars = { "DURATION": 9, @@ -211,6 +219,7 @@ class TestLoadConfig: "WRITERS": 7, "READERS": 7, "DELETERS": 8, + "READ_AGE": 8, "NO_VERIFY_SSL": True, "PREGEN_JSON": "pregen_json", } @@ -218,6 +227,44 @@ class TestLoadConfig: self._check_preset_params(load_params, expected_preset_args) self._check_env_vars(load_params, expected_env_vars) + @pytest.mark.parametrize("load_params", [LoadScenario.S3_CAR], indirect=True) + def test_argument_parsing_for_s3_car_scenario_with_stringed_time(self, load_params: LoadParams): + load_params.load_time = "2d3h5min" + expected_preset_args = [ + "--size '11'", + "--preload_obj '13'", + "--no-verify-ssl", + "--out 'pregen_json'", + "--workers '7'", + "--buckets '13'", + "--location 's3_location'", + "--ignore-errors", + "--sleep '19'", + ] + expected_env_vars = { + "DURATION": 183900, + "WRITE_OBJ_SIZE": 11, + "REGISTRY_FILE": "registry_file", + "K6_MIN_ITERATION_DURATION": "min_iteration_duration", + "K6_SETUP_TIMEOUT": "setup_timeout", + "NO_VERIFY_SSL": True, + "MAX_WRITERS": 11, + "MAX_READERS": 11, + "MAX_DELETERS": 12, + 
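
Note: because the formatter hook runs in __setattr__, normalization happens at assignment time rather than when env vars are serialized. A sketch, assuming LoadParams can be constructed from just its load type as in the unit tests:

    params = LoadParams(load_type=LoadType.gRPC)
    params.load_time = "1d1min"
    assert params.load_time == 86460  # formatter already applied (86400 + 60)
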
"PRE_ALLOC_DELETERS": 21, + "PRE_ALLOC_READERS": 20, + "PRE_ALLOC_WRITERS": 20, + "PREGEN_JSON": "pregen_json", + "TIME_UNIT": "time_unit", + "WRITE_RATE": 10, + "READ_RATE": 9, + "READ_AGE": 8, + "DELETE_RATE": 11, + } + + self._check_preset_params(load_params, expected_preset_args) + self._check_env_vars(load_params, expected_env_vars) + @pytest.mark.parametrize("load_params", [LoadScenario.S3_CAR], indirect=True) def test_argument_parsing_for_s3_car_scenario(self, load_params: LoadParams): expected_preset_args = [ @@ -228,6 +275,8 @@ class TestLoadConfig: "--workers '7'", "--buckets '13'", "--location 's3_location'", + "--ignore-errors", + "--sleep '19'", ] expected_env_vars = { "DURATION": 9, @@ -246,6 +295,7 @@ class TestLoadConfig: "TIME_UNIT": "time_unit", "WRITE_RATE": 10, "READ_RATE": 9, + "READ_AGE": 8, "DELETE_RATE": 11, } @@ -262,6 +312,8 @@ class TestLoadConfig: "--workers '7'", "--containers '16'", "--policy 'container_placement_policy'", + "--ignore-errors", + "--sleep '19'", ] expected_env_vars = { "DURATION": 9, @@ -273,6 +325,7 @@ class TestLoadConfig: "WRITERS": 7, "READERS": 7, "DELETERS": 8, + "READ_AGE": 8, "PREGEN_JSON": "pregen_json", } @@ -288,6 +341,8 @@ class TestLoadConfig: "--workers '7'", "--containers '16'", "--policy 'container_placement_policy'", + "--ignore-errors", + "--sleep '19'", ] expected_env_vars = { "CONFIG_FILE": "config_file", @@ -299,6 +354,7 @@ class TestLoadConfig: "WRITERS": 7, "READERS": 7, "DELETERS": 8, + "READ_AGE": 8, "PREGEN_JSON": "pregen_json", } @@ -338,6 +394,7 @@ class TestLoadConfig: "--workers '0'", "--containers '0'", "--policy ''", + "--sleep '0'", ] expected_env_vars = { "DURATION": 0, @@ -348,6 +405,7 @@ class TestLoadConfig: "WRITERS": 0, "READERS": 0, "DELETERS": 0, + "READ_AGE": 0, "PREGEN_JSON": "", "PREPARE_LOCALLY": False, } @@ -364,6 +422,7 @@ class TestLoadConfig: "--workers '0'", "--containers '0'", "--policy ''", + "--sleep '0'", ] expected_env_vars = { "DURATION": 0, @@ -382,6 +441,7 @@ class TestLoadConfig: "WRITE_RATE": 0, "READ_RATE": 0, "DELETE_RATE": 0, + "READ_AGE": 0, "PREPARE_LOCALLY": False, } @@ -397,6 +457,7 @@ class TestLoadConfig: "--workers '0'", "--buckets '0'", "--location ''", + "--sleep '0'", ] expected_env_vars = { "DURATION": 0, @@ -407,6 +468,7 @@ class TestLoadConfig: "WRITERS": 0, "READERS": 0, "DELETERS": 0, + "READ_AGE": 0, "NO_VERIFY_SSL": False, "PREGEN_JSON": "", } @@ -423,6 +485,7 @@ class TestLoadConfig: "--workers '0'", "--buckets '0'", "--location ''", + "--sleep '0'", ] expected_env_vars = { "DURATION": 0, @@ -442,6 +505,7 @@ class TestLoadConfig: "WRITE_RATE": 0, "READ_RATE": 0, "DELETE_RATE": 0, + "READ_AGE": 0, } self._check_preset_params(load_params, expected_preset_args) @@ -456,6 +520,7 @@ class TestLoadConfig: "--workers '0'", "--containers '0'", "--policy ''", + "--sleep '0'", ] expected_env_vars = { "DURATION": 0, @@ -467,6 +532,7 @@ class TestLoadConfig: "WRITERS": 0, "READERS": 0, "DELETERS": 0, + "READ_AGE": 0, "PREGEN_JSON": "", } @@ -482,6 +548,7 @@ class TestLoadConfig: "--workers '0'", "--containers '0'", "--policy ''", + "--sleep '0'", ] expected_env_vars = { "CONFIG_FILE": "", @@ -493,6 +560,7 @@ class TestLoadConfig: "WRITERS": 0, "READERS": 0, "DELETERS": 0, + "READ_AGE": 0, "PREGEN_JSON": "", } @@ -531,6 +599,27 @@ class TestLoadConfig: self._check_env_vars(load_params, expected_env_vars) + @pytest.mark.parametrize( + "load_params, load_type", + [(LoadScenario.gRPC, LoadType.gRPC)], + indirect=True, + ) + @pytest.mark.parametrize( + "load_time, 
expected_seconds", + [ + (300, 300), + ("2d3h45min", 186300), + ("1d6h", 108000), + ("1d", 86400), + ("1d1min", 86460), + ("2h", 7200), + ("2h2min", 7320), + ], + ) + def test_convert_time_to_seconds(self, load_params: LoadParams, load_time: str | int, expected_seconds: int): + load_params.load_time = load_time + assert load_params.load_time == expected_seconds + def _check_preset_params(self, load_params: LoadParams, expected_preset_args: list[str]): preset_parameters = load_params.get_preset_arguments() assert sorted(preset_parameters) == sorted(expected_preset_args) From be36a10f1e9e56a55843b8b653a39b7479a8eb38 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Fri, 12 Jan 2024 18:23:04 +0300 Subject: [PATCH 198/363] [#157] fix for dev-env and unit-tests Signed-off-by: Andrey Berezin --- src/frostfs_testlib/hosting/docker_host.py | 14 ++++++++------ src/frostfs_testlib/steps/epoch.py | 2 +- tests/test_dataclasses.py | 16 +++++----------- tests/test_hosting.py | 14 +++++--------- 4 files changed, 19 insertions(+), 27 deletions(-) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 05cd4b2..3c9883a 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -152,9 +152,7 @@ class DockerHost(Host): timeout=service_attributes.start_timeout, ) - def wait_for_service_to_be_in_state( - self, systemd_service_name: str, expected_state: str, timeout: int - ) -> None: + def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None: raise NotImplementedError("Not implemented for docker") def get_data_directory(self, service_name: str) -> str: @@ -181,6 +179,12 @@ class DockerHost(Host): def delete_pilorama(self, service_name: str) -> None: raise NotImplementedError("Not implemented for docker") + def delete_file(self, file_path: str) -> None: + raise NotImplementedError("Not implemented for docker") + + def is_file_exist(self, file_path: str) -> None: + raise NotImplementedError("Not implemented for docker") + def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None: volume_path = self.get_data_directory(service_name) @@ -305,9 +309,7 @@ class DockerHost(Host): return container return None - def _wait_for_container_to_be_in_state( - self, container_name: str, expected_state: str, timeout: int - ) -> None: + def _wait_for_container_to_be_in_state(self, container_name: str, expected_state: str, timeout: int) -> None: iterations = 10 iteration_wait_time = timeout / iterations diff --git a/src/frostfs_testlib/steps/epoch.py b/src/frostfs_testlib/steps/epoch.py index 5a43ba3..ef8f85a 100644 --- a/src/frostfs_testlib/steps/epoch.py +++ b/src/frostfs_testlib/steps/epoch.py @@ -87,7 +87,7 @@ def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] alive_node = alive_node if alive_node else cluster.services(StorageNode)[0] remote_shell = alive_node.host.get_shell() - if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH: + if "force_transactions" not in alive_node.host.config.attributes: # If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests) frostfs_adm = FrostfsAdm( shell=remote_shell, diff --git a/tests/test_dataclasses.py b/tests/test_dataclasses.py index f1cc51e..19f3832 100644 --- a/tests/test_dataclasses.py +++ b/tests/test_dataclasses.py @@ -4,13 +4,7 @@ import pytest from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper from 
frostfs_testlib.storage.dataclasses.acl import EACLRole -from frostfs_testlib.storage.dataclasses.frostfs_services import ( - HTTPGate, - InnerRing, - MorphChain, - S3Gate, - StorageNode, -) +from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode from frostfs_testlib.storage.dataclasses.object_size import ObjectSize @@ -22,10 +16,10 @@ class TestDataclassesStr: [ (Boto3ClientWrapper, "Boto3 client"), (AwsCliClient, "AWS CLI"), - (ObjectSize("simple", 1), "simple object size"), - (ObjectSize("simple", 10), "simple object size"), - (ObjectSize("complex", 5000), "complex object size"), - (ObjectSize("complex", 5555), "complex object size"), + (ObjectSize("simple", 1), "simple"), + (ObjectSize("simple", 10), "simple"), + (ObjectSize("complex", 5000), "complex"), + (ObjectSize("complex", 5555), "complex"), (StorageNode, "StorageNode"), (MorphChain, "MorphChain"), (S3Gate, "S3Gate"), diff --git a/tests/test_hosting.py b/tests/test_hosting.py index 14be8c5..39580cb 100644 --- a/tests/test_hosting.py +++ b/tests/test_hosting.py @@ -15,6 +15,7 @@ class TestHosting(TestCase): HOST1 = { "address": HOST1_ADDRESS, "plugin_name": HOST1_PLUGIN, + "healthcheck_plugin_name": "basic", "attributes": HOST1_ATTRIBUTES, "clis": HOST1_CLIS, "services": HOST1_SERVICES, @@ -32,6 +33,7 @@ class TestHosting(TestCase): HOST2 = { "address": HOST2_ADDRESS, "plugin_name": HOST2_PLUGIN, + "healthcheck_plugin_name": "basic", "attributes": HOST2_ATTRIBUTES, "clis": HOST2_CLIS, "services": HOST2_SERVICES, @@ -52,18 +54,14 @@ class TestHosting(TestCase): self.assertEqual(host1.config.plugin_name, self.HOST1_PLUGIN) self.assertDictEqual(host1.config.attributes, self.HOST1_ATTRIBUTES) self.assertListEqual(host1.config.clis, [CLIConfig(**cli) for cli in self.HOST1_CLIS]) - self.assertListEqual( - host1.config.services, [ServiceConfig(**service) for service in self.HOST1_SERVICES] - ) + self.assertListEqual(host1.config.services, [ServiceConfig(**service) for service in self.HOST1_SERVICES]) host2 = hosting.get_host_by_address(self.HOST2_ADDRESS) self.assertEqual(host2.config.address, self.HOST2_ADDRESS) self.assertEqual(host2.config.plugin_name, self.HOST2_PLUGIN) self.assertDictEqual(host2.config.attributes, self.HOST2_ATTRIBUTES) self.assertListEqual(host2.config.clis, [CLIConfig(**cli) for cli in self.HOST2_CLIS]) - self.assertListEqual( - host2.config.services, [ServiceConfig(**service) for service in self.HOST2_SERVICES] - ) + self.assertListEqual(host2.config.services, [ServiceConfig(**service) for service in self.HOST2_SERVICES]) def test_get_host_by_service(self): hosting = Hosting() @@ -104,9 +102,7 @@ class TestHosting(TestCase): services = hosting.find_service_configs(rf"^{self.SERVICE_NAME_PREFIX}") self.assertEqual(len(services), 2) for service in services: - self.assertEqual( - service.name[: len(self.SERVICE_NAME_PREFIX)], self.SERVICE_NAME_PREFIX - ) + self.assertEqual(service.name[: len(self.SERVICE_NAME_PREFIX)], self.SERVICE_NAME_PREFIX) service1 = hosting.find_service_configs(self.SERVICE1["name"]) self.assertEqual(len(service1), 1) From 40fa2c24cc159909d3b9fec391f56dffc4edc7da Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Fri, 12 Jan 2024 20:25:39 +0300 Subject: [PATCH 199/363] rename local_config_path --- src/frostfs_testlib/storage/constants.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index b1b7995..710262a 100644 --- 
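
Note: the hosting fixtures above now carry healthcheck_plugin_name alongside plugin_name, so host specs need both. A minimal host entry sketch (address and plugin values are placeholders, not a real deployment):

    host_config = {
        "address": "10.10.10.10",
        "plugin_name": "docker",
        "healthcheck_plugin_name": "basic",
        "attributes": {},
        "clis": [],
        "services": [],
    }
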
a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -8,7 +8,7 @@ class ConfigAttributes: SHARD_CONFIG_PATH = "shard_config_path" LOGGER_CONFIG_PATH = "logger_config_path" LOCAL_WALLET_PATH = "local_wallet_path" - LOCAL_WALLET_CONFIG = "local_config_path" + LOCAL_WALLET_CONFIG = "local_wallet_config_path" ENDPOINT_DATA_0 = "endpoint_data0" ENDPOINT_DATA_1 = "endpoint_data1" ENDPOINT_INTERNAL = "endpoint_internal0" From c0a25ab699088a79c535aeded3b4150e922be5dd Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Thu, 18 Jan 2024 10:41:36 +0300 Subject: [PATCH 200/363] Support of custom version parameter instead of --version for all bins --- src/frostfs_testlib/utils/version_utils.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/utils/version_utils.py b/src/frostfs_testlib/utils/version_utils.py index 42bde6d..75ce8a5 100644 --- a/src/frostfs_testlib/utils/version_utils.py +++ b/src/frostfs_testlib/utils/version_utils.py @@ -40,16 +40,20 @@ def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]: for service_config in host.config.services: exec_path = service_config.attributes.get("exec_path") requires_check = service_config.attributes.get("requires_version_check", "true") + version_parameter = service_config.attributes.get("custom_version_parameter", "--version") if exec_path: binary_path_by_name[service_config.name] = { "exec_path": exec_path, "check": requires_check.lower() == "true", + "version_parameter": version_parameter, } for cli_config in host.config.clis: requires_check = cli_config.attributes.get("requires_version_check", "true") + version_parameter = service_config.attributes.get("custom_version_parameter", "--version") binary_path_by_name[cli_config.name] = { "exec_path": cli_config.exec_path, "check": requires_check.lower() == "true", + "version_parameter": version_parameter, } shell = host.get_shell() @@ -57,7 +61,7 @@ def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]: for binary_name, binary in binary_path_by_name.items(): try: binary_path = binary["exec_path"] - result = shell.exec(f"{binary_path} --version") + result = shell.exec(f"{binary_path} {binary['version_parameter']}") versions_at_host[binary_name] = {"version": _parse_version(result.stdout), "check": binary["check"]} except Exception as exc: logger.error(f"Cannot get version for {binary_path} because of\n{exc}") From 328e43fe674d16e90c162d1bd6cfb2fd463de012 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Mon, 22 Jan 2024 14:14:10 +0300 Subject: [PATCH 201/363] [#162] Refactor frostfs-cli functional Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/cli/frostfs_cli/container.py | 8 +------- src/frostfs_testlib/cli/frostfs_cli/object.py | 6 ++---- src/frostfs_testlib/defaults.py | 2 +- src/frostfs_testlib/resources/cli.py | 2 +- src/frostfs_testlib/steps/cli/container.py | 7 +++---- src/frostfs_testlib/steps/cli/object.py | 13 +++++++------ src/frostfs_testlib/storage/constants.py | 1 + .../storage/dataclasses/node_base.py | 15 +++++++++++++-- 8 files changed, 29 insertions(+), 25 deletions(-) diff --git a/src/frostfs_testlib/cli/frostfs_cli/container.py b/src/frostfs_testlib/cli/frostfs_cli/container.py index 5ea8ba8..374c880 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/container.py +++ b/src/frostfs_testlib/cli/frostfs_cli/container.py @@ -65,7 +65,6 @@ class FrostfsCliContainer(CliCommand): ttl: Optional[int] = None, xhdr: Optional[dict] = None, force: bool = False, - timeout: 
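
Note: version collection now reads two per-binary attributes, requires_version_check and custom_version_parameter, so binaries whose version flag is not --version can still be probed. A sketch of the relevant service attributes in a hosting config (the exec path is a placeholder):

    attributes = {
        "exec_path": "/usr/bin/frostfs-node",
        "requires_version_check": "true",
        "custom_version_parameter": "version",  # used in place of the default --version
    }
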
Optional[str] = None, ) -> CommandResult: """ Delete an existing container. @@ -81,7 +80,6 @@ class FrostfsCliContainer(CliCommand): ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. - timeout: Timeout for the operation (default 15s). Returns: Command's result. @@ -298,9 +296,5 @@ class FrostfsCliContainer(CliCommand): return self._execute( f"container nodes {from_str}", - **{ - param: value - for param, value in locals().items() - if param not in ["self", "from_file", "from_str"] - }, + **{param: value for param, value in locals().items() if param not in ["self", "from_file", "from_str"]}, ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py index 476af68..0e4654b 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/object.py +++ b/src/frostfs_testlib/cli/frostfs_cli/object.py @@ -124,9 +124,7 @@ class FrostfsCliObject(CliCommand): """ return self._execute( "object hash", - **{ - param: value for param, value in locals().items() if param not in ["self", "params"] - }, + **{param: value for param, value in locals().items() if param not in ["self", "params"]}, ) def head( @@ -355,8 +353,8 @@ class FrostfsCliObject(CliCommand): def nodes( self, rpc_endpoint: str, - wallet: str, cid: str, + wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, generate_key: Optional = None, diff --git a/src/frostfs_testlib/defaults.py b/src/frostfs_testlib/defaults.py index 687fbd6..22097be 100644 --- a/src/frostfs_testlib/defaults.py +++ b/src/frostfs_testlib/defaults.py @@ -1,5 +1,5 @@ class Options: - DEFAULT_SHELL_TIMEOUT = 90 + DEFAULT_SHELL_TIMEOUT = 120 @staticmethod def get_default_shell_timeout(): diff --git a/src/frostfs_testlib/resources/cli.py b/src/frostfs_testlib/resources/cli.py index 5f7d468..06a9832 100644 --- a/src/frostfs_testlib/resources/cli.py +++ b/src/frostfs_testlib/resources/cli.py @@ -9,4 +9,4 @@ FROSTFS_ADM_EXEC = os.getenv("FROSTFS_ADM_EXEC", "frostfs-adm") # Config for frostfs-adm utility. Optional if tests are running against devenv FROSTFS_ADM_CONFIG_PATH = os.getenv("FROSTFS_ADM_CONFIG_PATH") -CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", None) +CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", "100s") diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py index b3afd88..3cc3f35 100644 --- a/src/frostfs_testlib/steps/cli/container.py +++ b/src/frostfs_testlib/steps/cli/container.py @@ -1,11 +1,12 @@ import json import logging import re -import requests from dataclasses import dataclass from time import sleep from typing import Optional, Union +import requests + from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsCli from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC @@ -291,18 +292,17 @@ def delete_container( force: bool = False, session_token: Optional[str] = None, await_mode: bool = False, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, ) -> None: """ A wrapper for `frostfs-cli container delete` call. Args: + await_mode: Block execution until container is removed. 
wallet (str): path to a wallet on whose behalf we delete the container cid (str): ID of the container to delete shell: executor for cli command endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key force (bool): do not check whether container contains locks and remove immediately session_token: a path to session token file - timeout: Timeout for the operation. This function doesn't return anything. """ @@ -314,7 +314,6 @@ def delete_container( force=force, session=session_token, await_mode=await_mode, - timeout=timeout, ) diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index 803524a..610b76a 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ -732,23 +732,24 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict: @reporter.step("Search object nodes") def get_object_nodes( cluster: Cluster, - wallet: str, cid: str, oid: str, - shell: Shell, - endpoint: str, + alive_node: ClusterNode, bearer: str = "", xhdr: Optional[dict] = None, is_direct: bool = False, verify_presence_all: bool = False, - wallet_config: Optional[str] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, ) -> list[ClusterNode]: - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) + shell = alive_node.host.get_shell() + endpoint = alive_node.storage_node.get_rpc_endpoint() + wallet = alive_node.storage_node.get_remote_wallet_path() + wallet_config = alive_node.storage_node.get_remote_wallet_config_path() + + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config) result_object_nodes = cli.object.nodes( rpc_endpoint=endpoint, - wallet=wallet, cid=cid, oid=oid, bearer=bearer, diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 710262a..5b9d694 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -9,6 +9,7 @@ class ConfigAttributes: LOGGER_CONFIG_PATH = "logger_config_path" LOCAL_WALLET_PATH = "local_wallet_path" LOCAL_WALLET_CONFIG = "local_wallet_config_path" + REMOTE_WALLET_CONFIG = "remote_wallet_config_path" ENDPOINT_DATA_0 = "endpoint_data0" ENDPOINT_DATA_1 = "endpoint_data1" ENDPOINT_INTERNAL = "endpoint_internal0" diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index bf36665..72b12a9 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -114,6 +114,14 @@ class NodeBase(HumanReadableABC): ConfigAttributes.CONFIG_PATH, ) + def get_remote_wallet_config_path(self) -> str: + """ + Returns node config file path located on remote host + """ + return self._get_attribute( + ConfigAttributes.REMOTE_WALLET_CONFIG, + ) + def get_wallet_config_path(self) -> str: return self._get_attribute( ConfigAttributes.LOCAL_WALLET_CONFIG, @@ -125,8 +133,11 @@ class NodeBase(HumanReadableABC): Returns config path for logger located on remote host """ config_attributes = self.host.get_service_config(self.name) - return self._get_attribute( - ConfigAttributes.LOGGER_CONFIG_PATH) if ConfigAttributes.LOGGER_CONFIG_PATH in config_attributes.attributes else None + return ( + self._get_attribute(ConfigAttributes.LOGGER_CONFIG_PATH) + if ConfigAttributes.LOGGER_CONFIG_PATH in config_attributes.attributes + else None + ) @property def config_dir(self) -> str: From e04fac0770be267b0c2cf094d164c239d7a3be07 Mon Sep 17 00:00:00 2001 From: Andrey Berezin 
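
Note: after this refactor get_object_nodes derives the shell, RPC endpoint and remote wallet from the alive node itself, so callers no longer thread wallet/shell/endpoint through. A usage sketch, assuming cluster, cid and oid already exist:

    alive_node = cluster.cluster_nodes[0]
    object_nodes = get_object_nodes(cluster, cid, oid, alive_node)
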
Date: Mon, 22 Jan 2024 19:06:38 +0300 Subject: [PATCH 202/363] [#164] Add local flag to preset in load Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/load_config.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 6f355fc..f072a4e 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -166,6 +166,9 @@ class Preset: # Flag to control preset erorrs ignore_errors: Optional[bool] = metadata_field(all_load_scenarios, "ignore-errors", None, False) + # Flag to ensure created containers store data on local endpoints + local: Optional[bool] = metadata_field(grpc_preset_scenarios, "local", None, False) + @dataclass class LoadParams: From 82f9df088a78b284299ce9deb6bec317d48a51ca Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 24 Jan 2024 15:23:26 +0300 Subject: [PATCH 203/363] [#167] Strip components for new xk6 archive and update unit tests Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/runners.py | 4 ++-- tests/test_load_config.py | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index dd6d50e..532c590 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -4,6 +4,7 @@ import math import re import time from dataclasses import fields +from threading import Event from typing import Optional from urllib.parse import urlparse @@ -30,7 +31,6 @@ from frostfs_testlib.testing import parallel, run_optionally from frostfs_testlib.testing.test_control import retry from frostfs_testlib.utils import datetime_utils from frostfs_testlib.utils.file_keeper import FileKeeper -from threading import Event class RunnerBase(ScenarioRunner): @@ -314,7 +314,7 @@ class LocalRunner(RunnerBase): with reporter.step("Download K6"): shell.exec(f"sudo rm -rf {k6_dir};sudo mkdir {k6_dir}") shell.exec(f"sudo curl -so {k6_dir}/k6.tar.gz {load_params.k6_url}") - shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz -C {k6_dir}") + shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz --strip-components 2 -C {k6_dir}") shell.exec(f"sudo chmod -R 777 {k6_dir}") with reporter.step("Create empty_passwd"): diff --git a/tests/test_load_config.py b/tests/test_load_config.py index f4fa022..834d051 100644 --- a/tests/test_load_config.py +++ b/tests/test_load_config.py @@ -143,6 +143,7 @@ class TestLoadConfig: "--policy 'container_placement_policy'", "--ignore-errors", "--sleep '19'", + "--local", ] expected_env_vars = { "DURATION": 9, @@ -172,6 +173,7 @@ class TestLoadConfig: "--policy 'container_placement_policy'", "--ignore-errors", "--sleep '19'", + "--local", ] expected_env_vars = { "DURATION": 9, @@ -304,6 +306,7 @@ class TestLoadConfig: @pytest.mark.parametrize("load_params", [LoadScenario.HTTP], indirect=True) def test_argument_parsing_for_http_scenario(self, load_params: LoadParams): + load_params.preset.local = False expected_preset_args = [ "--no-verify-ssl", "--size '11'", @@ -334,6 +337,7 @@ class TestLoadConfig: @pytest.mark.parametrize("load_params", [LoadScenario.LOCAL], indirect=True) def test_argument_parsing_for_local_scenario(self, load_params: LoadParams): + load_params.preset.local = False expected_preset_args = [ "--size '11'", "--preload_obj '13'", From 0d7a15877c17551f4f2a3dc00ce06ac46a8d6769 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Fri, 26 Jan 2024 15:29:02 +0300 Subject: [PATCH 204/363] [#169] Update metrics Signed-off-by: 
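
Note: with Preset.local set, gRPC preset invocations gain a --local flag (the updated tests assert it next to --ignore-errors). A sketch, assuming a LoadParams built for a gRPC scenario:

    load_params.preset.local = True
    assert "--local" in load_params.get_preset_arguments()
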
Andrey Berezin --- src/frostfs_testlib/load/load_metrics.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/frostfs_testlib/load/load_metrics.py b/src/frostfs_testlib/load/load_metrics.py index 5502b5c..2dad3f6 100644 --- a/src/frostfs_testlib/load/load_metrics.py +++ b/src/frostfs_testlib/load/load_metrics.py @@ -107,66 +107,66 @@ class DeleteOperationMetric(OperationMetric): class GrpcWriteOperationMetric(WriteOperationMetric): - _SUCCESS = "frostfs_obj_put_total" + _SUCCESS = "frostfs_obj_put_success" _ERRORS = "frostfs_obj_put_fails" _LATENCY = "frostfs_obj_put_duration" class GrpcReadOperationMetric(ReadOperationMetric): - _SUCCESS = "frostfs_obj_get_total" + _SUCCESS = "frostfs_obj_get_success" _ERRORS = "frostfs_obj_get_fails" _LATENCY = "frostfs_obj_get_duration" class GrpcDeleteOperationMetric(DeleteOperationMetric): - _SUCCESS = "frostfs_obj_delete_total" + _SUCCESS = "frostfs_obj_delete_success" _ERRORS = "frostfs_obj_delete_fails" _LATENCY = "frostfs_obj_delete_duration" class S3WriteOperationMetric(WriteOperationMetric): - _SUCCESS = "aws_obj_put_total" + _SUCCESS = "aws_obj_put_success" _ERRORS = "aws_obj_put_fails" _LATENCY = "aws_obj_put_duration" class S3ReadOperationMetric(ReadOperationMetric): - _SUCCESS = "aws_obj_get_total" + _SUCCESS = "aws_obj_get_success" _ERRORS = "aws_obj_get_fails" _LATENCY = "aws_obj_get_duration" class S3DeleteOperationMetric(DeleteOperationMetric): - _SUCCESS = "aws_obj_delete_total" + _SUCCESS = "aws_obj_delete_success" _ERRORS = "aws_obj_delete_fails" _LATENCY = "aws_obj_delete_duration" class S3LocalWriteOperationMetric(WriteOperationMetric): - _SUCCESS = "s3local_obj_put_total" + _SUCCESS = "s3local_obj_put_success" _ERRORS = "s3local_obj_put_fails" _LATENCY = "s3local_obj_put_duration" class S3LocalReadOperationMetric(ReadOperationMetric): - _SUCCESS = "s3local_obj_get_total" + _SUCCESS = "s3local_obj_get_success" _ERRORS = "s3local_obj_get_fails" _LATENCY = "s3local_obj_get_duration" class LocalWriteOperationMetric(WriteOperationMetric): - _SUCCESS = "local_obj_put_total" + _SUCCESS = "local_obj_put_success" _ERRORS = "local_obj_put_fails" _LATENCY = "local_obj_put_duration" class LocalReadOperationMetric(ReadOperationMetric): - _SUCCESS = "local_obj_get_total" + _SUCCESS = "local_obj_get_success" _ERRORS = "local_obj_get_fails" class LocalDeleteOperationMetric(DeleteOperationMetric): - _SUCCESS = "local_obj_delete_total" + _SUCCESS = "local_obj_delete_success" _ERRORS = "local_obj_delete_fails" From 6caa77dedfc0f4ecedc440c0e734bd5b695f5787 Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Wed, 31 Jan 2024 16:42:30 +0300 Subject: [PATCH 205/363] [#172] parallel get remote binaries versions --- src/frostfs_testlib/utils/version_utils.py | 100 +++++++++++++-------- 1 file changed, 61 insertions(+), 39 deletions(-) diff --git a/src/frostfs_testlib/utils/version_utils.py b/src/frostfs_testlib/utils/version_utils.py index 75ce8a5..2c1f4ab 100644 --- a/src/frostfs_testlib/utils/version_utils.py +++ b/src/frostfs_testlib/utils/version_utils.py @@ -2,10 +2,11 @@ import logging import re from frostfs_testlib.cli import FrostfsAdm, FrostfsCli -from frostfs_testlib.hosting import Hosting +from frostfs_testlib.hosting import Host, Hosting from frostfs_testlib.resources.cli import FROSTFS_ADM_EXEC, FROSTFS_AUTHMATE_EXEC, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell +from 
frostfs_testlib.testing.parallel import parallel logger = logging.getLogger("NeoLogger") @@ -33,53 +34,74 @@ def get_local_binaries_versions(shell: Shell) -> dict[str, str]: return versions +def parallel_binary_verions(host: Host) -> dict[str, str]: + versions_by_host = {} + + binary_path_by_name = {} # Maps binary name to executable path + for service_config in host.config.services: + exec_path = service_config.attributes.get("exec_path") + requires_check = service_config.attributes.get("requires_version_check", "true") + if exec_path: + binary_path_by_name[service_config.name] = { + "exec_path": exec_path, + "check": requires_check.lower() == "true", + } + for cli_config in host.config.clis: + requires_check = cli_config.attributes.get("requires_version_check", "true") + binary_path_by_name[cli_config.name] = { + "exec_path": cli_config.exec_path, + "check": requires_check.lower() == "true", + } + + shell = host.get_shell() + versions_at_host = {} + for binary_name, binary in binary_path_by_name.items(): + try: + binary_path = binary["exec_path"] + result = shell.exec(f"{binary_path} --version") + versions_at_host[binary_name] = {"version": _parse_version(result.stdout), "check": binary["check"]} + except Exception as exc: + logger.error(f"Cannot get version for {binary_path} because of\n{exc}") + versions_at_host[binary_name] = {"version": "Unknown", "check": binary["check"]} + versions_by_host[host.config.address] = versions_at_host + return versions_by_host + + def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]: versions_by_host = {} - for host in hosting.hosts: - binary_path_by_name = {} # Maps binary name to executable path - for service_config in host.config.services: - exec_path = service_config.attributes.get("exec_path") - requires_check = service_config.attributes.get("requires_version_check", "true") - version_parameter = service_config.attributes.get("custom_version_parameter", "--version") - if exec_path: - binary_path_by_name[service_config.name] = { - "exec_path": exec_path, - "check": requires_check.lower() == "true", - "version_parameter": version_parameter, - } - for cli_config in host.config.clis: - requires_check = cli_config.attributes.get("requires_version_check", "true") - version_parameter = service_config.attributes.get("custom_version_parameter", "--version") - binary_path_by_name[cli_config.name] = { - "exec_path": cli_config.exec_path, - "check": requires_check.lower() == "true", - "version_parameter": version_parameter, - } - - shell = host.get_shell() - versions_at_host = {} - for binary_name, binary in binary_path_by_name.items(): - try: - binary_path = binary["exec_path"] - result = shell.exec(f"{binary_path} {binary['version_parameter']}") - versions_at_host[binary_name] = {"version": _parse_version(result.stdout), "check": binary["check"]} - except Exception as exc: - logger.error(f"Cannot get version for {binary_path} because of\n{exc}") - versions_at_host[binary_name] = {"version": "Unknown", "check": binary["check"]} - versions_by_host[host.config.address] = versions_at_host + future_binary_verions = parallel(parallel_binary_verions, parallel_items=hosting.hosts) + for future in future_binary_verions: + versions_by_host.update(future.result()) # Consolidate versions across all hosts + cheak_versions = {} + exсeptions = [] + exception = set() + previous_host = None versions = {} + captured_version = None for host, binary_versions in versions_by_host.items(): for name, binary in binary_versions.items(): - captured_version = 
versions.get(name, {}).get("version") version = binary["version"] - if captured_version: - assert captured_version == version, f"Binary {name} has inconsistent version on host {host}" + if not cheak_versions.get(f'{name[:-2]}', None): + captured_version = cheak_versions.get(f'{name[:-2]}',{}).get(host, {}).get(captured_version) + cheak_versions[f'{name[:-2]}'] = {host: {version: name}} else: - versions[name] = {"version": version, "check": binary["check"]} - return versions - + captured_version = list(cheak_versions.get(f'{name[:-2]}',{}).get(previous_host).keys())[0] + cheak_versions[f'{name[:-2]}'].update({host:{version:name}}) + + if captured_version and captured_version != version: + exception.add(name[:-2]) + + versions[name] = {"version": version, "check": binary["check"]} + previous_host = host + if exception: + for i in exception: + for host in versions_by_host.keys(): + for version, name in cheak_versions.get(i).get(host).items(): + exсeptions.append(f'Binary {name} has inconsistent version {version} on host {host}') + exсeptions.append('\n') + return versions, exсeptions def _parse_version(version_output: str) -> str: version = re.search(r"version[:\s]*v?(.+)", version_output, re.IGNORECASE) From 8ba2cb80308cd588ca2fb360bfbcb7a231be7573 Mon Sep 17 00:00:00 2001 From: mkadilov Date: Wed, 31 Jan 2024 15:43:24 +0300 Subject: [PATCH 206/363] [#171] Components versions check Components versions check Signed-off-by: Mikhail Kadilov m.kadilov@yadro.com --- pyproject.toml | 10 +++++----- src/frostfs_testlib/storage/cluster.py | 8 ++++---- src/frostfs_testlib/storage/constants.py | 8 -------- 3 files changed, 9 insertions(+), 17 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 7d3e5b0..74a163e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -51,11 +51,11 @@ basic = "frostfs_testlib.healthcheck.basic_healthcheck:BasicHealthcheck" config = "frostfs_testlib.storage.controllers.state_managers.config_state_manager:ConfigStateManager" [project.entry-points."frostfs.testlib.services"] -s = "frostfs_testlib.storage.dataclasses.frostfs_services:StorageNode" -s3-gate = "frostfs_testlib.storage.dataclasses.frostfs_services:S3Gate" -http-gate = "frostfs_testlib.storage.dataclasses.frostfs_services:HTTPGate" -morph-chain = "frostfs_testlib.storage.dataclasses.frostfs_services:MorphChain" -ir = "frostfs_testlib.storage.dataclasses.frostfs_services:InnerRing" +frostfs-storage = "frostfs_testlib.storage.dataclasses.frostfs_services:StorageNode" +frostfs-s3 = "frostfs_testlib.storage.dataclasses.frostfs_services:S3Gate" +frostfs-http = "frostfs_testlib.storage.dataclasses.frostfs_services:HTTPGate" +neo-go = "frostfs_testlib.storage.dataclasses.frostfs_services:MorphChain" +frostfs-ir = "frostfs_testlib.storage.dataclasses.frostfs_services:InnerRing" [tool.isort] profile = "black" diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index 313215a..c867515 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -105,7 +105,7 @@ class ClusterNode: service_entry = self.class_registry.get_entry(service_type) service_name = service_entry["hosting_service_name"] - pattern = f"{service_name}{self.id:02}" + pattern = f"{service_name}_{self.id:02}" config = self.host.get_service_config(pattern) return service_type( @@ -120,7 +120,7 @@ class ClusterNode: svcs_names_on_node = [svc.name for svc in self.host.config.services] for entry in self.class_registry._class_mapping.values(): hosting_svc_name = 
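
Note: get_remote_binaries_versions now returns both the consolidated version map and a list of inconsistency messages instead of asserting inline, so callers unpack two values. A sketch, assuming an initialized Hosting:

    versions, version_issues = get_remote_binaries_versions(hosting)
    # The issue list already contains newline separators between hosts.
    assert not version_issues, "".join(str(issue) for issue in version_issues)
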
entry["hosting_service_name"] - pattern = f"{hosting_svc_name}{self.id:02}" + pattern = f"{hosting_svc_name}_{self.id:02}" if pattern in svcs_names_on_node: config = self.host.get_service_config(pattern) svcs.append( @@ -267,13 +267,13 @@ class Cluster: service_name = service["hosting_service_name"] cls: type[NodeBase] = service["cls"] - pattern = f"{service_name}\d*$" + pattern = f"{service_name}_\d*$" configs = self.hosting.find_service_configs(pattern) found_nodes = [] for config in configs: # config.name is something like s3-gate01. Cut last digits to know service type - service_type = re.findall(".*\D", config.name)[0] + service_type = re.findall("(.*)_\d+", config.name)[0] # exclude unsupported services if service_type != service_name: continue diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 5b9d694..3d75988 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -18,11 +18,3 @@ class ConfigAttributes: UN_LOCODE = "un_locode" HTTP_HOSTNAME = "http_hostname" S3_HOSTNAME = "s3_hostname" - - -class _FrostfsServicesNames: - STORAGE = "s" - S3_GATE = "s3-gate" - HTTP_GATE = "http-gate" - MORPH_CHAIN = "morph-chain" - INNER_RING = "ir" From d79fd87ede254bfd483a25934dfd1c54c6c61201 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 5 Feb 2024 12:41:29 +0300 Subject: [PATCH 207/363] [#174] Add flag to remove registry file Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/load_config.py | 2 ++ src/frostfs_testlib/load/runners.py | 9 +++++++++ 2 files changed, 11 insertions(+) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index f072a4e..1932e69 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -235,6 +235,8 @@ class LoadParams: registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE", False) # In case if we want to use custom registry file left from another load run custom_registry: Optional[str] = None + # In case if we want to use custom registry file left from another load run + force_fresh_registry: Optional[bool] = None # Specifies the minimum duration of every single execution (i.e. iteration). # Any iterations that are shorter than this value will cause that VU to # sleep for the remainder of the time until the specified minimum duration is reached. 
diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index 532c590..d456270 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -78,6 +78,10 @@ class DefaultRunner(RunnerBase): nodes_under_load: list[ClusterNode], k6_dir: str, ): + if load_params.force_fresh_registry and load_params.custom_registry: + with reporter.step("Forcing fresh registry files"): + parallel(self._force_fresh_registry, self.loaders, load_params) + if load_params.load_type != LoadType.S3: return @@ -88,6 +92,11 @@ class DefaultRunner(RunnerBase): parallel(self._prepare_loader, self.loaders, load_params, grpc_peer, s3_public_keys, k6_dir) + def _force_fresh_registry(self, loader: Loader, load_params: LoadParams): + with reporter.step(f"Forcing fresh registry on {loader.ip}"): + shell = loader.get_shell() + shell.exec(f"rm -f {load_params.registry_file}") + def _prepare_loader( self, loader: Loader, From 4f3814690e750df0840274c7b4a6a1de0028ebbf Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 5 Feb 2024 18:49:45 +0300 Subject: [PATCH 208/363] [TrueCloudLab/xk6-frostfs#125] Add acl option Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/load_config.py | 2 ++ tests/test_load_config.py | 13 +++++++++++++ 2 files changed, 15 insertions(+) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 1932e69..532be16 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -147,6 +147,8 @@ class Preset: pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False) # Workers count for preset workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False) + # Acl for container/buckets + acl: Optional[str] = metadata_field(all_load_scenarios, "acl", None, False) # ------ GRPC ------ # Amount of containers which should be created diff --git a/tests/test_load_config.py b/tests/test_load_config.py index 834d051..8f28621 100644 --- a/tests/test_load_config.py +++ b/tests/test_load_config.py @@ -136,6 +136,7 @@ class TestLoadConfig: def test_argument_parsing_for_grpc_scenario(self, load_params: LoadParams): expected_preset_args = [ "--size '11'", + "--acl 'acl'", "--preload_obj '13'", "--out 'pregen_json'", "--workers '7'", @@ -174,6 +175,7 @@ class TestLoadConfig: "--ignore-errors", "--sleep '19'", "--local", + "--acl 'acl'", ] expected_env_vars = { "DURATION": 9, @@ -211,6 +213,7 @@ class TestLoadConfig: "--location 's3_location'", "--ignore-errors", "--sleep '19'", + "--acl 'acl'", ] expected_env_vars = { "DURATION": 9, @@ -242,6 +245,7 @@ class TestLoadConfig: "--location 's3_location'", "--ignore-errors", "--sleep '19'", + "--acl 'acl'", ] expected_env_vars = { "DURATION": 183900, @@ -279,6 +283,7 @@ class TestLoadConfig: "--location 's3_location'", "--ignore-errors", "--sleep '19'", + "--acl 'acl'", ] expected_env_vars = { "DURATION": 9, @@ -317,6 +322,7 @@ class TestLoadConfig: "--policy 'container_placement_policy'", "--ignore-errors", "--sleep '19'", + "--acl 'acl'", ] expected_env_vars = { "DURATION": 9, @@ -347,6 +353,7 @@ class TestLoadConfig: "--policy 'container_placement_policy'", "--ignore-errors", "--sleep '19'", + "--acl 'acl'", ] expected_env_vars = { "CONFIG_FILE": "config_file", @@ -399,6 +406,7 @@ class TestLoadConfig: "--containers '0'", "--policy ''", "--sleep '0'", + "--acl ''", ] expected_env_vars = { "DURATION": 0, @@ -427,6 +435,7 @@ class TestLoadConfig: "--containers '0'", 
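
Note: when force_fresh_registry is combined with custom_registry, the runner removes the registry file on every loader before the run starts, guaranteeing a clean reuse cycle. A sketch (the path is a placeholder; how registry_file gets pointed at the custom file is assumed to happen during run preparation):

    load_params.custom_registry = "/var/loaders/registry.bolt"  # placeholder path
    load_params.force_fresh_registry = True  # loaders rm the registry file before the run
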
"--policy ''", "--sleep '0'", + "--acl ''", ] expected_env_vars = { "DURATION": 0, @@ -462,6 +471,7 @@ class TestLoadConfig: "--buckets '0'", "--location ''", "--sleep '0'", + "--acl ''", ] expected_env_vars = { "DURATION": 0, @@ -490,6 +500,7 @@ class TestLoadConfig: "--buckets '0'", "--location ''", "--sleep '0'", + "--acl ''", ] expected_env_vars = { "DURATION": 0, @@ -525,6 +536,7 @@ class TestLoadConfig: "--containers '0'", "--policy ''", "--sleep '0'", + "--acl ''", ] expected_env_vars = { "DURATION": 0, @@ -553,6 +565,7 @@ class TestLoadConfig: "--containers '0'", "--policy ''", "--sleep '0'", + "--acl ''", ] expected_env_vars = { "CONFIG_FILE": "", From 751381cd60b909076a371d2d2973a5523a59ca1a Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 14 Feb 2024 16:16:59 +0300 Subject: [PATCH 209/363] Add GenericCli utility Signed-off-by: Andrey Berezin --- src/frostfs_testlib/cli/__init__.py | 1 + src/frostfs_testlib/cli/generic_cli.py | 30 ++++++++ src/frostfs_testlib/hosting/config.py | 5 +- src/frostfs_testlib/hosting/interfaces.py | 4 +- src/frostfs_testlib/steps/cli/container.py | 6 +- src/frostfs_testlib/steps/http/http_gate.py | 74 +++++++++---------- .../controllers/cluster_state_controller.py | 2 + .../storage/dataclasses/frostfs_services.py | 29 ++------ 8 files changed, 80 insertions(+), 71 deletions(-) create mode 100644 src/frostfs_testlib/cli/generic_cli.py diff --git a/src/frostfs_testlib/cli/__init__.py b/src/frostfs_testlib/cli/__init__.py index 3799be9..7e3d243 100644 --- a/src/frostfs_testlib/cli/__init__.py +++ b/src/frostfs_testlib/cli/__init__.py @@ -1,4 +1,5 @@ from frostfs_testlib.cli.frostfs_adm import FrostfsAdm from frostfs_testlib.cli.frostfs_authmate import FrostfsAuthmate from frostfs_testlib.cli.frostfs_cli import FrostfsCli +from frostfs_testlib.cli.generic_cli import GenericCli from frostfs_testlib.cli.neogo import NeoGo, NetworkType diff --git a/src/frostfs_testlib/cli/generic_cli.py b/src/frostfs_testlib/cli/generic_cli.py new file mode 100644 index 0000000..2a80159 --- /dev/null +++ b/src/frostfs_testlib/cli/generic_cli.py @@ -0,0 +1,30 @@ +from typing import Optional + +from frostfs_testlib.hosting.interfaces import Host +from frostfs_testlib.shell.interfaces import CommandOptions, Shell + + +class GenericCli(object): + def __init__(self, cli_name: str, host: Host) -> None: + self.host = host + self.cli_name = cli_name + + def __call__( + self, + args: Optional[str] = "", + pipes: Optional[str] = "", + shell: Optional[Shell] = None, + options: Optional[CommandOptions] = None, + ): + if not shell: + shell = self.host.get_shell() + + cli_config = self.host.get_cli_config(self.cli_name, True) + extra_args = "" + exec_path = self.cli_name + if cli_config: + extra_args = " ".join(cli_config.extra_args) + exec_path = cli_config.exec_path + + cmd = f"{exec_path} {args} {extra_args} {pipes}" + return shell.exec(cmd, options) diff --git a/src/frostfs_testlib/hosting/config.py b/src/frostfs_testlib/hosting/config.py index 4ab66d7..8b256cc 100644 --- a/src/frostfs_testlib/hosting/config.py +++ b/src/frostfs_testlib/hosting/config.py @@ -10,9 +10,7 @@ class ParsedAttributes: def parse(cls, attributes: dict[str, Any]): # Pick attributes supported by the class field_names = set(field.name for field in fields(cls)) - supported_attributes = { - key: value for key, value in attributes.items() if key in field_names - } + supported_attributes = {key: value for key, value in attributes.items() if key in field_names} return cls(**supported_attributes) @@ -29,6 +27,7 
@@ class CLIConfig:
     name: str
     exec_path: str
     attributes: dict[str, str] = field(default_factory=dict)
+    extra_args: list[str] = field(default_factory=list)
 
 
 @dataclass
diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py
index 3b2d718..13051e2 100644
--- a/src/frostfs_testlib/hosting/interfaces.py
+++ b/src/frostfs_testlib/hosting/interfaces.py
@@ -54,7 +54,7 @@ class Host(ABC):
             raise ValueError(f"Unknown service name: '{service_name}'")
         return service_config
 
-    def get_cli_config(self, cli_name: str) -> CLIConfig:
+    def get_cli_config(self, cli_name: str, allow_empty: bool = False) -> CLIConfig:
         """Returns config of CLI tool with specified name.
 
         The CLI must be located on this host.
@@ -66,7 +66,7 @@ class Host(ABC):
             Config of the CLI tool.
         """
         cli_config = self._cli_config_by_name.get(cli_name)
-        if cli_config is None:
+        if cli_config is None and not allow_empty:
             raise ValueError(f"Unknown CLI name: '{cli_name}'")
         return cli_config
 
diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py
index 3cc3f35..82ff407 100644
--- a/src/frostfs_testlib/steps/cli/container.py
+++ b/src/frostfs_testlib/steps/cli/container.py
@@ -8,7 +8,7 @@ from typing import Optional, Union
 import requests
 
 from frostfs_testlib import reporter
-from frostfs_testlib.cli import FrostfsCli
+from frostfs_testlib.cli import FrostfsCli, GenericCli
 from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
 from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
 from frostfs_testlib.shell import Shell
@@ -345,8 +345,8 @@ def _parse_cid(output: str) -> str:
 
 @reporter.step("Search container by name")
 def search_container_by_name(name: str, node: ClusterNode):
-    node_shell = node.host.get_shell()
-    output = node_shell.exec(f"curl -I HEAD http://127.0.0.1:8084/{name}")
+    curl = GenericCli("curl", node.host)
+    output = curl(f"-I http://127.0.0.1:8084/{name}")
     pattern = r"X-Container-Id: (\S+)"
     cid = re.findall(pattern, output.stdout)
     if cid:
diff --git a/src/frostfs_testlib/steps/http/http_gate.py b/src/frostfs_testlib/steps/http/http_gate.py
index a8c9899..3f4d838 100644
--- a/src/frostfs_testlib/steps/http/http_gate.py
+++ b/src/frostfs_testlib/steps/http/http_gate.py
@@ -11,13 +11,14 @@ from urllib.parse import quote_plus
 import requests
 
 from frostfs_testlib import reporter
+from frostfs_testlib.cli import GenericCli
 from frostfs_testlib.resources.common import SIMPLE_OBJECT_SIZE
 from frostfs_testlib.s3.aws_cli_client import command_options
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.shell.local_shell import LocalShell
 from frostfs_testlib.steps.cli.object import get_object
 from frostfs_testlib.steps.storage_policy import get_nodes_without_object
-from frostfs_testlib.storage.cluster import StorageNode
+from frostfs_testlib.storage.cluster import ClusterNode, StorageNode
 from frostfs_testlib.testing.test_control import retry
 from frostfs_testlib.utils.file_utils import get_file_hash
 
@@ -31,8 +32,7 @@ local_shell = LocalShell()
 def get_via_http_gate(
     cid: str,
     oid: str,
-    endpoint: str,
-    http_hostname: str,
+    node: ClusterNode,
     request_path: Optional[str] = None,
     timeout: Optional[int] = 300,
 ):
@@ -40,18 +40,19 @@ def get_via_http_gate(
     This function gets a given object from HTTP gate
     cid: container id to get object from
     oid: object ID
-    endpoint: http gate endpoint
-    http_hostname: http host name on the node
+    node: node to make request
     request_path: (optional) http request, if omitted - use default [{endpoint}/get/{cid}/{oid}]
     """
     # if `request_path` parameter omitted, use default
     if request_path is None:
-        request = f"{endpoint}/get/{cid}/{oid}"
+        request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}"
     else:
-        request = f"{endpoint}{request_path}"
+        request = f"{node.http_gate.get_endpoint()}{request_path}"
 
-    resp = requests.get(request, headers={"Host": http_hostname}, stream=True, timeout=timeout, verify=False)
+    resp = requests.get(
+        request, headers={"Host": node.storage_node.get_http_hostname()[0]}, stream=True, timeout=timeout, verify=False
+    )
 
     if not resp.ok:
         raise Exception(
@@ -72,15 +73,14 @@ def get_via_http_gate(
 
 
 @reporter.step("Get via Zip HTTP Gate")
-def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str, http_hostname: str, timeout: Optional[int] = 300):
+def get_via_zip_http_gate(cid: str, prefix: str, node: ClusterNode, timeout: Optional[int] = 300):
     """
     This function gets objects with the given prefix from HTTP gate as a ZIP archive
     cid: container id to get object from
     prefix: common prefix
-    endpoint: http gate endpoint
-    http_hostname: http host name on the node
+    node: node to make request
     """
-    request = f"{endpoint}/zip/{cid}/{prefix}"
+    request = f"{node.http_gate.get_endpoint()}/zip/{cid}/{prefix}"
     resp = requests.get(request, stream=True, timeout=timeout, verify=False)
 
     if not resp.ok:
@@ -109,8 +109,7 @@ def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str, http_hostname: str, timeout: Optional[int] = 300):
 def get_via_http_gate_by_attribute(
     cid: str,
     attribute: dict,
-    endpoint: str,
-    http_hostname: str,
+    node: ClusterNode,
     request_path: Optional[str] = None,
     timeout: Optional[int] = 300,
 ):
@@ -126,11 +125,13 @@ def get_via_http_gate_by_attribute(
     attr_value = quote_plus(str(attribute.get(attr_name)))
     # if `request_path` parameter omitted, use default
     if request_path is None:
-        request = f"{endpoint}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}"
+        request = f"{node.http_gate.get_endpoint()}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}"
     else:
-        request = f"{endpoint}{request_path}"
+        request = f"{node.http_gate.get_endpoint()}{request_path}"
 
-    resp = requests.get(request, stream=True, timeout=timeout, verify=False, headers={"Host": http_hostname})
+    resp = requests.get(
+        request, stream=True, timeout=timeout, verify=False, headers={"Host": node.storage_node.get_http_hostname()[0]}
+    )
 
     if not resp.ok:
         raise Exception(
@@ -247,19 +248,18 @@ def upload_via_http_gate_curl(
 
 @retry(max_attempts=3, sleep_interval=1)
 @reporter.step("Get via HTTP Gate using Curl")
-def get_via_http_curl(cid: str, oid: str, endpoint: str, http_hostname: str) -> str:
+def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> str:
     """
     This function gets a given object from HTTP gate using curl utility.
cid: CID to get object from oid: object OID - endpoint: http gate endpoint - http_hostname: http host name of the node + node: node for request """ - request = f"{endpoint}/get/{cid}/{oid}" + request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}" file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}") - cmd = f'curl -k -H "Host: {http_hostname}" {request} > {file_path}' - local_shell.exec(cmd) + curl = GenericCli("curl", node.host) + curl(f'-k -H "Host: {node.storage_node.get_http_hostname()[0]}"', f"{request} > {file_path}", shell=local_shell) return file_path @@ -274,12 +274,11 @@ def _attach_allure_step(request: str, status_code: int, req_type="GET"): def try_to_get_object_and_expect_error( cid: str, oid: str, + node: ClusterNode, error_pattern: str, - endpoint: str, - http_hostname: str, ) -> None: try: - get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname) + get_via_http_gate(cid=cid, oid=oid, node=node) raise AssertionError(f"Expected error on getting object with cid: {cid}") except Exception as err: match = error_pattern.casefold() in str(err).casefold() @@ -292,13 +291,10 @@ def get_object_by_attr_and_verify_hashes( file_name: str, cid: str, attrs: dict, - endpoint: str, - http_hostname: str, + node: ClusterNode, ) -> None: - got_file_path_http = get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname) - got_file_path_http_attr = get_via_http_gate_by_attribute( - cid=cid, attribute=attrs, endpoint=endpoint, http_hostname=http_hostname - ) + got_file_path_http = get_via_http_gate(cid=cid, oid=oid, node=node) + got_file_path_http_attr = get_via_http_gate_by_attribute(cid=cid, attribute=attrs, node=node) assert_hashes_are_equal(file_name, got_file_path_http, got_file_path_http_attr) @@ -309,8 +305,7 @@ def verify_object_hash( cid: str, shell: Shell, nodes: list[StorageNode], - endpoint: str, - http_hostname: str, + request_node: ClusterNode, object_getter=None, ) -> None: @@ -336,7 +331,7 @@ def verify_object_hash( shell=shell, endpoint=random_node.get_rpc_endpoint(), ) - got_file_path_http = object_getter(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname) + got_file_path_http = object_getter(cid=cid, oid=oid, node=request_node) assert_hashes_are_equal(file_name, got_file_path, got_file_path_http) @@ -365,10 +360,9 @@ def attr_into_str_header_curl(attrs: dict) -> list: def try_to_get_object_via_passed_request_and_expect_error( cid: str, oid: str, + node: ClusterNode, error_pattern: str, - endpoint: str, http_request_path: str, - http_hostname: str, attrs: Optional[dict] = None, ) -> None: try: @@ -376,17 +370,15 @@ def try_to_get_object_via_passed_request_and_expect_error( get_via_http_gate( cid=cid, oid=oid, - endpoint=endpoint, + node=node, request_path=http_request_path, - http_hostname=http_hostname, ) else: get_via_http_gate_by_attribute( cid=cid, attribute=attrs, - endpoint=endpoint, + node=node, request_path=http_request_path, - http_hostname=http_hostname, ) raise AssertionError(f"Expected error on getting object with cid: {cid}") except Exception as err: diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index f51be78..69df675 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -326,6 +326,8 @@ class ClusterStateController: @reporter.step("Restore blocked nodes") def 
restore_all_traffic(self): + if not self.dropped_traffic: + return parallel(self._restore_traffic_to_node, self.dropped_traffic) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index 33e7894..ddc650a 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -5,6 +5,7 @@ from frostfs_testlib.storage.constants import ConfigAttributes from frostfs_testlib.storage.dataclasses.node_base import NodeBase from frostfs_testlib.storage.dataclasses.shard import Shard + class InnerRing(NodeBase): """ Class represents inner ring node in a cluster @@ -17,11 +18,7 @@ class InnerRing(NodeBase): def service_healthcheck(self) -> bool: health_metric = "frostfs_ir_ir_health" - output = ( - self.host.get_shell() - .exec(f"curl -s localhost:6662 | grep {health_metric} | sed 1,2d") - .stdout - ) + output = self.host.get_shell().exec(f"curl -s localhost:6662 | grep {health_metric} | sed 1,2d").stdout return health_metric in output def get_netmap_cleaner_threshold(self) -> str: @@ -50,11 +47,7 @@ class S3Gate(NodeBase): def service_healthcheck(self) -> bool: health_metric = "frostfs_s3_gw_state_health" - output = ( - self.host.get_shell() - .exec(f"curl -s localhost:8086 | grep {health_metric} | sed 1,2d") - .stdout - ) + output = self.host.get_shell().exec(f"curl -s localhost:8086 | grep {health_metric} | sed 1,2d").stdout return health_metric in output @property @@ -72,11 +65,7 @@ class HTTPGate(NodeBase): def service_healthcheck(self) -> bool: health_metric = "frostfs_http_gw_state_health" - output = ( - self.host.get_shell() - .exec(f"curl -s localhost:5662 | grep {health_metric} | sed 1,2d") - .stdout - ) + output = self.host.get_shell().exec(f"curl -s localhost:5662 | grep {health_metric} | sed 1,2d").stdout return health_metric in output @property @@ -135,11 +124,7 @@ class StorageNode(NodeBase): def service_healthcheck(self) -> bool: health_metric = "frostfs_node_state_health" - output = ( - self.host.get_shell() - .exec(f"curl -s localhost:6672 | grep {health_metric} | sed 1,2d") - .stdout - ) + output = self.host.get_shell().exec(f"curl -s localhost:6672 | grep {health_metric} | sed 1,2d").stdout return health_metric in output def get_shard_config_path(self) -> str: @@ -174,10 +159,10 @@ class StorageNode(NodeBase): def get_storage_config(self) -> str: return self.host.get_storage_config(self.name) - def get_http_hostname(self) -> str: + def get_http_hostname(self) -> list[str]: return self._get_attribute(ConfigAttributes.HTTP_HOSTNAME) - def get_s3_hostname(self) -> str: + def get_s3_hostname(self) -> list[str]: return self._get_attribute(ConfigAttributes.S3_HOSTNAME) def delete_blobovnicza(self): From 55cebc042c49a59f699136d9258be6682ef0fbff Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 19 Feb 2024 17:48:09 +0300 Subject: [PATCH 210/363] [#183] Read all configuration files for service config Signed-off-by: Andrey Berezin --- src/frostfs_testlib/storage/cluster.py | 5 +- .../configuration/service_configuration.py | 75 ++++++++++++------- .../storage/dataclasses/frostfs_services.py | 18 ++--- .../storage/dataclasses/node_base.py | 9 ++- .../storage/dataclasses/shard.py | 13 +--- 5 files changed, 68 insertions(+), 52 deletions(-) diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index c867515..23130cb 100644 --- 
a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -9,7 +9,6 @@ from frostfs_testlib.hosting import Host, Hosting from frostfs_testlib.hosting.config import ServiceConfig from frostfs_testlib.storage import get_service_registry from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml -from frostfs_testlib.storage.configuration.service_configuration import ServiceConfiguration from frostfs_testlib.storage.constants import ConfigAttributes from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass @@ -72,6 +71,7 @@ class ClusterNode: def s3_gate(self) -> S3Gate: return self.service(S3Gate) + # TODO: Deprecated. Use config with ServiceConfigurationYml interface def get_config(self, config_file_path: str) -> dict: shell = self.host.get_shell() @@ -81,6 +81,7 @@ class ClusterNode: config = yaml.safe_load(config_text) return config + # TODO: Deprecated. Use config with ServiceConfigurationYml interface def save_config(self, new_config: dict, config_file_path: str) -> None: shell = self.host.get_shell() @@ -88,7 +89,7 @@ class ClusterNode: shell.exec(f"echo '{config_str}' | sudo tee {config_file_path}") def config(self, service_type: type[ServiceClass]) -> ServiceConfigurationYml: - return ServiceConfiguration(self.service(service_type)) + return self.service(service_type).config def service(self, service_type: type[ServiceClass]) -> ServiceClass: """ diff --git a/src/frostfs_testlib/storage/configuration/service_configuration.py b/src/frostfs_testlib/storage/configuration/service_configuration.py index f7b3be7..fddd64a 100644 --- a/src/frostfs_testlib/storage/configuration/service_configuration.py +++ b/src/frostfs_testlib/storage/configuration/service_configuration.py @@ -5,51 +5,74 @@ from typing import Any import yaml from frostfs_testlib import reporter -from frostfs_testlib.shell.interfaces import CommandOptions +from frostfs_testlib.shell.interfaces import CommandOptions, Shell from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml -from frostfs_testlib.storage.dataclasses.node_base import ServiceClass + + +def extend_dict(extend_me: dict, extend_by: dict): + if isinstance(extend_by, dict): + for k, v in extend_by.items(): + if k in extend_me: + extend_dict(extend_me.get(k), v) + else: + extend_me[k] = v + else: + extend_me += extend_by class ServiceConfiguration(ServiceConfigurationYml): - def __init__(self, service: "ServiceClass") -> None: - self.service = service - self.shell = self.service.host.get_shell() - self.confd_path = os.path.join(self.service.config_dir, "conf.d") + def __init__(self, service_name: str, shell: Shell, config_dir: str, main_config_path: str) -> None: + self.service_name = service_name + self.shell = shell + self.main_config_path = main_config_path + self.confd_path = os.path.join(config_dir, "conf.d") self.custom_file = os.path.join(self.confd_path, "99_changes.yml") def _path_exists(self, path: str) -> bool: return not self.shell.exec(f"test -e {path}", options=CommandOptions(check=False)).return_code - def _get_data_from_file(self, path: str) -> dict: - content = self.shell.exec(f"cat {path}").stdout - data = yaml.safe_load(content) - return data + def _get_config_files(self): + config_files = [self.main_config_path] - def get(self, key: str) -> str: - with reporter.step(f"Get {key} configuration value for {self.service}"): - 
config_files = [self.service.main_config_path] + if self._path_exists(self.confd_path): + files = self.shell.exec(f"find {self.confd_path} -type f").stdout.strip().split() + # Sorting files in backwards order from latest to first one + config_files.extend(sorted(files, key=lambda x: -int(re.findall("^\d+", os.path.basename(x))[0]))) - if self._path_exists(self.confd_path): - files = self.shell.exec(f"find {self.confd_path} -type f").stdout.strip().split() - # Sorting files in backwards order from latest to first one - config_files.extend(sorted(files, key=lambda x: -int(re.findall("^\d+", os.path.basename(x))[0]))) + return config_files - result = None - for file in files: - data = self._get_data_from_file(file) - result = self._find_option(key, data) - if result is not None: - break + def _get_configuration(self, config_files: list[str]) -> dict: + if not config_files: + return [{}] + splitter = "+++++" + files_str = " ".join(config_files) + all_content = self.shell.exec( + f"echo Getting config files; for file in {files_str}; do (echo {splitter}; sudo cat ${{file}}); done" + ).stdout + files_content = all_content.split("+++++")[1:] + files_data = [yaml.safe_load(file_content) for file_content in files_content] + + mergedData = {} + for data in files_data: + extend_dict(mergedData, data) + + return mergedData + + def get(self, key: str) -> str | Any: + with reporter.step(f"Get {key} configuration value for {self.service_name}"): + config_files = self._get_config_files() + configuration = self._get_configuration(config_files) + result = self._find_option(key, configuration) return result def set(self, values: dict[str, Any]): - with reporter.step(f"Change configuration for {self.service}"): + with reporter.step(f"Change configuration for {self.service_name}"): if not self._path_exists(self.confd_path): self.shell.exec(f"mkdir {self.confd_path}") if self._path_exists(self.custom_file): - data = self._get_data_from_file(self.custom_file) + data = self._get_configuration([self.custom_file]) else: data = {} @@ -61,5 +84,5 @@ class ServiceConfiguration(ServiceConfigurationYml): self.shell.exec(f"chmod 777 {self.custom_file}") def revert(self): - with reporter.step(f"Revert changed options for {self.service}"): + with reporter.step(f"Revert changed options for {self.service_name}"): self.shell.exec(f"rm -rf {self.custom_file}") diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index ddc650a..9e671d5 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -127,25 +127,23 @@ class StorageNode(NodeBase): output = self.host.get_shell().exec(f"curl -s localhost:6672 | grep {health_metric} | sed 1,2d").stdout return health_metric in output + # TODO: Deprecated. Use new approach with config def get_shard_config_path(self) -> str: return self._get_attribute(ConfigAttributes.SHARD_CONFIG_PATH) + # TODO: Deprecated. 
Use new approach with config def get_shards_config(self) -> tuple[str, dict]: return self.get_config(self.get_shard_config_path()) def get_shards(self) -> list[Shard]: - config = self.get_shards_config()[1] - config["storage"]["shard"].pop("default") - return [Shard.from_object(shard) for shard in config["storage"]["shard"].values()] + shards = self.config.get("storage:shard") - def get_shards_from_env(self) -> list[Shard]: - config = self.get_shards_config()[1] - configObj = ConfigObj(StringIO(config)) + if not shards: + raise RuntimeError(f"Cannot get shards information for {self.name} on {self.host.config.address}") - pattern = f"{SHARD_PREFIX}\d*" - num_shards = len(set(re.findall(pattern, self.get_shards_config()))) - - return [Shard.from_config_object(configObj, shard_id) for shard_id in range(num_shards)] + if "default" in shards: + shards.pop("default") + return [Shard.from_object(shard) for shard in shards.values()] def get_control_endpoint(self) -> str: return self._get_attribute(ConfigAttributes.CONTROL_ENDPOINT) diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 72b12a9..8291345 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -10,6 +10,7 @@ from frostfs_testlib import reporter from frostfs_testlib.hosting.config import ServiceConfig from frostfs_testlib.hosting.interfaces import Host from frostfs_testlib.shell.interfaces import CommandResult +from frostfs_testlib.storage.configuration.service_configuration import ServiceConfiguration, ServiceConfigurationYml from frostfs_testlib.storage.constants import ConfigAttributes from frostfs_testlib.testing.readable import HumanReadableABC from frostfs_testlib.utils import wallet_utils @@ -147,7 +148,11 @@ class NodeBase(HumanReadableABC): def main_config_path(self) -> str: return self._get_attribute(ConfigAttributes.CONFIG_PATH) - # TODO: Deprecated + @property + def config(self) -> ServiceConfigurationYml: + return ServiceConfiguration(self.name, self.host.get_shell(), self.config_dir, self.main_config_path) + + # TODO: Deprecated. Use config with ServiceConfigurationYml interface def get_config(self, config_file_path: Optional[str] = None) -> tuple[str, dict]: if config_file_path is None: config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH) @@ -160,7 +165,7 @@ class NodeBase(HumanReadableABC): config = yaml.safe_load(config_text) return config_file_path, config - # TODO: Deprecated + # TODO: Deprecated. 
Use config with ServiceConfigurationYml interface def save_config(self, new_config: dict, config_file_path: Optional[str] = None) -> None: if config_file_path is None: config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH) diff --git a/src/frostfs_testlib/storage/dataclasses/shard.py b/src/frostfs_testlib/storage/dataclasses/shard.py index 584138d..170a477 100644 --- a/src/frostfs_testlib/storage/dataclasses/shard.py +++ b/src/frostfs_testlib/storage/dataclasses/shard.py @@ -1,16 +1,6 @@ -import json -import pathlib -import re from dataclasses import dataclass -from io import StringIO -import allure -import pytest -import yaml from configobj import ConfigObj -from frostfs_testlib.cli import FrostfsCli -from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT -from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG SHARD_PREFIX = "FROSTFS_STORAGE_SHARD_" BLOBSTOR_PREFIX = "_BLOBSTOR_" @@ -94,6 +84,5 @@ class Shard: blobstor=[Blobstor(path=blobstor["path"], path_type=blobstor["type"]) for blobstor in shard["blobstor"]], metabase=metabase, writecache=writecache, - pilorama=pilorama + pilorama=pilorama, ) - From 273f0d13a52c63f45eda5a4c92df01b8ce76a309 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 20 Feb 2024 13:27:45 +0300 Subject: [PATCH 211/363] [#184] Add streaming param Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/load_config.py | 2 ++ tests/test_load_config.py | 13 +++++++++++++ 2 files changed, 15 insertions(+) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 532be16..7bde399 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -221,6 +221,8 @@ class LoadParams: ) # Percentage of filling of all data disks on all nodes fill_percent: Optional[float] = None + # if set, the payload is generated on the fly and is not read into memory fully. + streaming: Optional[int] = metadata_field(all_load_scenarios, None, "STREAMING", False) # ------- COMMON SCENARIO PARAMS ------- # Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value. 
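
The `streaming` field added above rides the same `metadata_field` machinery as every other scenario parameter: the variable name ("STREAMING") is stored in the dataclass field's metadata and later collected when the k6 environment is assembled. A minimal, self-contained sketch of that mechanism follows; the `MiniLoadParams` class, the simplified `metadata_field` signature, and the standalone collector are illustrative only, not testlib code.

from dataclasses import dataclass, field, fields
from typing import Optional


def metadata_field(scenario_variable: Optional[str] = None):
    # Attach the k6 variable name to the dataclass field via its metadata.
    return field(default=None, metadata={"env_variable": scenario_variable})


@dataclass
class MiniLoadParams:
    # Mirrors LoadParams.streaming: maps to the STREAMING k6 variable.
    streaming: Optional[int] = metadata_field("STREAMING")
    write_object_size: Optional[int] = metadata_field("WRITE_OBJ_SIZE")


def get_env_vars(params: MiniLoadParams) -> dict:
    # Pick every field that declares a variable name and has a value set.
    return {
        f.metadata["env_variable"]: getattr(params, f.name)
        for f in fields(params)
        if f.metadata.get("env_variable") and getattr(params, f.name) is not None
    }


# Unset fields are skipped, so only STREAMING is emitted here.
assert get_env_vars(MiniLoadParams(streaming=9)) == {"STREAMING": 9}
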
diff --git a/tests/test_load_config.py b/tests/test_load_config.py
index 8f28621..dc019b7 100644
--- a/tests/test_load_config.py
+++ b/tests/test_load_config.py
@@ -156,6 +156,7 @@ class TestLoadConfig:
             "READERS": 7,
             "DELETERS": 8,
             "READ_AGE": 8,
+            "STREAMING": 9,
             "PREGEN_JSON": "pregen_json",
             "PREPARE_LOCALLY": True,
         }
@@ -195,6 +196,7 @@ class TestLoadConfig:
             "READ_RATE": 9,
             "READ_AGE": 8,
             "DELETE_RATE": 11,
+            "STREAMING": 9,
             "PREPARE_LOCALLY": True,
         }
@@ -225,6 +227,7 @@ class TestLoadConfig:
             "READERS": 7,
             "DELETERS": 8,
             "READ_AGE": 8,
+            "STREAMING": 9,
             "NO_VERIFY_SSL": True,
             "PREGEN_JSON": "pregen_json",
         }
@@ -265,6 +268,7 @@ class TestLoadConfig:
             "WRITE_RATE": 10,
             "READ_RATE": 9,
             "READ_AGE": 8,
+            "STREAMING": 9,
             "DELETE_RATE": 11,
         }
@@ -303,6 +307,7 @@ class TestLoadConfig:
             "WRITE_RATE": 10,
             "READ_RATE": 9,
             "READ_AGE": 8,
+            "STREAMING": 9,
             "DELETE_RATE": 11,
         }
@@ -335,6 +340,7 @@ class TestLoadConfig:
             "READERS": 7,
             "DELETERS": 8,
             "READ_AGE": 8,
+            "STREAMING": 9,
             "PREGEN_JSON": "pregen_json",
         }
@@ -366,6 +372,7 @@ class TestLoadConfig:
             "READERS": 7,
             "DELETERS": 8,
             "READ_AGE": 8,
+            "STREAMING": 9,
             "PREGEN_JSON": "pregen_json",
         }
@@ -418,6 +425,7 @@ class TestLoadConfig:
             "READERS": 0,
             "DELETERS": 0,
             "READ_AGE": 0,
+            "STREAMING": 0,
             "PREGEN_JSON": "",
             "PREPARE_LOCALLY": False,
         }
@@ -455,6 +463,7 @@ class TestLoadConfig:
             "READ_RATE": 0,
             "DELETE_RATE": 0,
             "READ_AGE": 0,
+            "STREAMING": 0,
             "PREPARE_LOCALLY": False,
         }
@@ -483,6 +492,7 @@ class TestLoadConfig:
             "READERS": 0,
             "DELETERS": 0,
             "READ_AGE": 0,
+            "STREAMING": 0,
             "NO_VERIFY_SSL": False,
             "PREGEN_JSON": "",
         }
@@ -521,6 +531,7 @@ class TestLoadConfig:
             "READ_RATE": 0,
             "DELETE_RATE": 0,
             "READ_AGE": 0,
+            "STREAMING": 0,
         }
 
         self._check_preset_params(load_params, expected_preset_args)
@@ -549,6 +560,7 @@ class TestLoadConfig:
             "READERS": 0,
             "DELETERS": 0,
             "READ_AGE": 0,
+            "STREAMING": 0,
             "PREGEN_JSON": "",
         }
@@ -578,6 +590,7 @@ class TestLoadConfig:
             "READERS": 0,
             "DELETERS": 0,
             "READ_AGE": 0,
+            "STREAMING": 0,
             "PREGEN_JSON": "",
         }

From 3fc3eaadf32eeaf6af65e4ff673582ad9412cb8c Mon Sep 17 00:00:00 2001
From: mkadilov
Date: Mon, 19 Feb 2024 13:01:29 +0300
Subject: [PATCH 212/363] [#182] Refactoring old functions for FrostfsCli

Refactoring old functions for FrostfsCli

Signed-off-by: Mikhail Kadilov <m.kadilov@yadro.com>
---
 .../cli/frostfs_cli/control.py                |  29 +++-
 src/frostfs_testlib/cli/frostfs_cli/object.py |   2 +-
 src/frostfs_testlib/cli/frostfs_cli/shards.py |  10 +-
 src/frostfs_testlib/steps/node_management.py  | 137 +++++++++++-------
 .../storage/controllers/shards_watcher.py     |   6 +-
 5 files changed, 121 insertions(+), 63 deletions(-)

diff --git a/src/frostfs_testlib/cli/frostfs_cli/control.py b/src/frostfs_testlib/cli/frostfs_cli/control.py
index bfcd6ec..2cddfdf 100644
--- a/src/frostfs_testlib/cli/frostfs_cli/control.py
+++ b/src/frostfs_testlib/cli/frostfs_cli/control.py
@@ -39,14 +39,12 @@ class FrostfsCliControl(CliCommand):
         address: Optional[str] = None,
         timeout: Optional[str] = None,
     ) -> CommandResult:
-        """Set status of the storage node in FrostFS network map
+        """Health check for FrostFS storage nodes
 
         Args:
             wallet: Path to the wallet or binary key
             address: Address of wallet account
             endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
-            force: Force turning to local maintenance
-            status: New netmap status keyword ('online', 'offline', 'maintenance')
             timeout: Timeout for an operation (default 15s)
 
         Returns:
@@ -56,3 +54,28 @@ class FrostfsCliControl(CliCommand):
             "control healthcheck",
             **{param: value for param, value in locals().items() if param not in ["self"]},
         )
+
+    def drop_objects(
+        self,
+        endpoint: str,
+        objects: str,
+        wallet: Optional[str] = None,
+        address: Optional[str] = None,
+        timeout: Optional[str] = None,
+    ) -> CommandResult:
+        """Drop objects from the node's local storage
+
+        Args:
+            wallet: Path to the wallet or binary key
+            address: Address of wallet account
+            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
+            objects: List of object addresses to be removed in string format
+            timeout: Timeout for an operation (default 15s)
+
+        Returns:
+            Command's result.
+        """
+        return self._execute(
+            "control drop-objects",
+            **{param: value for param, value in locals().items() if param not in ["self"]},
+        )
\ No newline at end of file
diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py
index 0e4654b..38a69e4 100644
--- a/src/frostfs_testlib/cli/frostfs_cli/object.py
+++ b/src/frostfs_testlib/cli/frostfs_cli/object.py
@@ -357,7 +357,7 @@ class FrostfsCliObject(CliCommand):
         wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
-        generate_key: Optional = None,
+        generate_key: Optional[bool] = None,
         oid: Optional[str] = None,
         trace: bool = False,
         root: bool = False,
diff --git a/src/frostfs_testlib/cli/frostfs_cli/shards.py b/src/frostfs_testlib/cli/frostfs_cli/shards.py
index 1727249..4399b13 100644
--- a/src/frostfs_testlib/cli/frostfs_cli/shards.py
+++ b/src/frostfs_testlib/cli/frostfs_cli/shards.py
@@ -39,10 +39,10 @@ class FrostfsCliShards(CliCommand):
     def set_mode(
         self,
         endpoint: str,
-        wallet: str,
-        wallet_password: str,
         mode: str,
         id: Optional[list[str]],
+        wallet: Optional[str] = None,
+        wallet_password: Optional[str] = None,
         address: Optional[str] = None,
         all: bool = False,
         clear_errors: bool = False,
@@ -65,6 +65,11 @@ class FrostfsCliShards(CliCommand):
         Returns:
             Command's result.
         """
+        if not wallet_password:
+            return self._execute(
+                "control shards set-mode",
+                **{param: value for param, value in locals().items() if param not in ["self"]},
+            )
         return self._execute_with_password(
             "control shards set-mode",
             wallet_password,
@@ -137,3 +142,4 @@ class FrostfsCliShards(CliCommand):
             wallet_password,
             **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]},
         )
+
diff --git a/src/frostfs_testlib/steps/node_management.py b/src/frostfs_testlib/steps/node_management.py
index 28e3820..dd38279 100644
--- a/src/frostfs_testlib/steps/node_management.py
+++ b/src/frostfs_testlib/steps/node_management.py
@@ -13,7 +13,6 @@ from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align
 from frostfs_testlib.storage.cluster import Cluster, StorageNode
-from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
 from frostfs_testlib.utils import datetime_utils
 
 logger = logging.getLogger("NeoLogger")
@@ -52,9 +51,24 @@ def storage_node_healthcheck(node: StorageNode) -> HealthStatus:
     Returns:
         health status as HealthStatus object.
""" - command = "control healthcheck" - output = _run_control_command_with_retries(node, command) - return HealthStatus.from_stdout(output) + + host = node.host + service_config = host.get_service_config(node.name) + wallet_path = service_config.attributes["wallet_path"] + wallet_password = service_config.attributes["wallet_password"] + control_endpoint = service_config.attributes["control_endpoint"] + + shell = host.get_shell() + wallet_config_path = f"/tmp/{node.name}-config.yaml" + wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' + shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") + + cli_config = host.get_cli_config("frostfs-cli") + + cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path) + result = cli.control.healthcheck(control_endpoint) + + return HealthStatus.from_stdout(result.stdout) @reporter.step("Set status for {node}") @@ -66,8 +80,21 @@ def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) -> status: online or offline. retries (optional, int): number of retry attempts if it didn't work from the first time """ - command = f"control set-status --status {status}" - _run_control_command_with_retries(node, command, retries) + host = node.host + service_config = host.get_service_config(node.name) + wallet_path = service_config.attributes["wallet_path"] + wallet_password = service_config.attributes["wallet_password"] + control_endpoint = service_config.attributes["control_endpoint"] + + shell = host.get_shell() + wallet_config_path = f"/tmp/{node.name}-config.yaml" + wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' + shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") + + cli_config = host.get_cli_config("frostfs-cli") + + cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path) + cli.control.set_status(control_endpoint, status) @reporter.step("Get netmap snapshot") @@ -91,7 +118,7 @@ def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str: @reporter.step("Get shard list for {node}") -def node_shard_list(node: StorageNode) -> list[str]: +def node_shard_list(node: StorageNode, json: Optional[bool] = None) -> list[str]: """ The function returns list of shards for specified storage node. Args: @@ -99,31 +126,72 @@ def node_shard_list(node: StorageNode) -> list[str]: Returns: list of shards. """ - command = "control shards list" - output = _run_control_command_with_retries(node, command) - return re.findall(r"Shard (.*):", output) + host = node.host + service_config = host.get_service_config(node.name) + wallet_path = service_config.attributes["wallet_path"] + wallet_password = service_config.attributes["wallet_password"] + control_endpoint = service_config.attributes["control_endpoint"] + + shell = host.get_shell() + wallet_config_path = f"/tmp/{node.name}-config.yaml" + wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' + shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") + + cli_config = host.get_cli_config("frostfs-cli") + + cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path) + result = cli.shards.list(endpoint=control_endpoint, json_mode=json) + + return re.findall(r"Shard (.*):", result.stdout) @reporter.step("Shard set for {node}") -def node_shard_set_mode(node: StorageNode, shard: str, mode: str) -> str: +def node_shard_set_mode(node: StorageNode, shard: list[str], mode: str) -> None: """ The function sets mode for specified shard. Args: node: node on which shard mode should be set. 
""" - command = f"control shards set-mode --id {shard} --mode {mode}" - return _run_control_command_with_retries(node, command) + host = node.host + service_config = host.get_service_config(node.name) + wallet_path = service_config.attributes["wallet_path"] + wallet_password = service_config.attributes["wallet_password"] + control_endpoint = service_config.attributes["control_endpoint"] + + shell = host.get_shell() + wallet_config_path = f"/tmp/{node.name}-config.yaml" + wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' + shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") + + cli_config = host.get_cli_config("frostfs-cli") + + cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path) + cli.shards.set_mode(endpoint=control_endpoint, mode=mode, id=shard) @reporter.step("Drop object from {node}") -def drop_object(node: StorageNode, cid: str, oid: str) -> str: +def drop_object(node: StorageNode, cid: str, oid: str) -> None: """ The function drops object from specified node. Args: - node_id str: node from which object should be dropped. + node: node from which object should be dropped. """ - command = f"control drop-objects -o {cid}/{oid}" - return _run_control_command_with_retries(node, command) + host = node.host + service_config = host.get_service_config(node.name) + wallet_path = service_config.attributes["wallet_path"] + wallet_password = service_config.attributes["wallet_password"] + control_endpoint = service_config.attributes["control_endpoint"] + + shell = host.get_shell() + wallet_config_path = f"/tmp/{node.name}-config.yaml" + wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' + shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") + + cli_config = host.get_cli_config("frostfs-cli") + + cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path) + objects = f"{cid}/{oid}" + cli.control.drop_objects(control_endpoint, objects) @reporter.step("Delete data from host for node {node}") @@ -238,38 +306,3 @@ def remove_nodes_from_map_morph( config_file=FROSTFS_ADM_CONFIG_PATH, ) frostfsadm.morph.remove_nodes(node_netmap_keys) - - -def _run_control_command_with_retries(node: StorageNode, command: str, retries: int = 0) -> str: - for attempt in range(1 + retries): # original attempt + specified retries - try: - return _run_control_command(node, command) - except AssertionError as err: - if attempt < retries: - logger.warning(f"Command {command} failed with error {err} and will be retried") - continue - raise AssertionError(f"Command {command} failed with error {err}") from err - - -def _run_control_command(node: StorageNode, command: str) -> None: - host = node.host - - service_config = host.get_service_config(node.name) - wallet_path = service_config.attributes["wallet_path"] - wallet_password = service_config.attributes["wallet_password"] - control_endpoint = service_config.attributes["control_endpoint"] - - shell = host.get_shell() - wallet_config_path = f"/tmp/{node.name}-config.yaml" - wallet_config = f'password: "{wallet_password}"' - shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") - - cli_config = host.get_cli_config("frostfs-cli") - - # TODO: implement cli.control - # cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path) - result = shell.exec( - f"{cli_config.exec_path} {command} --endpoint {control_endpoint} " - f"--wallet {wallet_path} --config {wallet_config_path}" - ) - return result.stdout diff --git a/src/frostfs_testlib/storage/controllers/shards_watcher.py 
b/src/frostfs_testlib/storage/controllers/shards_watcher.py index 95a419e..ad07ff4 100644 --- a/src/frostfs_testlib/storage/controllers/shards_watcher.py +++ b/src/frostfs_testlib/storage/controllers/shards_watcher.py @@ -97,8 +97,6 @@ class ShardsWatcher: response = shards_cli.list( endpoint=self.storage_node.get_control_endpoint(), - wallet=self.storage_node.get_remote_wallet_path(), - wallet_password=self.storage_node.get_wallet_password(), json_mode=True, ) @@ -110,9 +108,7 @@ class ShardsWatcher: self.storage_node.host.get_cli_config("frostfs-cli").exec_path, ) return shards_cli.set_mode( - self.storage_node.get_control_endpoint(), - self.storage_node.get_remote_wallet_path(), - self.storage_node.get_wallet_password(), + endpoint=self.storage_node.get_control_endpoint(), mode=mode, id=[shard_id], clear_errors=clear_errors, From f5a7ff5c90e4c67c394881f7405e57104d464aef Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 21 Feb 2024 17:59:15 +0300 Subject: [PATCH 213/363] [#185] Add prometheus load parameters --- src/frostfs_testlib/load/k6.py | 32 +++++++++++++++--------- src/frostfs_testlib/load/load_config.py | 33 ++++++++++++++++++++++++- tests/test_load_config.py | 15 ++++++++++- 3 files changed, 67 insertions(+), 13 deletions(-) diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index 38167d2..1e98b98 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -61,7 +61,7 @@ class K6: self._k6_dir: str = k6_dir command = ( - f"{self._k6_dir}/k6 run {self._generate_env_variables()} " + f"{self._generate_env_variables()}{self._k6_dir}/k6 run {self._generate_k6_variables()} " f"{self._k6_dir}/scenarios/{self.load_params.scenario.value}.js" ) user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None @@ -75,12 +75,12 @@ class K6: def _get_fill_percents(self): fill_percents = self.shell.exec("df -H --output=source,pcent,target | grep frostfs").stdout.split("\n") return [line.split() for line in fill_percents][:-1] - + def check_fill_percent(self): fill_percents = self._get_fill_percents() percent_mean = 0 for line in fill_percents: - percent_mean += float(line[1].split('%')[0]) + percent_mean += float(line[1].split("%")[0]) percent_mean = percent_mean / len(fill_percents) logger.info(f"{self.loader.ip} mean fill percent is {percent_mean}") return percent_mean >= self.load_params.fill_percent @@ -125,9 +125,9 @@ class K6: self.preset_output = result.stdout.strip("\n") return self.preset_output - @reporter.step("Generate K6 command") - def _generate_env_variables(self) -> str: - env_vars = self.load_params.get_env_vars() + @reporter.step("Generate K6 variables") + def _generate_k6_variables(self) -> str: + env_vars = self.load_params.get_k6_vars() env_vars[f"{self.load_params.load_type.value.upper()}_ENDPOINTS"] = ",".join(self.endpoints) env_vars["SUMMARY_JSON"] = self.summary_json @@ -135,6 +135,14 @@ class K6: reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "K6 ENV variables") return " ".join([f"-e {param}='{value}'" for param, value in env_vars.items() if value is not None]) + @reporter.step("Generate env variables") + def _generate_env_variables(self) -> str: + env_vars = self.load_params.get_env_vars() + if not env_vars: + return "" + reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "ENV variables") + return " ".join([f"{param}='{value}'" for param, value in env_vars.items() if value is not None]) + " " + def get_start_time(self) 
-> datetime:
         return datetime.fromtimestamp(self._k6_process.start_time())
 
@@ -188,23 +196,25 @@ class K6:
         wait_interval = min_wait_interval
         if self._k6_process is None:
             assert "No k6 instances were executed"
-
         while timeout > 0:
             if not self.load_params.fill_percent is None:
                 with reporter.step(f"Check the percentage of filling of all data disks on the node"):
                     if self.check_fill_percent():
-                        logger.info(f"Stopping load on because disks is filled more then {self.load_params.fill_percent}%")
+                        logger.info(
+                            f"Stopping load because disks are filled more than {self.load_params.fill_percent}%"
+                        )
                         event.set()
                         self.stop()
                         return
-
+
             if event.is_set():
                 self.stop()
                 return
-
+
             if not self._k6_process.running():
                 return
-
+
             remaining_time_hours = f"{timeout//3600}h" if timeout // 3600 != 0 else ""
             remaining_time_minutes = f"{timeout//60%60}m" if timeout // 60 % 60 != 0 else ""
             logger.info(
diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py
index 7bde399..b859971 100644
--- a/src/frostfs_testlib/load/load_config.py
+++ b/src/frostfs_testlib/load/load_config.py
@@ -94,16 +94,18 @@ def metadata_field(
     string_repr: Optional[bool] = True,
     distributed: Optional[bool] = False,
     formatter: Optional[Callable] = None,
+    env_variable: Optional[str] = None,
 ):
     return field(
         default=None,
         metadata={
             "applicable_scenarios": applicable_scenarios,
             "preset_argument": preset_param,
-            "env_variable": scenario_variable,
+            "scenario_variable": scenario_variable,
             "string_repr": string_repr,
             "distributed": distributed,
             "formatter": formatter,
+            "env_variable": env_variable,
         },
     )
 
@@ -172,6 +174,20 @@ class Preset:
     local: Optional[bool] = metadata_field(grpc_preset_scenarios, "local", None, False)
 
 
+@dataclass
+class PrometheusParams:
+    # Prometheus server URL
+    server_url: Optional[str] = metadata_field(
+        all_load_scenarios, env_variable="K6_PROMETHEUS_RW_SERVER_URL", string_repr=False
+    )
+    # Prometheus trend stats
+    trend_stats: Optional[str] = metadata_field(
+        all_load_scenarios, env_variable="K6_PROMETHEUS_RW_TREND_STATS", string_repr=False
+    )
+    # Additional tags
+    metrics_tags: Optional[str] = metadata_field(all_load_scenarios, None, "METRIC_TAGS", False)
+
+
 @dataclass
 class LoadParams:
     # ------- CONTROL PARAMS -------
@@ -223,6 +239,10 @@ class LoadParams:
     fill_percent: Optional[float] = None
     # if set, the payload is generated on the fly and is not read into memory fully.
     streaming: Optional[int] = metadata_field(all_load_scenarios, None, "STREAMING", False)
+    # Output format
+    output: Optional[str] = metadata_field(all_load_scenarios, None, "K6_OUT", False)
+    # Prometheus params
+    prometheus: Optional[PrometheusParams] = None
 
     # ------- COMMON SCENARIO PARAMS -------
     # Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value.
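
The `env_variable` / `scenario_variable` split above is what lets the Prometheus remote-write settings reach the k6 process itself rather than the scenario script: values such as `K6_PROMETHEUS_RW_SERVER_URL` are prepended to the command as shell environment variables, while scenario variables keep being passed with `-e`. A rough sketch of the resulting command assembly follows; the helper and the sample values are illustrative, not the testlib implementation.

def build_k6_command(k6_dir: str, scenario: str, env_vars: dict, k6_vars: dict) -> str:
    # Process-level environment (e.g. K6_PROMETHEUS_RW_SERVER_URL, K6_OUT)
    # goes in front of the binary; scenario variables are handed to the
    # script with -e.
    env = " ".join(f"{k}='{v}'" for k, v in env_vars.items() if v is not None)
    opts = " ".join(f"-e {k}='{v}'" for k, v in k6_vars.items() if v is not None)
    prefix = f"{env} " if env else ""
    return f"{prefix}{k6_dir}/k6 run {opts} {k6_dir}/scenarios/{scenario}.js"


print(
    build_k6_command(
        "/opt/k6",  # assumed install directory
        "grpc",
        {"K6_PROMETHEUS_RW_SERVER_URL": "http://localhost:9090/api/v1/write"},  # assumed URL
        {"DURATION": 300, "STREAMING": 1},
    )
)
# K6_PROMETHEUS_RW_SERVER_URL='http://localhost:9090/api/v1/write' /opt/k6/k6 run -e DURATION='300' -e STREAMING='1' /opt/k6/scenarios/grpc.js
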
@@ -339,6 +359,17 @@ class LoadParams: if self.preset: self.preset.pregen_json = os.path.join(self.working_dir, f"{load_id}_prepare.json") + def get_k6_vars(self): + env_vars = { + meta_field.metadata["scenario_variable"]: meta_field.value + for meta_field in self._get_meta_fields(self) + if self.scenario in meta_field.metadata["applicable_scenarios"] + and meta_field.metadata["scenario_variable"] + and meta_field.value is not None + } + + return env_vars + def get_env_vars(self): env_vars = { meta_field.metadata["env_variable"]: meta_field.value diff --git a/tests/test_load_config.py b/tests/test_load_config.py index dc019b7..62339f6 100644 --- a/tests/test_load_config.py +++ b/tests/test_load_config.py @@ -157,6 +157,7 @@ class TestLoadConfig: "DELETERS": 8, "READ_AGE": 8, "STREAMING": 9, + "K6_OUT": "output", "PREGEN_JSON": "pregen_json", "PREPARE_LOCALLY": True, } @@ -181,6 +182,7 @@ class TestLoadConfig: expected_env_vars = { "DURATION": 9, "WRITE_OBJ_SIZE": 11, + "K6_OUT": "output", "REGISTRY_FILE": "registry_file", "K6_MIN_ITERATION_DURATION": "min_iteration_duration", "K6_SETUP_TIMEOUT": "setup_timeout", @@ -221,6 +223,7 @@ class TestLoadConfig: "DURATION": 9, "WRITE_OBJ_SIZE": 11, "REGISTRY_FILE": "registry_file", + "K6_OUT": "output", "K6_MIN_ITERATION_DURATION": "min_iteration_duration", "K6_SETUP_TIMEOUT": "setup_timeout", "WRITERS": 7, @@ -254,6 +257,7 @@ class TestLoadConfig: "DURATION": 183900, "WRITE_OBJ_SIZE": 11, "REGISTRY_FILE": "registry_file", + "K6_OUT": "output", "K6_MIN_ITERATION_DURATION": "min_iteration_duration", "K6_SETUP_TIMEOUT": "setup_timeout", "NO_VERIFY_SSL": True, @@ -293,6 +297,7 @@ class TestLoadConfig: "DURATION": 9, "WRITE_OBJ_SIZE": 11, "REGISTRY_FILE": "registry_file", + "K6_OUT": "output", "K6_MIN_ITERATION_DURATION": "min_iteration_duration", "K6_SETUP_TIMEOUT": "setup_timeout", "NO_VERIFY_SSL": True, @@ -332,6 +337,7 @@ class TestLoadConfig: expected_env_vars = { "DURATION": 9, "WRITE_OBJ_SIZE": 11, + "K6_OUT": "output", "NO_VERIFY_SSL": True, "REGISTRY_FILE": "registry_file", "K6_MIN_ITERATION_DURATION": "min_iteration_duration", @@ -365,6 +371,7 @@ class TestLoadConfig: "CONFIG_FILE": "config_file", "DURATION": 9, "WRITE_OBJ_SIZE": 11, + "K6_OUT": "output", "REGISTRY_FILE": "registry_file", "K6_MIN_ITERATION_DURATION": "min_iteration_duration", "K6_SETUP_TIMEOUT": "setup_timeout", @@ -419,6 +426,7 @@ class TestLoadConfig: "DURATION": 0, "WRITE_OBJ_SIZE": 0, "REGISTRY_FILE": "", + "K6_OUT": "", "K6_MIN_ITERATION_DURATION": "", "K6_SETUP_TIMEOUT": "", "WRITERS": 0, @@ -449,6 +457,7 @@ class TestLoadConfig: "DURATION": 0, "WRITE_OBJ_SIZE": 0, "REGISTRY_FILE": "", + "K6_OUT": "", "K6_MIN_ITERATION_DURATION": "", "K6_SETUP_TIMEOUT": "", "MAX_WRITERS": 0, @@ -486,6 +495,7 @@ class TestLoadConfig: "DURATION": 0, "WRITE_OBJ_SIZE": 0, "REGISTRY_FILE": "", + "K6_OUT": "", "K6_MIN_ITERATION_DURATION": "", "K6_SETUP_TIMEOUT": "", "WRITERS": 0, @@ -516,6 +526,7 @@ class TestLoadConfig: "DURATION": 0, "WRITE_OBJ_SIZE": 0, "REGISTRY_FILE": "", + "K6_OUT": "", "K6_MIN_ITERATION_DURATION": "", "K6_SETUP_TIMEOUT": "", "NO_VERIFY_SSL": False, @@ -554,6 +565,7 @@ class TestLoadConfig: "WRITE_OBJ_SIZE": 0, "NO_VERIFY_SSL": False, "REGISTRY_FILE": "", + "K6_OUT": "", "K6_MIN_ITERATION_DURATION": "", "K6_SETUP_TIMEOUT": "", "WRITERS": 0, @@ -584,6 +596,7 @@ class TestLoadConfig: "DURATION": 0, "WRITE_OBJ_SIZE": 0, "REGISTRY_FILE": "", + "K6_OUT": "", "K6_MIN_ITERATION_DURATION": "", "K6_SETUP_TIMEOUT": "", "WRITERS": 0, @@ -655,7 +668,7 @@ class TestLoadConfig: 
assert sorted(preset_parameters) == sorted(expected_preset_args) def _check_env_vars(self, load_params: LoadParams, expected_env_vars: dict[str, str]): - env_vars = load_params.get_env_vars() + env_vars = load_params.get_k6_vars() assert env_vars == expected_env_vars def _check_all_values_none(self, dataclass, skip_fields=None): From 22b41b227fbddf0704f5529d24d85552ac08d340 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 27 Feb 2024 11:57:54 +0300 Subject: [PATCH 214/363] [#186] Add total bytes to report Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/interfaces/summarized.py | 2 ++ src/frostfs_testlib/load/load_metrics.py | 4 ++++ src/frostfs_testlib/load/load_report.py | 6 ++++++ 3 files changed, 12 insertions(+) diff --git a/src/frostfs_testlib/load/interfaces/summarized.py b/src/frostfs_testlib/load/interfaces/summarized.py index a005963..54947b4 100644 --- a/src/frostfs_testlib/load/interfaces/summarized.py +++ b/src/frostfs_testlib/load/interfaces/summarized.py @@ -50,6 +50,7 @@ class SummarizedStats: throughput: float = field(default_factory=float) latencies: SummarizedLatencies = field(default_factory=SummarizedLatencies) errors: SummarizedErorrs = field(default_factory=SummarizedErorrs) + total_bytes: int = field(default_factory=int) passed: bool = True def calc_stats(self): @@ -85,6 +86,7 @@ class SummarizedStats: target.latencies.by_node[node_key] = operation.latency target.throughput += operation.throughput target.errors.threshold = load_params.error_threshold + target.total_bytes = operation.total_bytes if operation.failed_iterations: target.errors.by_node[node_key] = operation.failed_iterations diff --git a/src/frostfs_testlib/load/load_metrics.py b/src/frostfs_testlib/load/load_metrics.py index 2dad3f6..035ce8b 100644 --- a/src/frostfs_testlib/load/load_metrics.py +++ b/src/frostfs_testlib/load/load_metrics.py @@ -39,6 +39,10 @@ class OperationMetric(ABC): def throughput(self) -> float: return self._get_metric_rate(self._THROUGHPUT) + @property + def total_bytes(self) -> float: + return self._get_metric(self._THROUGHPUT) + def _get_metric(self, metric: str) -> int: metrics_method_map = { "counter": self._get_counter_metric, diff --git a/src/frostfs_testlib/load/load_report.py b/src/frostfs_testlib/load/load_report.py index 22ddb54..2dfac26 100644 --- a/src/frostfs_testlib/load/load_report.py +++ b/src/frostfs_testlib/load/load_report.py @@ -120,6 +120,11 @@ class LoadReport: throughput, unit = calc_unit(stats.throughput) throughput_html = self._row("Throughput", f"{throughput:.2f} {unit}/sec") + bytes_html = "" + if stats.total_bytes > 0: + total_bytes, total_bytes_unit = calc_unit(stats.total_bytes) + bytes_html = self._row("Total transferred", f"{total_bytes:.2f} {total_bytes_unit}") + per_node_errors_html = "" for node_key, errors in stats.errors.by_node.items(): if self.load_params.k6_process_allocation_strategy == K6ProcessAllocationStrategy.PER_ENDPOINT: @@ -148,6 +153,7 @@ class LoadReport: Metrics {self._row("Total operations", stats.operations)} {self._row("OP/sec", f"{stats.rate:.2f}")} + {bytes_html} {throughput_html} {latency_html} Errors From 09a7f66d1eda7d8eb251ad306be6181b1fb82a40 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Fri, 1 Mar 2024 02:15:40 +0300 Subject: [PATCH 215/363] [#188] Add CredentialsProvider Signed-off-by: Andrey Berezin --- pyproject.toml | 3 ++ .../credentials/authmate_s3.py | 49 +++++++++++++++++++ src/frostfs_testlib/credentials/interfaces.py | 25 ++++++++++ src/frostfs_testlib/hosting/config.py | 1 + 
src/frostfs_testlib/steps/s3/s3_helper.py     |  49 +------------------
 5 files changed, 79 insertions(+), 48 deletions(-)
 create mode 100644 src/frostfs_testlib/credentials/authmate_s3.py
 create mode 100644 src/frostfs_testlib/credentials/interfaces.py

diff --git a/pyproject.toml b/pyproject.toml
index 74a163e..c9aaf74 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -57,6 +57,9 @@ frostfs-http = "frostfs_testlib.storage.dataclasses.frostfs_services:HTTPGate"
 neo-go = "frostfs_testlib.storage.dataclasses.frostfs_services:MorphChain"
 frostfs-ir = "frostfs_testlib.storage.dataclasses.frostfs_services:InnerRing"
 
+[project.entry-points."frostfs.testlib.credentials_providers"]
+authmate = "frostfs_testlib.credentials.authmate_s3:AuthmateS3CredentialsProvider"
+
 [tool.isort]
 profile = "black"
 src_paths = ["src", "tests"]
diff --git a/src/frostfs_testlib/credentials/authmate_s3.py b/src/frostfs_testlib/credentials/authmate_s3.py
new file mode 100644
index 0000000..c77765c
--- /dev/null
+++ b/src/frostfs_testlib/credentials/authmate_s3.py
@@ -0,0 +1,49 @@
+import re
+from datetime import datetime
+
+from frostfs_testlib import reporter
+from frostfs_testlib.cli import FrostfsAuthmate
+from frostfs_testlib.credentials.interfaces import S3CredentialsProvider
+from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
+from frostfs_testlib.shell import Shell
+from frostfs_testlib.steps.cli.container import list_containers
+from frostfs_testlib.storage.cluster import Cluster, ClusterNode
+from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
+
+
+class AuthmateS3CredentialsProvider(S3CredentialsProvider):
+    @reporter.step("Init S3 Credentials using Authmate CLI")
+    def provide(self, cluster_node: ClusterNode) -> tuple[str, str]:
+        cluster: Cluster = self.stash["cluster"]
+        shell: Shell = self.stash["shell"]
+        wallet: WalletInfo = self.stash["wallet"]
+        endpoint = cluster_node.storage_node.get_rpc_endpoint()
+
+        gate_public_keys = [s3gate.get_wallet_public_key() for s3gate in cluster.s3_gates]
+        # unique short bucket name
+        bucket = f"bucket_{hex(int(datetime.now().timestamp()*1000000))}"
+
+        frostfs_authmate: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC)
+        issue_secret_output = frostfs_authmate.secret.issue(
+            wallet=wallet.path,
+            peer=endpoint,
+            gate_public_key=gate_public_keys,
+            wallet_password=wallet.password,
+            container_policy=self.stash.get("location_constraints"),
+            container_friendly_name=bucket,
+        ).stdout
+
+        aws_access_key_id = str(
+            re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group("aws_access_key_id")
+        )
+        aws_secret_access_key = str(
+            re.search(r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output).group(
+                "aws_secret_access_key"
+            )
+        )
+        cid = str(re.search(r"container_id.*:\s.(?P<container_id>\w*)", issue_secret_output).group("container_id"))
+
+        containers_list = list_containers(wallet.path, shell, endpoint)
+        assert cid in containers_list, f"Expected cid {cid} in {containers_list}"
+
+        return aws_access_key_id, aws_secret_access_key
diff --git a/src/frostfs_testlib/credentials/interfaces.py b/src/frostfs_testlib/credentials/interfaces.py
new file mode 100644
index 0000000..8db43ad
--- /dev/null
+++ b/src/frostfs_testlib/credentials/interfaces.py
@@ -0,0 +1,25 @@
+from abc import abstractmethod
+
+from frostfs_testlib.plugins import load_plugin
+from frostfs_testlib.storage.cluster import ClusterNode
+
+
+class S3CredentialsProvider(object):
+    stash: dict
+
+    def __init__(self, stash: dict) -> None:
+        self.stash = stash
+
+    @abstractmethod
+    def provide(self, cluster_node: ClusterNode) -> tuple[str, str]:
+        raise NotImplementedError("Directly called abstract class?")
+
+
+class CredentialsProvider(object):
+    stash: dict
+    S3: S3CredentialsProvider
+
+    def __init__(self, s3_plugin_name: str) -> None:
+        self.stash = {}
+        s3cls = load_plugin("frostfs.testlib.credentials_providers", s3_plugin_name)
+        self.S3 = s3cls(self.stash)
diff --git a/src/frostfs_testlib/hosting/config.py b/src/frostfs_testlib/hosting/config.py
index 8b256cc..310eab2 100644
--- a/src/frostfs_testlib/hosting/config.py
+++ b/src/frostfs_testlib/hosting/config.py
@@ -62,6 +62,7 @@ class HostConfig:
     plugin_name: str
     healthcheck_plugin_name: str
     address: str
+    s3_creds_plugin_name: str = field(default="authmate")
     services: list[ServiceConfig] = field(default_factory=list)
     clis: list[CLIConfig] = field(default_factory=list)
     attributes: dict[str, str] = field(default_factory=dict)
diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py
index dbd3765..f717fd4 100644
--- a/src/frostfs_testlib/steps/s3/s3_helper.py
+++ b/src/frostfs_testlib/steps/s3/s3_helper.py
@@ -1,25 +1,15 @@
-import json
 import logging
 import os
-import re
-import uuid
 from datetime import datetime, timedelta
 from typing import Optional
 
 from dateutil.parser import parse
 
 from frostfs_testlib import reporter
-from frostfs_testlib.cli import FrostfsAuthmate
-from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
-from frostfs_testlib.resources.common import CREDENTIALS_CREATE_TIMEOUT
 from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
-from frostfs_testlib.shell import CommandOptions, InteractiveInput, Shell
-from frostfs_testlib.shell.interfaces import SshCredentials
+from frostfs_testlib.shell import Shell
 from frostfs_testlib.steps.cli.container import search_container_by_name, search_nodes_with_container
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode
-from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
-from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
-from frostfs_testlib.utils.cli_utils import _run_with_passwd
 
 logger = logging.getLogger("NeoLogger")
 
@@ -161,43 +151,6 @@ def assert_s3_acl(acl_grants: list, permitted_users: str):
             logger.error("FULL_CONTROL is given to All Users")
 
 
-@reporter.step("Init S3 Credentials")
-def init_s3_credentials(
-    wallet: WalletInfo,
-    shell: Shell,
-    cluster: Cluster,
-    policy: Optional[dict] = None,
-    s3gates: Optional[list[S3Gate]] = None,
-    container_placement_policy: Optional[str] = None,
-):
-    gate_public_keys = []
-    bucket = str(uuid.uuid4())
-    if not s3gates:
-        s3gates = [cluster.s3_gates[0]]
-    for s3gate in s3gates:
-        gate_public_keys.append(s3gate.get_wallet_public_key())
-    frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC)
-    issue_secret_output = frostfs_authmate_exec.secret.issue(
-        wallet=wallet.path,
-        peer=cluster.default_rpc_endpoint,
-        gate_public_key=gate_public_keys,
-        wallet_password=wallet.password,
-        container_policy=policy,
-        container_friendly_name=bucket,
-        container_placement_policy=container_placement_policy,
-    ).stdout
-    aws_access_key_id = str(
-        re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group("aws_access_key_id")
-    )
-    aws_secret_access_key = str(
-        re.search(r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output).group(
-            "aws_secret_access_key"
-        )
-    )
-    cid = str(re.search(r"container_id.*:\s.(?P<container_id>\w*)",
issue_secret_output).group("container_id")) - return cid, aws_access_key_id, aws_secret_access_key - - @reporter.step("Delete bucket with all objects") def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str): versioning_status = s3_client.get_bucket_versioning_status(bucket) From 25925c637bad213695bb0a7d8f90e8ba902de517 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 11 Mar 2024 19:23:10 +0300 Subject: [PATCH 216/363] [#191] Credentials work overhaul Signed-off-by: Andrey Berezin --- pyproject.toml | 10 +- .../cli/frostfs_cli/container.py | 16 +-- src/frostfs_testlib/cli/frostfs_cli/netmap.py | 8 +- src/frostfs_testlib/cli/frostfs_cli/object.py | 16 +-- .../cli/frostfs_cli/session.py | 10 +- src/frostfs_testlib/cli/frostfs_cli/util.py | 14 +-- ...authmate_s3.py => authmate_s3_provider.py} | 32 +++-- src/frostfs_testlib/credentials/interfaces.py | 50 ++++++-- .../credentials/wallet_factory_provider.py | 14 +++ src/frostfs_testlib/hosting/config.py | 2 + src/frostfs_testlib/load/k6.py | 34 ++---- src/frostfs_testlib/load/runners.py | 114 ++++-------------- .../s3/curl_bucket_resolver.py | 16 +++ src/frostfs_testlib/s3/interfaces.py | 27 +++-- src/frostfs_testlib/steps/acl.py | 42 +++---- src/frostfs_testlib/steps/cli/container.py | 92 ++++++-------- src/frostfs_testlib/steps/cli/object.py | 99 +++++---------- .../steps/complex_object_actions.py | 14 +-- src/frostfs_testlib/steps/epoch.py | 8 +- src/frostfs_testlib/steps/s3/s3_helper.py | 11 +- src/frostfs_testlib/steps/session_token.py | 17 ++- src/frostfs_testlib/steps/storage_object.py | 6 +- src/frostfs_testlib/steps/storage_policy.py | 16 ++- src/frostfs_testlib/steps/tombstone.py | 16 +-- .../controllers/background_load_controller.py | 10 +- .../controllers/cluster_state_controller.py | 37 +++--- .../storage/dataclasses/acl.py | 20 ++- .../dataclasses/storage_object_info.py | 3 +- .../storage/dataclasses/wallet.py | 35 +++--- src/frostfs_testlib/utils/version_utils.py | 26 ++-- src/frostfs_testlib/utils/wallet_utils.py | 40 +++--- 31 files changed, 370 insertions(+), 485 deletions(-) rename src/frostfs_testlib/credentials/{authmate_s3.py => authmate_s3_provider.py} (58%) create mode 100644 src/frostfs_testlib/credentials/wallet_factory_provider.py create mode 100644 src/frostfs_testlib/s3/curl_bucket_resolver.py diff --git a/pyproject.toml b/pyproject.toml index c9aaf74..5a38dba 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -58,15 +58,19 @@ neo-go = "frostfs_testlib.storage.dataclasses.frostfs_services:MorphChain" frostfs-ir = "frostfs_testlib.storage.dataclasses.frostfs_services:InnerRing" [project.entry-points."frostfs.testlib.credentials_providers"] -authmate = "frostfs_testlib.credentials.authmate_s3:AuthmateS3CredentialsProvider" +authmate = "frostfs_testlib.credentials.authmate_s3_provider:AuthmateS3CredentialsProvider" +wallet_factory = "frostfs_testlib.credentials.wallet_factory_provider:WalletFactoryProvider" + +[project.entry-points."frostfs.testlib.bucket_cid_resolver"] +frostfs = "frostfs_testlib.s3.curl_bucket_resolver:CurlBucketContainerResolver" [tool.isort] profile = "black" src_paths = ["src", "tests"] -line_length = 120 +line_length = 140 [tool.black] -line-length = 120 +line-length = 140 target-version = ["py310"] [tool.bumpver] diff --git a/src/frostfs_testlib/cli/frostfs_cli/container.py b/src/frostfs_testlib/cli/frostfs_cli/container.py index 374c880..b5592e8 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/container.py +++ b/src/frostfs_testlib/cli/frostfs_cli/container.py @@ -8,7 
+8,7 @@ class FrostfsCliContainer(CliCommand): def create( self, rpc_endpoint: str, - wallet: str, + wallet: Optional[str] = None, address: Optional[str] = None, attributes: Optional[dict] = None, basic_acl: Optional[str] = None, @@ -57,8 +57,8 @@ class FrostfsCliContainer(CliCommand): def delete( self, rpc_endpoint: str, - wallet: str, cid: str, + wallet: Optional[str] = None, address: Optional[str] = None, await_mode: bool = False, session: Optional[str] = None, @@ -93,8 +93,8 @@ class FrostfsCliContainer(CliCommand): def get( self, rpc_endpoint: str, - wallet: str, cid: str, + wallet: Optional[str] = None, address: Optional[str] = None, await_mode: bool = False, to: Optional[str] = None, @@ -129,8 +129,8 @@ class FrostfsCliContainer(CliCommand): def get_eacl( self, rpc_endpoint: str, - wallet: str, cid: str, + wallet: Optional[str] = None, address: Optional[str] = None, await_mode: bool = False, to: Optional[str] = None, @@ -166,7 +166,7 @@ class FrostfsCliContainer(CliCommand): def list( self, rpc_endpoint: str, - wallet: str, + wallet: Optional[str] = None, address: Optional[str] = None, owner: Optional[str] = None, ttl: Optional[int] = None, @@ -197,8 +197,8 @@ class FrostfsCliContainer(CliCommand): def list_objects( self, rpc_endpoint: str, - wallet: str, cid: str, + wallet: Optional[str] = None, address: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, @@ -227,8 +227,8 @@ class FrostfsCliContainer(CliCommand): def set_eacl( self, rpc_endpoint: str, - wallet: str, cid: str, + wallet: Optional[str] = None, address: Optional[str] = None, await_mode: bool = False, table: Optional[str] = None, @@ -264,8 +264,8 @@ class FrostfsCliContainer(CliCommand): def search_node( self, rpc_endpoint: str, - wallet: str, cid: str, + wallet: Optional[str] = None, address: Optional[str] = None, ttl: Optional[int] = None, from_file: Optional[str] = None, diff --git a/src/frostfs_testlib/cli/frostfs_cli/netmap.py b/src/frostfs_testlib/cli/frostfs_cli/netmap.py index 8920893..d219940 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/netmap.py +++ b/src/frostfs_testlib/cli/frostfs_cli/netmap.py @@ -8,7 +8,7 @@ class FrostfsCliNetmap(CliCommand): def epoch( self, rpc_endpoint: str, - wallet: str, + wallet: Optional[str] = None, address: Optional[str] = None, generate_key: bool = False, ttl: Optional[int] = None, @@ -38,7 +38,7 @@ class FrostfsCliNetmap(CliCommand): def netinfo( self, rpc_endpoint: str, - wallet: str, + wallet: Optional[str] = None, address: Optional[str] = None, generate_key: bool = False, ttl: Optional[int] = None, @@ -68,7 +68,7 @@ class FrostfsCliNetmap(CliCommand): def nodeinfo( self, rpc_endpoint: str, - wallet: str, + wallet: Optional[str] = None, address: Optional[str] = None, generate_key: bool = False, json: bool = False, @@ -100,7 +100,7 @@ class FrostfsCliNetmap(CliCommand): def snapshot( self, rpc_endpoint: str, - wallet: str, + wallet: Optional[str] = None, address: Optional[str] = None, generate_key: bool = False, ttl: Optional[int] = None, diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py index 38a69e4..5d5bd91 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/object.py +++ b/src/frostfs_testlib/cli/frostfs_cli/object.py @@ -8,9 +8,9 @@ class FrostfsCliObject(CliCommand): def delete( self, rpc_endpoint: str, - wallet: str, cid: str, oid: str, + wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, session: Optional[str] = None, @@ -44,9 +44,9 @@ class 
FrostfsCliObject(CliCommand): def get( self, rpc_endpoint: str, - wallet: str, cid: str, oid: str, + wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, file: Optional[str] = None, @@ -88,9 +88,9 @@ class FrostfsCliObject(CliCommand): def hash( self, rpc_endpoint: str, - wallet: str, cid: str, oid: str, + wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, range: Optional[str] = None, @@ -130,9 +130,9 @@ class FrostfsCliObject(CliCommand): def head( self, rpc_endpoint: str, - wallet: str, cid: str, oid: str, + wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, file: Optional[str] = None, @@ -176,9 +176,9 @@ class FrostfsCliObject(CliCommand): def lock( self, rpc_endpoint: str, - wallet: str, cid: str, oid: str, + wallet: Optional[str] = None, lifetime: Optional[int] = None, expire_at: Optional[int] = None, address: Optional[str] = None, @@ -216,9 +216,9 @@ class FrostfsCliObject(CliCommand): def put( self, rpc_endpoint: str, - wallet: str, cid: str, file: str, + wallet: Optional[str] = None, address: Optional[str] = None, attributes: Optional[dict] = None, bearer: Optional[str] = None, @@ -267,10 +267,10 @@ class FrostfsCliObject(CliCommand): def range( self, rpc_endpoint: str, - wallet: str, cid: str, oid: str, range: str, + wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, file: Optional[str] = None, @@ -311,8 +311,8 @@ class FrostfsCliObject(CliCommand): def search( self, rpc_endpoint: str, - wallet: str, cid: str, + wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, filters: Optional[list] = None, diff --git a/src/frostfs_testlib/cli/frostfs_cli/session.py b/src/frostfs_testlib/cli/frostfs_cli/session.py index e21cc23..857b13e 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/session.py +++ b/src/frostfs_testlib/cli/frostfs_cli/session.py @@ -9,7 +9,6 @@ class FrostfsCliSession(CliCommand): self, rpc_endpoint: str, wallet: str, - wallet_password: str, out: str, lifetime: Optional[int] = None, address: Optional[str] = None, @@ -30,12 +29,7 @@ class FrostfsCliSession(CliCommand): Returns: Command's result. """ - return self._execute_with_password( + return self._execute( "session create", - wallet_password, - **{ - param: value - for param, value in locals().items() - if param not in ["self", "wallet_password"] - }, + **{param: value for param, value in locals().items() if param not in ["self"]}, ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/util.py b/src/frostfs_testlib/cli/frostfs_cli/util.py index 99acd0a..7914169 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/util.py +++ b/src/frostfs_testlib/cli/frostfs_cli/util.py @@ -6,12 +6,12 @@ from frostfs_testlib.shell import CommandResult class FrostfsCliUtil(CliCommand): def sign_bearer_token( - self, - wallet: str, - from_file: str, - to_file: str, - address: Optional[str] = None, - json: Optional[bool] = False, + self, + from_file: str, + to_file: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + json: Optional[bool] = False, ) -> CommandResult: """ Sign bearer token to use it in requests. 
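For orientation, the net effect of these signature changes: once a FrostfsCli is constructed with a wallet config file, individual commands can omit the wallet argument entirely. A minimal sketch of the resulting call pattern (the config path and token file names are illustrative assumptions, not values from this patch):

    from frostfs_testlib.cli import FrostfsCli
    from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
    from frostfs_testlib.shell import LocalShell

    # The wallet path and password now live in the config file passed to the
    # CLI wrapper, so util commands need only the token files themselves.
    cli = FrostfsCli(LocalShell(), FROSTFS_CLI_EXEC, config_file="/path/to/wallet_config.yml")
    cli.util.sign_bearer_token(from_file="bearer.json", to_file="bearer.signed", json=True)
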
@@ -33,9 +33,9 @@ class FrostfsCliUtil(CliCommand):
 
     def sign_session_token(
         self,
-        wallet: str,
         from_file: str,
         to_file: str,
+        wallet: Optional[str] = None,
         address: Optional[str] = None,
     ) -> CommandResult:
         """
diff --git a/src/frostfs_testlib/credentials/authmate_s3.py b/src/frostfs_testlib/credentials/authmate_s3_provider.py
similarity index 58%
rename from src/frostfs_testlib/credentials/authmate_s3.py
rename to src/frostfs_testlib/credentials/authmate_s3_provider.py
index c77765c..6343b5a 100644
--- a/src/frostfs_testlib/credentials/authmate_s3.py
+++ b/src/frostfs_testlib/credentials/authmate_s3_provider.py
@@ -1,25 +1,26 @@
 import re
 from datetime import datetime
+from typing import Optional
 
 from frostfs_testlib import reporter
 from frostfs_testlib.cli import FrostfsAuthmate
-from frostfs_testlib.credentials.interfaces import S3CredentialsProvider
+from frostfs_testlib.credentials.interfaces import S3Credentials, S3CredentialsProvider, User
 from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
-from frostfs_testlib.shell import Shell
+from frostfs_testlib.shell import LocalShell
 from frostfs_testlib.steps.cli.container import list_containers
-from frostfs_testlib.storage.cluster import Cluster, ClusterNode
-from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
+from frostfs_testlib.storage.cluster import ClusterNode
+from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
 
 
 class AuthmateS3CredentialsProvider(S3CredentialsProvider):
     @reporter.step("Init S3 Credentials using Authmate CLI")
-    def provide(self, cluster_node: ClusterNode) -> tuple[str, str]:
-        cluster: Cluster = self.stash["cluster"]
-        shell: Shell = self.stash["shell"]
-        wallet: WalletInfo = self.stash["wallet"]
+    def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None) -> S3Credentials:
+        cluster_nodes: list[ClusterNode] = self.cluster.cluster_nodes
+        shell = LocalShell()
+        wallet = user.wallet
         endpoint = cluster_node.storage_node.get_rpc_endpoint()
 
-        gate_public_keys = [s3gate.get_wallet_public_key() for s3gate in cluster.s3_gates]
+        gate_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes]
         # unique short bucket name
         bucket = f"bucket_{hex(int(datetime.now().timestamp()*1000000))}"
 
@@ -29,21 +30,18 @@ class AuthmateS3CredentialsProvider(S3CredentialsProvider):
             peer=endpoint,
             gate_public_key=gate_public_keys,
             wallet_password=wallet.password,
-            container_policy=self.stash.get("location_constraints"),
+            container_policy=location_constraints,
             container_friendly_name=bucket,
         ).stdout
 
-        aws_access_key_id = str(
-            re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group("aws_access_key_id")
-        )
+        aws_access_key_id = str(re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group("aws_access_key_id"))
         aws_secret_access_key = str(
-            re.search(r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output).group(
-                "aws_secret_access_key"
-            )
+            re.search(r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output).group("aws_secret_access_key")
         )
         cid = str(re.search(r"container_id.*:\s.(?P<container_id>\w*)", issue_secret_output).group("container_id"))
 
         containers_list = list_containers(wallet.path, shell, endpoint)
         assert cid in containers_list, f"Expected cid {cid} in {containers_list}"
 
-        return aws_access_key_id, aws_secret_access_key
+        user.s3_credentials = S3Credentials(aws_access_key_id, aws_secret_access_key)
+        return user.s3_credentials
diff --git a/src/frostfs_testlib/credentials/interfaces.py
b/src/frostfs_testlib/credentials/interfaces.py index 8db43ad..c863da0 100644 --- a/src/frostfs_testlib/credentials/interfaces.py +++ b/src/frostfs_testlib/credentials/interfaces.py @@ -1,25 +1,51 @@ -from abc import abstractmethod +from abc import ABC, abstractmethod +from dataclasses import dataclass, field +from typing import Any, Optional from frostfs_testlib.plugins import load_plugin -from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo -class S3CredentialsProvider(object): - stash: dict +@dataclass +class S3Credentials: + access_key: str + secret_key: str - def __init__(self, stash: dict) -> None: - self.stash = stash + +@dataclass +class User: + name: str + attributes: dict[str, Any] = field(default_factory=dict) + wallet: WalletInfo | None = None + s3_credentials: S3Credentials | None = None + + +class S3CredentialsProvider(ABC): + def __init__(self, cluster: Cluster) -> None: + self.cluster = cluster @abstractmethod - def provide(self, cluster_node: ClusterNode) -> tuple[str, str]: + def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None) -> S3Credentials: + raise NotImplementedError("Directly called abstract class?") + + +class GrpcCredentialsProvider(ABC): + def __init__(self, cluster: Cluster) -> None: + self.cluster = cluster + + @abstractmethod + def provide(self, user: User, cluster_node: ClusterNode) -> WalletInfo: raise NotImplementedError("Directly called abstract class?") class CredentialsProvider(object): - stash: dict S3: S3CredentialsProvider + GRPC: GrpcCredentialsProvider - def __init__(self, s3_plugin_name: str) -> None: - self.stash = {} - s3cls = load_plugin("frostfs.testlib.credentials_providers", s3_plugin_name) - self.S3 = s3cls(self.stash) + def __init__(self, cluster: Cluster) -> None: + config = cluster.cluster_nodes[0].host.config + s3_cls = load_plugin("frostfs.testlib.credentials_providers", config.s3_creds_plugin_name) + self.S3 = s3_cls(cluster) + grpc_cls = load_plugin("frostfs.testlib.credentials_providers", config.grpc_creds_plugin_name) + self.GRPC = grpc_cls(cluster) diff --git a/src/frostfs_testlib/credentials/wallet_factory_provider.py b/src/frostfs_testlib/credentials/wallet_factory_provider.py new file mode 100644 index 0000000..4d1ab7a --- /dev/null +++ b/src/frostfs_testlib/credentials/wallet_factory_provider.py @@ -0,0 +1,14 @@ +from frostfs_testlib import reporter +from frostfs_testlib.credentials.interfaces import GrpcCredentialsProvider, User +from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_PASS +from frostfs_testlib.shell.local_shell import LocalShell +from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.storage.dataclasses.wallet import WalletFactory, WalletInfo + + +class WalletFactoryProvider(GrpcCredentialsProvider): + @reporter.step("Init gRPC Credentials using wallet generation") + def provide(self, user: User, cluster_node: ClusterNode) -> WalletInfo: + wallet_factory = WalletFactory(ASSETS_DIR, LocalShell()) + user.wallet = wallet_factory.create_wallet(file_name=user, password=DEFAULT_WALLET_PASS) + return user.wallet diff --git a/src/frostfs_testlib/hosting/config.py b/src/frostfs_testlib/hosting/config.py index 310eab2..f52f8b7 100644 --- a/src/frostfs_testlib/hosting/config.py +++ b/src/frostfs_testlib/hosting/config.py @@ -63,6 +63,8 @@ class HostConfig: healthcheck_plugin_name: str address: str 
s3_creds_plugin_name: str = field(default="authmate") + grpc_creds_plugin_name: str = field(default="wallet_factory") + product: str = field(default="frostfs") services: list[ServiceConfig] = field(default_factory=list) clis: list[CLIConfig] = field(default_factory=list) attributes: dict[str, str] = field(default_factory=dict) diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index 1e98b98..caf3cfe 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -9,13 +9,13 @@ from typing import Any from urllib.parse import urlparse from frostfs_testlib import reporter +from frostfs_testlib.credentials.interfaces import User from frostfs_testlib.load.interfaces.loader import Loader from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario, LoadType from frostfs_testlib.processes.remote_process import RemoteProcess from frostfs_testlib.resources.common import STORAGE_USER_NAME from frostfs_testlib.resources.load_params import K6_STOP_SIGNAL_TIMEOUT, K6_TEARDOWN_PERIOD from frostfs_testlib.shell import Shell -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.test_control import wait_for_success EXIT_RESULT_CODE = 0 @@ -42,16 +42,16 @@ class K6: k6_dir: str, shell: Shell, loader: Loader, - wallet: WalletInfo, + user: User, ): if load_params.scenario is None: raise RuntimeError("Scenario should not be none") - self.load_params: LoadParams = load_params + self.load_params = load_params self.endpoints = endpoints - self.loader: Loader = loader - self.shell: Shell = shell - self.wallet = wallet + self.loader = loader + self.shell = shell + self.user = user self.preset_output: str = "" self.summary_json: str = os.path.join( self.load_params.working_dir, @@ -64,13 +64,9 @@ class K6: f"{self._generate_env_variables()}{self._k6_dir}/k6 run {self._generate_k6_variables()} " f"{self._k6_dir}/scenarios/{self.load_params.scenario.value}.js" ) - user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None - process_id = ( - self.load_params.load_id - if self.load_params.scenario != LoadScenario.VERIFY - else f"{self.load_params.load_id}_verify" - ) - self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, user, process_id) + remote_user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None + process_id = self.load_params.load_id if self.load_params.scenario != LoadScenario.VERIFY else f"{self.load_params.load_id}_verify" + self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, remote_user, process_id) def _get_fill_percents(self): fill_percents = self.shell.exec("df -H --output=source,pcent,target | grep frostfs").stdout.split("\n") @@ -103,8 +99,8 @@ class K6: preset_grpc: [ preset_grpc, f"--endpoint {','.join(self.endpoints)}", - f"--wallet {self.wallet.path} ", - f"--config {self.wallet.config_path} ", + f"--wallet {self.user.wallet.path} ", + f"--config {self.user.wallet.config_path} ", ], preset_s3: [ preset_s3, @@ -167,9 +163,7 @@ class K6: remaining_time = timeout - working_time setup_teardown_time = ( - int(K6_TEARDOWN_PERIOD) - + self.load_params.get_init_time() - + int(self.load_params.setup_timeout.replace("s", "").strip()) + int(K6_TEARDOWN_PERIOD) + self.load_params.get_init_time() + int(self.load_params.setup_timeout.replace("s", "").strip()) ) remaining_time_including_setup_and_teardown = remaining_time + setup_teardown_time timeout = 
remaining_time_including_setup_and_teardown @@ -201,9 +195,7 @@ class K6: if not self.load_params.fill_percent is None: with reporter.step(f"Check the percentage of filling of all data disks on the node"): if self.check_fill_percent(): - logger.info( - f"Stopping load on because disks is filled more then {self.load_params.fill_percent}%" - ) + logger.info(f"Stopping load on because disks is filled more then {self.load_params.fill_percent}%") event.set() self.stop() return diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index d456270..a34786f 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -1,24 +1,20 @@ import copy import itertools import math -import re import time from dataclasses import fields from threading import Event from typing import Optional from urllib.parse import urlparse -import yaml - from frostfs_testlib import reporter -from frostfs_testlib.cli.frostfs_authmate.authmate import FrostfsAuthmate +from frostfs_testlib.credentials.interfaces import S3Credentials, User from frostfs_testlib.load.interfaces.loader import Loader from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner from frostfs_testlib.load.k6 import K6 from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadType from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader from frostfs_testlib.resources import optionals -from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC from frostfs_testlib.resources.common import STORAGE_USER_NAME from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_VUS_COUNT_DIVISOR, LOAD_NODE_SSH_USER, LOAD_NODES from frostfs_testlib.shell.command_inspectors import SuInspector @@ -26,7 +22,6 @@ from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing import parallel, run_optionally from frostfs_testlib.testing.test_control import retry from frostfs_testlib.utils import datetime_utils @@ -57,17 +52,17 @@ class RunnerBase(ScenarioRunner): class DefaultRunner(RunnerBase): loaders: list[Loader] - loaders_wallet: WalletInfo + user: User def __init__( self, - loaders_wallet: WalletInfo, + user: User, load_ip_list: Optional[list[str]] = None, ) -> None: if load_ip_list is None: load_ip_list = LOAD_NODES self.loaders = RemoteLoader.from_ip_list(load_ip_list) - self.loaders_wallet = loaders_wallet + self.user = user @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED) @reporter.step("Preparation steps") @@ -86,55 +81,27 @@ class DefaultRunner(RunnerBase): return with reporter.step("Init s3 client on loaders"): - storage_node = nodes_under_load[0].service(StorageNode) - s3_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes] - grpc_peer = storage_node.get_rpc_endpoint() - - parallel(self._prepare_loader, self.loaders, load_params, grpc_peer, s3_public_keys, k6_dir) + s3_credentials = self.user.s3_credentials + parallel(self._aws_configure_on_loader, self.loaders, s3_credentials) def _force_fresh_registry(self, loader: Loader, load_params: LoadParams): with reporter.step(f"Forcing fresh registry on {loader.ip}"): shell = loader.get_shell() 
             shell.exec(f"rm -f {load_params.registry_file}")
 
-    def _prepare_loader(
+    def _aws_configure_on_loader(
         self,
         loader: Loader,
-        load_params: LoadParams,
-        grpc_peer: str,
-        s3_public_keys: list[str],
-        k6_dir: str,
+        s3_credentials: S3Credentials,
     ):
-        with reporter.step(f"Init s3 client on {loader.ip}"):
-            shell = loader.get_shell()
-            frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC)
-            issue_secret_output = frostfs_authmate_exec.secret.issue(
-                wallet=self.loaders_wallet.path,
-                peer=grpc_peer,
-                gate_public_key=s3_public_keys,
-                container_placement_policy=load_params.preset.container_placement_policy,
-                container_policy=f"{k6_dir}/scenarios/files/policy.json",
-                wallet_password=self.loaders_wallet.password,
-            ).stdout
-            aws_access_key_id = str(
-                re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group(
-                    "aws_access_key_id"
-                )
-            )
-            aws_secret_access_key = str(
-                re.search(
-                    r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)",
-                    issue_secret_output,
-                ).group("aws_secret_access_key")
-            )
-
+        with reporter.step(f"Aws configure on {loader.ip}"):
             configure_input = [
-                InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id),
-                InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key),
+                InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=s3_credentials.access_key),
+                InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=s3_credentials.secret_key),
                 InteractiveInput(prompt_pattern=r".*", input=""),
                 InteractiveInput(prompt_pattern=r".*", input=""),
             ]
-            shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input))
+            loader.get_shell().exec("aws configure", CommandOptions(interactive_inputs=configure_input))
 
     @reporter.step("Init k6 instances")
     def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
@@ -176,12 +143,10 @@ class DefaultRunner(RunnerBase):
                     k6_dir,
                     shell,
                     loader,
-                    self.loaders_wallet,
+                    self.user,
                 )
 
-    def _get_distributed_load_params_list(
-        self, original_load_params: LoadParams, workers_count: int
-    ) -> list[LoadParams]:
+    def _get_distributed_load_params_list(self, original_load_params: LoadParams, workers_count: int) -> list[LoadParams]:
         divisor = int(BACKGROUND_LOAD_VUS_COUNT_DIVISOR)
         distributed_load_params: list[LoadParams] = []
 
@@ -266,18 +231,20 @@ class LocalRunner(RunnerBase):
     loaders: list[Loader]
     cluster_state_controller: ClusterStateController
     file_keeper: FileKeeper
-    wallet: WalletInfo
+    user: User
 
     def __init__(
         self,
         cluster_state_controller: ClusterStateController,
         file_keeper: FileKeeper,
         nodes_under_load: list[ClusterNode],
+        user: User,
    ) -> None:
         self.cluster_state_controller = cluster_state_controller
         self.file_keeper = file_keeper
         self.loaders = [NodeLoader(node) for node in nodes_under_load]
         self.nodes_under_load = nodes_under_load
+        self.user = user
 
     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
     @reporter.step("Preparation steps")
@@ -326,11 +293,9 @@ class LocalRunner(RunnerBase):
             shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz --strip-components 2 -C {k6_dir}")
             shell.exec(f"sudo chmod -R 777 {k6_dir}")
 
-        with reporter.step("Create empty_passwd"):
-            self.wallet = WalletInfo(f"{k6_dir}/scenarios/files/wallet.json", "", "/tmp/empty_passwd.yml")
-            content = yaml.dump({"password": ""})
-            shell.exec(f'echo "{content}" | sudo tee {self.wallet.config_path}')
-            shell.exec(f"sudo chmod -R 777 {self.wallet.config_path}")
+        with reporter.step("chmod 777 wallet related files on loader"):
+            shell.exec(f"sudo chmod -R 777 {self.user.wallet.config_path}")
+            shell.exec(f"sudo chmod -R 777 {self.user.wallet.path}")
 
     @reporter.step("Init k6 instances")
     def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
@@ -363,7 +328,7 @@ class LocalRunner(RunnerBase):
                 k6_dir,
                 shell,
                 loader,
-                self.wallet,
+                self.user,
             )
 
     def start(self):
@@ -453,7 +418,7 @@ class S3LocalRunner(LocalRunner):
                 k6_dir,
                 shell,
                 loader,
-                self.wallet,
+                self.user,
             )
 
     @run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@@ -466,17 +431,10 @@ class S3LocalRunner(LocalRunner):
         k6_dir: str,
     ):
         self.k6_dir = k6_dir
-        with reporter.step("Init s3 client on loaders"):
-            storage_node = nodes_under_load[0].service(StorageNode)
-            s3_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes]
-            grpc_peer = storage_node.get_rpc_endpoint()
-
-            parallel(self.prepare_node, nodes_under_load, k6_dir, load_params, s3_public_keys, grpc_peer)
+        parallel(self.prepare_node, nodes_under_load, k6_dir, load_params, cluster_nodes)
 
     @reporter.step("Prepare node {cluster_node}")
-    def prepare_node(
-        self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams, s3_public_keys: list[str], grpc_peer: str
-    ):
+    def prepare_node(self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams, cluster_nodes: list[ClusterNode]):
         LocalRunner.prepare_node(self, cluster_node, k6_dir, load_params)
         self.endpoints = cluster_node.s3_gate.get_all_endpoints()
         shell = cluster_node.host.get_shell()
@@ -497,29 +455,9 @@ class S3LocalRunner(LocalRunner):
             shell.exec(f"sudo python3 -m pip install -I {k6_dir}/requests.tar.gz")
 
         with reporter.step(f"Init s3 client on {cluster_node.host_ip}"):
-            frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC)
-            issue_secret_output = frostfs_authmate_exec.secret.issue(
-                wallet=self.wallet.path,
-                peer=grpc_peer,
-                gate_public_key=s3_public_keys,
-                container_placement_policy=load_params.preset.container_placement_policy,
-                container_policy=f"{k6_dir}/scenarios/files/policy.json",
-                wallet_password=self.wallet.password,
-            ).stdout
-            aws_access_key_id = str(
-                re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group(
-                    "aws_access_key_id"
-                )
-            )
-            aws_secret_access_key = str(
-                re.search(
-                    r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)",
-                    issue_secret_output,
-                ).group("aws_secret_access_key")
-            )
             configure_input = [
-                InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id),
-                InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key),
+                InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=self.user.s3_credentials.access_key),
+                InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=self.user.s3_credentials.secret_key),
                 InteractiveInput(prompt_pattern=r".*", input=""),
                 InteractiveInput(prompt_pattern=r".*", input=""),
             ]
diff --git a/src/frostfs_testlib/s3/curl_bucket_resolver.py b/src/frostfs_testlib/s3/curl_bucket_resolver.py
new file mode 100644
index 0000000..b713e79
--- /dev/null
+++ b/src/frostfs_testlib/s3/curl_bucket_resolver.py
@@ -0,0 +1,16 @@
+import re
+
+from frostfs_testlib.cli.generic_cli import GenericCli
+from frostfs_testlib.s3.interfaces import BucketContainerResolver
+from frostfs_testlib.storage.cluster import ClusterNode
+
+
+class CurlBucketContainerResolver(BucketContainerResolver):
+    def resolve(self, node: ClusterNode, bucket_name: str, **kwargs: dict) -> str:
+        curl = GenericCli("curl", node.host)
+        output = curl(f"-I 
http://127.0.0.1:8084/{bucket_name}") + pattern = r"X-Container-Id: (\S+)" + cid = re.findall(pattern, output.stdout) + if cid: + return cid[0] + return None diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index dd21823..b6a10e3 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -1,7 +1,8 @@ -from abc import abstractmethod +from abc import ABC, abstractmethod from datetime import datetime from typing import Literal, Optional, Union +from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.testing.readable import HumanReadableABC, HumanReadableEnum @@ -31,6 +32,22 @@ ACL_COPY = [ ] +class BucketContainerResolver(ABC): + @abstractmethod + def resolve(self, node: ClusterNode, bucket_name: str, **kwargs: dict) -> str: + """ + Resolve Container ID from bucket name + + Args: + node: node from where we want to resolve + bucket_name: name of the bucket + **kwargs: any other required params + + Returns: Container ID + """ + raise NotImplementedError("Call from abstract class") + + class S3ClientWrapper(HumanReadableABC): @abstractmethod def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str) -> None: @@ -296,15 +313,11 @@ class S3ClientWrapper(HumanReadableABC): abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.""" @abstractmethod - def upload_part( - self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str - ) -> str: + def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: """Uploads a part in a multipart upload.""" @abstractmethod - def upload_part_copy( - self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str - ) -> str: + def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: """Uploads a part by copying data from an existing object as data source.""" @abstractmethod diff --git a/src/frostfs_testlib/steps/acl.py b/src/frostfs_testlib/steps/acl.py index e97e4ee..da407b6 100644 --- a/src/frostfs_testlib/steps/acl.py +++ b/src/frostfs_testlib/steps/acl.py @@ -11,25 +11,20 @@ import base58 from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsCli from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC -from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG +from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.shell import Shell -from frostfs_testlib.storage.dataclasses.acl import ( - EACL_LIFETIME, - FROSTFS_CONTRACT_CACHE_TIMEOUT, - EACLPubKey, - EACLRole, - EACLRule, -) +from frostfs_testlib.storage.dataclasses.acl import EACL_LIFETIME, FROSTFS_CONTRACT_CACHE_TIMEOUT, EACLPubKey, EACLRole, EACLRule +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.utils import wallet_utils logger = logging.getLogger("NeoLogger") @reporter.step("Get extended ACL") -def get_eacl(wallet_path: str, cid: str, shell: Shell, endpoint: str) -> Optional[str]: - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) +def get_eacl(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str) -> Optional[str]: + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) try: - result = cli.container.get_eacl(wallet=wallet_path, rpc_endpoint=endpoint, cid=cid) + result = cli.container.get_eacl(rpc_endpoint=endpoint, cid=cid) except RuntimeError as exc: 
logger.info("Extended ACL table is not set for this container") logger.info(f"Got exception while getting eacl: {exc}") @@ -41,16 +36,15 @@ def get_eacl(wallet_path: str, cid: str, shell: Shell, endpoint: str) -> Optiona @reporter.step("Set extended ACL") def set_eacl( - wallet_path: str, + wallet: WalletInfo, cid: str, eacl_table_path: str, shell: Shell, endpoint: str, session_token: Optional[str] = None, ) -> None: - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli.container.set_eacl( - wallet=wallet_path, rpc_endpoint=endpoint, cid=cid, table=eacl_table_path, @@ -66,7 +60,7 @@ def _encode_cid_for_eacl(cid: str) -> str: def create_eacl(cid: str, rules_list: List[EACLRule], shell: Shell) -> str: table_file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"eacl_table_{str(uuid.uuid4())}.json") - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC) cli.acl.extended_create(cid=cid, out=table_file_path, rule=rules_list) with open(table_file_path, "r") as file: @@ -77,7 +71,7 @@ def create_eacl(cid: str, rules_list: List[EACLRule], shell: Shell) -> str: def form_bearertoken_file( - wif: str, + wallet: WalletInfo, cid: str, eacl_rule_list: List[Union[EACLRule, EACLPubKey]], shell: Shell, @@ -92,7 +86,7 @@ def form_bearertoken_file( enc_cid = _encode_cid_for_eacl(cid) if cid else None file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) - eacl = get_eacl(wif, cid, shell, endpoint) + eacl = get_eacl(wallet, cid, shell, endpoint) json_eacl = dict() if eacl: eacl = eacl.replace("eACL: ", "").split("Signature")[0] @@ -133,7 +127,7 @@ def form_bearertoken_file( if sign: sign_bearer( shell=shell, - wallet_path=wif, + wallet=wallet, eacl_rules_file_from=file_path, eacl_rules_file_to=file_path, json=True, @@ -164,11 +158,9 @@ def eacl_rules(access: str, verbs: list, user: str) -> list[str]: return rules -def sign_bearer(shell: Shell, wallet_path: str, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool) -> None: - frostfscli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG) - frostfscli.util.sign_bearer_token( - wallet=wallet_path, from_file=eacl_rules_file_from, to_file=eacl_rules_file_to, json=json - ) +def sign_bearer(shell: Shell, wallet: WalletInfo, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool) -> None: + frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + frostfscli.util.sign_bearer_token(eacl_rules_file_from, eacl_rules_file_to, json=json) @reporter.step("Wait for eACL cache expired") @@ -178,9 +170,7 @@ def wait_for_cache_expired(): @reporter.step("Return bearer token in base64 to caller") -def bearer_token_base64_from_file( - bearer_path: str, -) -> str: +def bearer_token_base64_from_file(bearer_path: str) -> str: with open(bearer_path, "rb") as file: signed = file.read() return base64.b64encode(signed).decode("utf-8") diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py index 82ff407..fc643e2 100644 --- a/src/frostfs_testlib/steps/cli/container.py +++ b/src/frostfs_testlib/steps/cli/container.py @@ -5,12 +5,11 @@ from dataclasses import dataclass from time import sleep from typing import Optional, Union -import requests - from frostfs_testlib import reporter -from frostfs_testlib.cli import FrostfsCli, GenericCli +from frostfs_testlib.cli import FrostfsCli +from frostfs_testlib.plugins import 
load_plugin from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC -from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG +from frostfs_testlib.s3.interfaces import BucketContainerResolver from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import put_object, put_object_to_random_node from frostfs_testlib.storage.cluster import Cluster, ClusterNode @@ -25,7 +24,7 @@ logger = logging.getLogger("NeoLogger") @dataclass class StorageContainerInfo: id: str - wallet_file: WalletInfo + wallet: WalletInfo class StorageContainer: @@ -42,11 +41,8 @@ class StorageContainer: def get_id(self) -> str: return self.storage_container_info.id - def get_wallet_path(self) -> str: - return self.storage_container_info.wallet_file.path - - def get_wallet_config_path(self) -> str: - return self.storage_container_info.wallet_file.config_path + def get_wallet(self) -> str: + return self.storage_container_info.wallet @reporter.step("Generate new object and put in container") def generate_object( @@ -61,37 +57,34 @@ class StorageContainer: file_hash = get_file_hash(file_path) container_id = self.get_id() - wallet_path = self.get_wallet_path() - wallet_config = self.get_wallet_config_path() + wallet = self.get_wallet() with reporter.step(f"Put object with size {size} to container {container_id}"): if endpoint: object_id = put_object( - wallet=wallet_path, + wallet=wallet, path=file_path, cid=container_id, expire_at=expire_at, shell=self.shell, endpoint=endpoint, bearer=bearer_token, - wallet_config=wallet_config, ) else: object_id = put_object_to_random_node( - wallet=wallet_path, + wallet=wallet, path=file_path, cid=container_id, expire_at=expire_at, shell=self.shell, cluster=self.cluster, bearer=bearer_token, - wallet_config=wallet_config, ) storage_object = StorageObjectInfo( container_id, object_id, size=size, - wallet_file_path=wallet_path, + wallet=wallet, file_path=file_path, file_hash=file_hash, ) @@ -106,14 +99,13 @@ REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X" @reporter.step("Create Container") def create_container( - wallet: str, + wallet: WalletInfo, shell: Shell, endpoint: str, rule: str = DEFAULT_PLACEMENT_RULE, basic_acl: str = "", attributes: Optional[dict] = None, session_token: str = "", - session_wallet: str = "", name: Optional[str] = None, options: Optional[dict] = None, await_mode: bool = True, @@ -124,7 +116,7 @@ def create_container( A wrapper for `frostfs-cli container create` call. 
Args: - wallet (str): a wallet on whose behalf a container is created + wallet (WalletInfo): a wallet on whose behalf a container is created rule (optional, str): placement rule for container basic_acl (optional, str): an ACL for container, will be appended to `--basic-acl` key @@ -146,10 +138,9 @@ def create_container( (str): CID of the created container """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) result = cli.container.create( rpc_endpoint=endpoint, - wallet=session_wallet if session_wallet else wallet, policy=rule, basic_acl=basic_acl, attributes=attributes, @@ -170,9 +161,7 @@ def create_container( return cid -def wait_for_container_creation( - wallet: str, cid: str, shell: Shell, endpoint: str, attempts: int = 15, sleep_interval: int = 1 -): +def wait_for_container_creation(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str, attempts: int = 15, sleep_interval: int = 1): for _ in range(attempts): containers = list_containers(wallet, shell, endpoint) if cid in containers: @@ -182,9 +171,7 @@ def wait_for_container_creation( raise RuntimeError(f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting") -def wait_for_container_deletion( - wallet: str, cid: str, shell: Shell, endpoint: str, attempts: int = 30, sleep_interval: int = 1 -): +def wait_for_container_deletion(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str, attempts: int = 30, sleep_interval: int = 1): for _ in range(attempts): try: get_container(wallet, cid, shell=shell, endpoint=endpoint) @@ -198,29 +185,27 @@ def wait_for_container_deletion( @reporter.step("List Containers") -def list_containers( - wallet: str, shell: Shell, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT -) -> list[str]: +def list_containers(wallet: WalletInfo, shell: Shell, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT) -> list[str]: """ A wrapper for `frostfs-cli container list` call. It returns all the available containers for the given wallet. Args: - wallet (str): a wallet on whose behalf we list the containers + wallet (WalletInfo): a wallet on whose behalf we list the containers shell: executor for cli command endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key timeout: Timeout for the operation. Returns: (list): list of containers """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) - result = cli.container.list(rpc_endpoint=endpoint, wallet=wallet, timeout=timeout) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + result = cli.container.list(rpc_endpoint=endpoint, timeout=timeout) logger.info(f"Containers: \n{result}") return result.stdout.split() @reporter.step("List Objects in container") def list_objects( - wallet: str, + wallet: WalletInfo, shell: Shell, container_id: str, endpoint: str, @@ -230,7 +215,7 @@ def list_objects( A wrapper for `frostfs-cli container list-objects` call. It returns all the available objects in container. 
Args: - wallet (str): a wallet on whose behalf we list the containers objects + wallet (WalletInfo): a wallet on whose behalf we list the containers objects shell: executor for cli command container_id: cid of container endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key @@ -238,15 +223,15 @@ def list_objects( Returns: (list): list of containers """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) - result = cli.container.list_objects(rpc_endpoint=endpoint, wallet=wallet, cid=container_id, timeout=timeout) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + result = cli.container.list_objects(rpc_endpoint=endpoint, cid=container_id, timeout=timeout) logger.info(f"Container objects: \n{result}") return result.stdout.split() @reporter.step("Get Container") def get_container( - wallet: str, + wallet: WalletInfo, cid: str, shell: Shell, endpoint: str, @@ -257,7 +242,7 @@ def get_container( A wrapper for `frostfs-cli container get` call. It extracts container's attributes and rearranges them into a more compact view. Args: - wallet (str): path to a wallet on whose behalf we get the container + wallet (WalletInfo): path to a wallet on whose behalf we get the container cid (str): ID of the container to get shell: executor for cli command endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key @@ -267,8 +252,8 @@ def get_container( (dict, str): dict of container attributes """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) - result = cli.container.get(rpc_endpoint=endpoint, wallet=wallet, cid=cid, json_mode=json_mode, timeout=timeout) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + result = cli.container.get(rpc_endpoint=endpoint, cid=cid, json_mode=json_mode, timeout=timeout) if not json_mode: return result.stdout @@ -285,7 +270,7 @@ def get_container( @reporter.step("Delete Container") # TODO: make the error message about a non-found container more user-friendly def delete_container( - wallet: str, + wallet: WalletInfo, cid: str, shell: Shell, endpoint: str, @@ -297,7 +282,7 @@ def delete_container( A wrapper for `frostfs-cli container delete` call. Args: await_mode: Block execution until container is removed. - wallet (str): path to a wallet on whose behalf we delete the container + wallet (WalletInfo): path to a wallet on whose behalf we delete the container cid (str): ID of the container to delete shell: executor for cli command endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key @@ -306,9 +291,8 @@ def delete_container( This function doesn't return anything. 
""" - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli.container.delete( - wallet=wallet, cid=cid, rpc_endpoint=endpoint, force=force, @@ -345,26 +329,22 @@ def _parse_cid(output: str) -> str: @reporter.step("Search container by name") def search_container_by_name(name: str, node: ClusterNode): - curl = GenericCli("curl", node.host) - output = curl(f"-I http://127.0.0.1:8084/{name}") - pattern = r"X-Container-Id: (\S+)" - cid = re.findall(pattern, output.stdout) - if cid: - return cid[0] - return None + resolver_cls = load_plugin("frostfs.testlib.bucket_cid_resolver", node.host.config.product) + resolver: BucketContainerResolver = resolver_cls() + return resolver.resolve(node, name) @reporter.step("Search for nodes with a container") def search_nodes_with_container( - wallet: str, + wallet: WalletInfo, cid: str, shell: Shell, endpoint: str, cluster: Cluster, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, ) -> list[ClusterNode]: - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) - result = cli.container.search_node(rpc_endpoint=endpoint, wallet=wallet, cid=cid, timeout=timeout) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + result = cli.container.search_node(rpc_endpoint=endpoint, cid=cid, timeout=timeout) pattern = r"[0-9]+(?:\.[0-9]+){3}" nodes_ip = list(set(re.findall(pattern, result.stdout))) diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index 610b76a..5fe6054 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ -9,9 +9,10 @@ from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsCli from frostfs_testlib.cli.neogo import NeoGo from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE -from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG +from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.shell import Shell from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.utils import json_utils from frostfs_testlib.utils.cli_utils import parse_cmd_table, parse_netmap_output @@ -20,7 +21,7 @@ logger = logging.getLogger("NeoLogger") @reporter.step("Get object from random node") def get_object_from_random_node( - wallet: str, + wallet: WalletInfo, cid: str, oid: str, shell: Shell, @@ -28,7 +29,6 @@ def get_object_from_random_node( bearer: Optional[str] = None, write_object: Optional[str] = None, xhdr: Optional[dict] = None, - wallet_config: Optional[str] = None, no_progress: bool = True, session: Optional[str] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, @@ -44,7 +44,6 @@ def get_object_from_random_node( cluster: cluster object bearer (optional, str): path to Bearer Token file, appends to `--bearer` key write_object (optional, str): path to downloaded file, appends to `--file` key - wallet_config(optional, str): path to the wallet config no_progress(optional, bool): do not show progress bar xhdr (optional, dict): Request X-Headers in form of Key=Value session (optional, dict): path to a JSON-encoded container session token @@ -62,7 +61,6 @@ def get_object_from_random_node( bearer, write_object, xhdr, - wallet_config, no_progress, session, timeout, @@ -71,7 +69,7 @@ def get_object_from_random_node( @reporter.step("Get object from {endpoint}") def get_object( - wallet: str, 
+ wallet: WalletInfo, cid: str, oid: str, shell: Shell, @@ -79,7 +77,6 @@ def get_object( bearer: Optional[str] = None, write_object: Optional[str] = None, xhdr: Optional[dict] = None, - wallet_config: Optional[str] = None, no_progress: bool = True, session: Optional[str] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, @@ -88,14 +85,13 @@ def get_object( GET from FrostFS. Args: - wallet (str): wallet on whose behalf GET is done + wallet (WalletInfo): wallet on whose behalf GET is done cid (str): ID of Container where we get the Object from oid (str): Object ID shell: executor for cli command bearer: path to Bearer Token file, appends to `--bearer` key write_object: path to downloaded file, appends to `--file` key endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - wallet_config(optional, str): path to the wallet config no_progress(optional, bool): do not show progress bar xhdr (optional, dict): Request X-Headers in form of Key=Value session (optional, dict): path to a JSON-encoded container session token @@ -108,10 +104,9 @@ def get_object( write_object = str(uuid.uuid4()) file_path = os.path.join(ASSETS_DIR, write_object) - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli.object.get( rpc_endpoint=endpoint, - wallet=wallet, cid=cid, oid=oid, file=file_path, @@ -127,14 +122,13 @@ def get_object( @reporter.step("Get Range Hash from {endpoint}") def get_range_hash( - wallet: str, + wallet: WalletInfo, cid: str, oid: str, range_cut: str, shell: Shell, endpoint: str, bearer: Optional[str] = None, - wallet_config: Optional[str] = None, xhdr: Optional[dict] = None, session: Optional[str] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, @@ -151,17 +145,15 @@ def get_range_hash( range_cut: Range to take hash from in the form offset1:length1,..., value to pass to the `--range` parameter endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - wallet_config: path to the wallet config xhdr: Request X-Headers in form of Key=Values session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session. timeout: Timeout for the operation. 
Returns: None """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) result = cli.object.hash( rpc_endpoint=endpoint, - wallet=wallet, cid=cid, oid=oid, range=range_cut, @@ -177,7 +169,7 @@ def get_range_hash( @reporter.step("Put object to random node") def put_object_to_random_node( - wallet: str, + wallet: WalletInfo, path: str, cid: str, shell: Shell, @@ -186,7 +178,6 @@ def put_object_to_random_node( copies_number: Optional[int] = None, attributes: Optional[dict] = None, xhdr: Optional[dict] = None, - wallet_config: Optional[str] = None, expire_at: Optional[int] = None, no_progress: bool = True, session: Optional[str] = None, @@ -205,7 +196,6 @@ def put_object_to_random_node( copies_number: Number of copies of the object to store within the RPC call attributes: User attributes in form of Key1=Value1,Key2=Value2 cluster: cluster under test - wallet_config: path to the wallet config no_progress: do not show progress bar expire_at: Last epoch in the life of the object xhdr: Request X-Headers in form of Key=Value @@ -226,7 +216,6 @@ def put_object_to_random_node( copies_number, attributes, xhdr, - wallet_config, expire_at, no_progress, session, @@ -236,7 +225,7 @@ def put_object_to_random_node( @reporter.step("Put object at {endpoint} in container {cid}") def put_object( - wallet: str, + wallet: WalletInfo, path: str, cid: str, shell: Shell, @@ -245,7 +234,6 @@ def put_object( copies_number: Optional[int] = None, attributes: Optional[dict] = None, xhdr: Optional[dict] = None, - wallet_config: Optional[str] = None, expire_at: Optional[int] = None, no_progress: bool = True, session: Optional[str] = None, @@ -263,7 +251,6 @@ def put_object( copies_number: Number of copies of the object to store within the RPC call attributes: User attributes in form of Key1=Value1,Key2=Value2 endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - wallet_config: path to the wallet config no_progress: do not show progress bar expire_at: Last epoch in the life of the object xhdr: Request X-Headers in form of Key=Value @@ -273,10 +260,9 @@ def put_object( (str): ID of uploaded Object """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) result = cli.object.put( rpc_endpoint=endpoint, - wallet=wallet, file=path, cid=cid, attributes=attributes, @@ -297,13 +283,12 @@ def put_object( @reporter.step("Delete object {cid}/{oid} from {endpoint}") def delete_object( - wallet: str, + wallet: WalletInfo, cid: str, oid: str, shell: Shell, endpoint: str, bearer: str = "", - wallet_config: Optional[str] = None, xhdr: Optional[dict] = None, session: Optional[str] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, @@ -318,7 +303,6 @@ def delete_object( shell: executor for cli command bearer: path to Bearer Token file, appends to `--bearer` key endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key - wallet_config: path to the wallet config xhdr: Request X-Headers in form of Key=Value session: path to a JSON-encoded container session token timeout: Timeout for the operation. 
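Every object step in this file follows the conversion shown in these hunks: the step takes a WalletInfo and derives the CLI config from wallet.config_path, so the separate wallet_config parameter disappears. A hedged usage sketch (the wallet paths, container ID and endpoint are placeholders; the three-argument WalletInfo construction mirrors the one used for loaders elsewhere in this series):

    from frostfs_testlib.shell import LocalShell
    from frostfs_testlib.steps.cli.object import delete_object, put_object
    from frostfs_testlib.storage.dataclasses.wallet import WalletInfo

    shell = LocalShell()
    wallet = WalletInfo("/path/to/wallet.json", "", "/path/to/wallet_config.yml")

    # No wallet_config keyword anymore: the WalletInfo carries the config path.
    oid = put_object(wallet, "/tmp/payload.bin", "<cid>", shell, "localhost:8080")
    tombstone = delete_object(wallet, "<cid>", oid, shell, "localhost:8080")
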
@@ -326,10 +310,9 @@ def delete_object( (str): Tombstone ID """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) result = cli.object.delete( rpc_endpoint=endpoint, - wallet=wallet, cid=cid, oid=oid, bearer=bearer, @@ -345,13 +328,12 @@ def delete_object( @reporter.step("Get Range") def get_range( - wallet: str, + wallet: WalletInfo, cid: str, oid: str, range_cut: str, shell: Shell, endpoint: str, - wallet_config: Optional[str] = None, bearer: str = "", xhdr: Optional[dict] = None, session: Optional[str] = None, @@ -368,7 +350,6 @@ def get_range( shell: executor for cli command endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key bearer: path to Bearer Token file, appends to `--bearer` key - wallet_config: path to the wallet config xhdr: Request X-Headers in form of Key=Value session: path to a JSON-encoded container session token timeout: Timeout for the operation. @@ -377,10 +358,9 @@ def get_range( """ range_file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4())) - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli.object.range( rpc_endpoint=endpoint, - wallet=wallet, cid=cid, oid=oid, range=range_cut, @@ -398,7 +378,7 @@ def get_range( @reporter.step("Lock Object") def lock_object( - wallet: str, + wallet: WalletInfo, cid: str, oid: str, shell: Shell, @@ -408,7 +388,6 @@ def lock_object( address: Optional[str] = None, bearer: Optional[str] = None, session: Optional[str] = None, - wallet_config: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, @@ -435,13 +414,12 @@ def lock_object( Lock object ID """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) result = cli.object.lock( rpc_endpoint=endpoint, lifetime=lifetime, expire_at=expire_at, address=address, - wallet=wallet, cid=cid, oid=oid, bearer=bearer, @@ -459,14 +437,13 @@ def lock_object( @reporter.step("Search object") def search_object( - wallet: str, + wallet: WalletInfo, cid: str, shell: Shell, endpoint: str, bearer: str = "", filters: Optional[dict] = None, expected_objects_list: Optional[list] = None, - wallet_config: Optional[str] = None, xhdr: Optional[dict] = None, session: Optional[str] = None, phy: bool = False, @@ -484,7 +461,6 @@ def search_object( endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key filters: key=value pairs to filter Objects expected_objects_list: a list of ObjectIDs to compare found Objects with - wallet_config: path to the wallet config xhdr: Request X-Headers in form of Key=Value session: path to a JSON-encoded container session token phy: Search physically stored objects. 
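search_object gets the same treatment; note that when expected_objects_list is supplied, the step only logs the comparison rather than asserting on it. A short sketch under the same placeholder assumptions as above (the attribute filter is invented for illustration):

    from frostfs_testlib.shell import LocalShell
    from frostfs_testlib.steps.cli.object import search_object
    from frostfs_testlib.storage.dataclasses.wallet import WalletInfo

    shell = LocalShell()
    wallet = WalletInfo("/path/to/wallet.json", "", "/path/to/wallet_config.yml")

    # filters are rendered as key=value pairs for the CLI invocation
    found = search_object(wallet, "<cid>", shell, "localhost:8080", filters={"FileName": "payload.bin"})
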
@@ -495,10 +471,9 @@ def search_object( list of found ObjectIDs """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) result = cli.object.search( rpc_endpoint=endpoint, - wallet=wallet, cid=cid, bearer=bearer, xhdr=xhdr, @@ -513,23 +488,18 @@ def search_object( if expected_objects_list: if sorted(found_objects) == sorted(expected_objects_list): - logger.info( - f"Found objects list '{found_objects}' " f"is equal for expected list '{expected_objects_list}'" - ) + logger.info(f"Found objects list '{found_objects}' " f"is equal for expected list '{expected_objects_list}'") else: - logger.warning( - f"Found object list {found_objects} " f"is not equal to expected list '{expected_objects_list}'" - ) + logger.warning(f"Found object list {found_objects} " f"is not equal to expected list '{expected_objects_list}'") return found_objects @reporter.step("Get netmap netinfo") def get_netmap_netinfo( - wallet: str, + wallet: WalletInfo, shell: Shell, endpoint: str, - wallet_config: Optional[str] = None, address: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, @@ -539,7 +509,7 @@ def get_netmap_netinfo( Get netmap netinfo output from node Args: - wallet (str): wallet on whose behalf request is done + wallet (WalletInfo): wallet on whose behalf request is done shell: executor for cli command endpoint (optional, str): FrostFS endpoint to send request to, appends to `--rpc-endpoint` key address: Address of wallet account @@ -552,9 +522,8 @@ def get_netmap_netinfo( (dict): dict of parsed command output """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) output = cli.netmap.netinfo( - wallet=wallet, rpc_endpoint=endpoint, address=address, ttl=ttl, @@ -578,7 +547,7 @@ def get_netmap_netinfo( @reporter.step("Head object") def head_object( - wallet: str, + wallet: WalletInfo, cid: str, oid: str, shell: Shell, @@ -588,7 +557,6 @@ def head_object( json_output: bool = True, is_raw: bool = False, is_direct: bool = False, - wallet_config: Optional[str] = None, session: Optional[str] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, ): @@ -596,7 +564,7 @@ def head_object( HEAD an Object. Args: - wallet (str): wallet on whose behalf HEAD is done + wallet (WalletInfo): wallet on whose behalf HEAD is done cid (str): ID of Container where we get the Object from oid (str): ObjectID to HEAD shell: executor for cli command @@ -608,7 +576,6 @@ def head_object( turns into `--raw` key is_direct(optional, bool): send request directly to the node or not; this flag turns into `--ttl 1` key - wallet_config(optional, str): path to the wallet config xhdr (optional, dict): Request X-Headers in form of Key=Value session (optional, dict): path to a JSON-encoded container session token timeout: Timeout for the operation. 
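The is_direct flag documented above is what the replica-counting helpers later in this series lean on; a hedged sketch of a direct HEAD against a single storage node (node is an assumed StorageNode taken from the cluster fixture).

# Sketch only: is_direct=True becomes --ttl 1, so the request is answered by this
# node alone with no forwarding; a node that stores no copy fails the call
resp = head_object(
    default_wallet,
    cid,
    oid,
    shell,
    node.get_rpc_endpoint(),
    is_direct=True,
)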
@@ -619,10 +586,9 @@ def head_object( (str): HEAD response as a plain text """ - cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG) + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) result = cli.object.head( rpc_endpoint=endpoint, - wallet=wallet, cid=cid, oid=oid, bearer=bearer, @@ -673,7 +639,7 @@ def head_object( @reporter.step("Run neo-go dump-keys") -def neo_go_dump_keys(shell: Shell, wallet: str) -> dict: +def neo_go_dump_keys(shell: Shell, wallet: WalletInfo) -> dict: """ Run neo-go dump keys command @@ -761,9 +727,7 @@ def get_object_nodes( parsing_output = parse_cmd_table(result_object_nodes.stdout, "|") list_object_nodes = [ - node - for node in parsing_output - if node["should_contain_object"] == "true" and node["actually_contains_object"] == "true" + node for node in parsing_output if node["should_contain_object"] == "true" and node["actually_contains_object"] == "true" ] netmap_nodes_list = parse_netmap_output( @@ -780,10 +744,7 @@ def get_object_nodes( ] result = [ - cluster_node - for netmap_node in netmap_nodes - for cluster_node in cluster.cluster_nodes - if netmap_node.node == cluster_node.host_ip + cluster_node for netmap_node in netmap_nodes for cluster_node in cluster.cluster_nodes if netmap_node.node == cluster_node.host_ip ] return result diff --git a/src/frostfs_testlib/steps/complex_object_actions.py b/src/frostfs_testlib/steps/complex_object_actions.py index a67dd4c..e1a7088 100644 --- a/src/frostfs_testlib/steps/complex_object_actions.py +++ b/src/frostfs_testlib/steps/complex_object_actions.py @@ -14,11 +14,11 @@ from typing import Optional, Tuple from frostfs_testlib import reporter from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT -from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import head_object from frostfs_testlib.storage.cluster import Cluster, StorageNode from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo logger = logging.getLogger("NeoLogger") @@ -44,7 +44,7 @@ def get_storage_object_chunks( with reporter.step(f"Get complex object chunks (f{storage_object.oid})"): split_object_id = get_link_object( - storage_object.wallet_file_path, + storage_object.wallet, storage_object.cid, storage_object.oid, shell, @@ -53,7 +53,7 @@ def get_storage_object_chunks( timeout=timeout, ) head = head_object( - storage_object.wallet_file_path, + storage_object.wallet, storage_object.cid, split_object_id, shell, @@ -96,7 +96,7 @@ def get_complex_object_split_ranges( chunks_ids = get_storage_object_chunks(storage_object, shell, cluster) for chunk_id in chunks_ids: head = head_object( - storage_object.wallet_file_path, + storage_object.wallet, storage_object.cid, chunk_id, shell, @@ -114,13 +114,12 @@ def get_complex_object_split_ranges( @reporter.step("Get Link Object") def get_link_object( - wallet: str, + wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode], bearer: str = "", - wallet_config: str = DEFAULT_WALLET_CONFIG, is_direct: bool = True, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, ): @@ -154,7 +153,6 @@ def get_link_object( is_raw=True, is_direct=is_direct, bearer=bearer, - wallet_config=wallet_config, timeout=timeout, ) if resp["link"]: @@ -167,7 +165,7 @@ def get_link_object( @reporter.step("Get Last Object") def get_last_object( - wallet: str, + wallet: WalletInfo, cid: str, oid: str, 
shell: Shell, diff --git a/src/frostfs_testlib/steps/epoch.py b/src/frostfs_testlib/steps/epoch.py index ef8f85a..ce7ed12 100644 --- a/src/frostfs_testlib/steps/epoch.py +++ b/src/frostfs_testlib/steps/epoch.py @@ -4,13 +4,7 @@ from typing import Optional from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsAdm, FrostfsCli, NeoGo -from frostfs_testlib.resources.cli import ( - CLI_DEFAULT_TIMEOUT, - FROSTFS_ADM_CONFIG_PATH, - FROSTFS_ADM_EXEC, - FROSTFS_CLI_EXEC, - NEOGO_EXECUTABLE, -) +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.shell import Shell from frostfs_testlib.steps.payment_neogo import get_contract_hash diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py index f717fd4..baf362b 100644 --- a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3/s3_helper.py @@ -10,6 +10,7 @@ from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.container import search_container_by_name, search_nodes_with_container from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo logger = logging.getLogger("NeoLogger") @@ -28,9 +29,7 @@ def check_objects_in_bucket( assert bucket_object in bucket_objects, f"Expected object {bucket_object} in objects list {bucket_objects}" for bucket_object in unexpected_objects: - assert ( - bucket_object not in bucket_objects - ), f"Expected object {bucket_object} not in objects list {bucket_objects}" + assert bucket_object not in bucket_objects, f"Expected object {bucket_object} not in objects list {bucket_objects}" @reporter.step("Try to get object and got error") @@ -58,9 +57,7 @@ def object_key_from_file_path(full_path: str) -> str: return os.path.basename(full_path) -def assert_tags( - actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None -) -> None: +def assert_tags(actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None) -> None: expected_tags = [{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else [] unexpected_tags = [{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else [] if expected_tags == []: @@ -180,7 +177,7 @@ def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str): def search_nodes_with_bucket( cluster: Cluster, bucket_name: str, - wallet: str, + wallet: WalletInfo, shell: Shell, endpoint: str, ) -> list[ClusterNode]: diff --git a/src/frostfs_testlib/steps/session_token.py b/src/frostfs_testlib/steps/session_token.py index 6c87cac..67c556d 100644 --- a/src/frostfs_testlib/steps/session_token.py +++ b/src/frostfs_testlib/steps/session_token.py @@ -4,13 +4,12 @@ import logging import os import uuid from dataclasses import dataclass -from enum import Enum from typing import Any, Optional from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsCli from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC -from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG +from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.shell import Shell from frostfs_testlib.storage.dataclasses.storage_object_info import 
StorageObjectInfo from frostfs_testlib.storage.dataclasses.wallet import WalletInfo @@ -231,8 +230,7 @@ def get_object_signed_token( def create_session_token( shell: Shell, owner: str, - wallet_path: str, - wallet_password: str, + wallet: WalletInfo, rpc_endpoint: str, ) -> str: """ @@ -247,19 +245,18 @@ def create_session_token( The path to the generated session token file. """ session_token = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) - frostfscli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC) + frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) frostfscli.session.create( rpc_endpoint=rpc_endpoint, address=owner, - wallet=wallet_path, - wallet_password=wallet_password, out=session_token, + wallet=wallet.path, ) return session_token @reporter.step("Sign Session Token") -def sign_session_token(shell: Shell, session_token_file: str, wlt: WalletInfo) -> str: +def sign_session_token(shell: Shell, session_token_file: str, wallet: WalletInfo) -> str: """ This function signs the session token by the given wallet. @@ -272,6 +269,6 @@ def sign_session_token(shell: Shell, session_token_file: str, wlt: WalletInfo) - The path to the signed token. """ signed_token_file = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) - frostfscli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG) - frostfscli.util.sign_session_token(wallet=wlt.path, from_file=session_token_file, to_file=signed_token_file) + frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + frostfscli.util.sign_session_token(session_token_file, signed_token_file) return signed_token_file diff --git a/src/frostfs_testlib/steps/storage_object.py b/src/frostfs_testlib/steps/storage_object.py index ce1bb94..4b4b2a6 100644 --- a/src/frostfs_testlib/steps/storage_object.py +++ b/src/frostfs_testlib/steps/storage_object.py @@ -30,14 +30,14 @@ def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell, clust with reporter.step("Delete objects"): for storage_object in storage_objects: storage_object.tombstone = delete_object( - storage_object.wallet_file_path, + storage_object.wallet, storage_object.cid, storage_object.oid, shell=shell, endpoint=cluster.default_rpc_endpoint, ) verify_head_tombstone( - wallet_path=storage_object.wallet_file_path, + wallet=storage_object.wallet, cid=storage_object.cid, oid_ts=storage_object.tombstone, oid=storage_object.oid, @@ -52,7 +52,7 @@ def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell, clust for storage_object in storage_objects: with pytest.raises(Exception, match=OBJECT_ALREADY_REMOVED): get_object( - storage_object.wallet_file_path, + storage_object.wallet, storage_object.cid, storage_object.oid, shell=shell, diff --git a/src/frostfs_testlib/steps/storage_policy.py b/src/frostfs_testlib/steps/storage_policy.py index d2202a4..acc113f 100644 --- a/src/frostfs_testlib/steps/storage_policy.py +++ b/src/frostfs_testlib/steps/storage_policy.py @@ -12,13 +12,15 @@ from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import head_object from frostfs_testlib.steps.complex_object_actions import get_last_object from frostfs_testlib.storage.cluster import StorageNode +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.utils import string_utils logger = logging.getLogger("NeoLogger") +# TODO: Unused, remove or make use of @reporter.step("Get Object Copies") -def get_object_copies(complexity: str, wallet: 
str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: +def get_object_copies(complexity: str, wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: """ The function performs requests to all nodes of the container and finds out if they store a copy of the object. The procedure is @@ -43,7 +45,7 @@ def get_object_copies(complexity: str, wallet: str, cid: str, oid: str, shell: S @reporter.step("Get Simple Object Copies") -def get_simple_object_copies(wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: +def get_simple_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: """ To figure out the number of a simple object copies, only direct HEAD requests should be made to the every node of the container. @@ -72,7 +74,7 @@ def get_simple_object_copies(wallet: str, cid: str, oid: str, shell: Shell, node @reporter.step("Get Complex Object Copies") -def get_complex_object_copies(wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: +def get_complex_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int: """ To figure out the number of a complex object copies, we firstly need to retrieve its Last object. We consider that the number of @@ -109,8 +111,7 @@ def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageN nodes_list = [] for node in nodes: - wallet = node.get_wallet_path() - wallet_config = node.get_wallet_config_path() + wallet = WalletInfo.from_node(node) try: res = head_object( wallet, @@ -119,7 +120,6 @@ def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageN shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True, - wallet_config=wallet_config, ) if res is not None: logger.info(f"Found object {oid} on node {node}") @@ -131,9 +131,7 @@ def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageN @reporter.step("Get Nodes Without Object") -def get_nodes_without_object( - wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode] -) -> list[StorageNode]: +def get_nodes_without_object(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> list[StorageNode]: """ The function returns list of nodes which do not store the given object. 
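To make the intent of these helpers concrete, a short sketch of counting replicas after a put into a REP 2 container; it assumes the fixtures used in the sketches above and that Cluster exposes its storage nodes as cluster.storage_nodes.

# Sketch only: every node is HEAD-ed directly; the number of hits is the copy count
copies = get_simple_object_copies(default_wallet, cid, oid, shell, cluster.storage_nodes)
assert copies == 2, f"Expected 2 copies under REP 2, got {copies}"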
diff --git a/src/frostfs_testlib/steps/tombstone.py b/src/frostfs_testlib/steps/tombstone.py index b468c93..27f75d5 100644 --- a/src/frostfs_testlib/steps/tombstone.py +++ b/src/frostfs_testlib/steps/tombstone.py @@ -1,31 +1,23 @@ -import json import logging -from neo3.wallet import wallet - from frostfs_testlib import reporter from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import head_object +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo logger = logging.getLogger("NeoLogger") @reporter.step("Verify Head Tombstone") -def verify_head_tombstone(wallet_path: str, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str): - header = head_object(wallet_path, cid, oid_ts, shell=shell, endpoint=endpoint)["header"] +def verify_head_tombstone(wallet: WalletInfo, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str): + header = head_object(wallet, cid, oid_ts, shell=shell, endpoint=endpoint)["header"] s_oid = header["sessionToken"]["body"]["object"]["target"]["objects"] logger.info(f"Header Session OIDs is {s_oid}") logger.info(f"OID is {oid}") assert header["containerID"] == cid, "Tombstone Header CID is wrong" - - with open(wallet_path, "r") as file: - wlt_data = json.loads(file.read()) - wlt = wallet.Wallet.from_json(wlt_data, password="") - addr = wlt.accounts[0].address - - assert header["ownerID"] == addr, "Tombstone Owner ID is wrong" + assert header["ownerID"] == wallet.get_address_from_json(0), "Tombstone Owner ID is wrong" assert header["objectType"] == "TOMBSTONE", "Header Type isn't Tombstone" assert header["sessionToken"]["body"]["object"]["verb"] == "DELETE", "Header Session Type isn't DELETE" assert header["sessionToken"]["body"]["object"]["target"]["container"] == cid, "Header Session ID is wrong" diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index 5f2ed99..e713f02 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -1,6 +1,5 @@ import copy from datetime import datetime -from typing import Optional import frostfs_testlib.resources.optionals as optionals from frostfs_testlib import reporter @@ -10,7 +9,6 @@ from frostfs_testlib.load.load_report import LoadReport from frostfs_testlib.load.load_verifiers import LoadVerifier from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode -from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.parallel import parallel from frostfs_testlib.testing.test_control import run_optionally @@ -23,7 +21,6 @@ class BackgroundLoadController: cluster_nodes: list[ClusterNode] nodes_under_load: list[ClusterNode] load_counter: int - loaders_wallet: WalletInfo load_summaries: dict endpoints: list[str] runner: ScenarioRunner @@ -34,7 +31,6 @@ class BackgroundLoadController: self, k6_dir: str, load_params: LoadParams, - loaders_wallet: WalletInfo, cluster_nodes: list[ClusterNode], nodes_under_load: list[ClusterNode], runner: ScenarioRunner, @@ -45,7 +41,6 @@ class BackgroundLoadController: self.cluster_nodes = cluster_nodes self.nodes_under_load = nodes_under_load self.load_counter = 1 - self.loaders_wallet = loaders_wallet self.runner = runner self.started = False self.load_reporters = [] @@ -64,10 +59,7 @@ class BackgroundLoadController: ) ), 
EndpointSelectionStrategy.FIRST: list( - set( - node_under_load.service(StorageNode).get_rpc_endpoint() - for node_under_load in self.nodes_under_load - ) + set(node_under_load.service(StorageNode).get_rpc_endpoint() for node_under_load in self.nodes_under_load) ), }, # for some reason xk6 appends http protocol on its own diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 69df675..9e07914 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -11,12 +11,13 @@ from frostfs_testlib.healthcheck.interfaces import Healthcheck from frostfs_testlib.hosting.interfaces import HostStatus from frostfs_testlib.plugins import load_all from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC -from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG, MORPH_BLOCK_TIME +from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider from frostfs_testlib.steps.network import IpHelper from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode from frostfs_testlib.storage.controllers.disk_controller import DiskController from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing import parallel from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for_success from frostfs_testlib.utils.datetime_utils import parse_time @@ -413,12 +414,12 @@ class ClusterStateController: frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}") @reporter.step("Set mode node to {status}") - def set_mode_node(self, cluster_node: ClusterNode, wallet: str, status: str, await_tick: bool = True) -> None: + def set_mode_node(self, cluster_node: ClusterNode, wallet: WalletInfo, status: str, await_tick: bool = True) -> None: rpc_endpoint = cluster_node.storage_node.get_rpc_endpoint() control_endpoint = cluster_node.service(StorageNode).get_control_endpoint() - frostfs_adm, frostfs_cli, frostfs_cli_remote = self._get_cli(local_shell=self.shell, cluster_node=cluster_node) - node_netinfo = NetmapParser.netinfo(frostfs_cli.netmap.netinfo(rpc_endpoint=rpc_endpoint, wallet=wallet).stdout) + frostfs_adm, frostfs_cli, frostfs_cli_remote = self._get_cli(local_shell=self.shell, local_wallet=wallet, cluster_node=cluster_node) + node_netinfo = NetmapParser.netinfo(frostfs_cli.netmap.netinfo(rpc_endpoint=rpc_endpoint).stdout) with reporter.step("If status maintenance, then check that the option is enabled"): if node_netinfo.maintenance_mode_allowed == "false": @@ -437,12 +438,10 @@ class ClusterStateController: self.check_node_status(status=status, wallet=wallet, cluster_node=cluster_node) @wait_for_success(80, 8, title="Wait for storage status become {status}") - def check_node_status(self, status: str, wallet: str, cluster_node: ClusterNode): - frostfs_cli = FrostfsCli( - shell=self.shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG - ) + def check_node_status(self, status: str, wallet: WalletInfo, cluster_node: ClusterNode): + frostfs_cli = FrostfsCli(self.shell, FROSTFS_CLI_EXEC, wallet.config_path) netmap = NetmapParser.snapshot_all_nodes( - 
frostfs_cli.netmap.snapshot(rpc_endpoint=cluster_node.storage_node.get_rpc_endpoint(), wallet=wallet).stdout + frostfs_cli.netmap.snapshot(rpc_endpoint=cluster_node.storage_node.get_rpc_endpoint()).stdout ) netmap = [node for node in netmap if cluster_node.host_ip == node.node] if status == "offline": @@ -450,7 +449,9 @@ class ClusterStateController: else: assert netmap[0].node_status == status.upper(), f"Node state - {netmap[0].node_status} != {status} expect" - def _get_cli(self, local_shell: Shell, cluster_node: ClusterNode) -> tuple[FrostfsAdm, FrostfsCli, FrostfsCli]: + def _get_cli( + self, local_shell: Shell, local_wallet: WalletInfo, cluster_node: ClusterNode + ) -> tuple[FrostfsAdm, FrostfsCli, FrostfsCli]: # TODO Move to service config host = cluster_node.host service_config = host.get_service_config(cluster_node.storage_node.name) @@ -462,12 +463,8 @@ class ClusterStateController: wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"' shell.exec(f"echo '{wallet_config}' > {wallet_config_path}") - frostfs_adm = FrostfsAdm( - shell=shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH - ) - frostfs_cli = FrostfsCli( - shell=local_shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG - ) + frostfs_adm = FrostfsAdm(shell=shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH) + frostfs_cli = FrostfsCli(local_shell, FROSTFS_CLI_EXEC, local_wallet.config_path) frostfs_cli_remote = FrostfsCli( shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, @@ -511,9 +508,7 @@ class ClusterStateController: options = CommandOptions(check=False) return self.shell.exec(f"ping {node.host.config.address} -c 1", options).return_code - @retry( - max_attempts=60, sleep_interval=10, expected_result=HostStatus.ONLINE, title="Waiting for {node} to go online" - ) + @retry(max_attempts=60, sleep_interval=10, expected_result=HostStatus.ONLINE, title="Waiting for {node} to go online") def _wait_for_host_online(self, node: ClusterNode): try: ping_result = self._ping_host(node) @@ -524,9 +519,7 @@ class ClusterStateController: logger.warning(f"Host ping fails with error {err}") return HostStatus.OFFLINE - @retry( - max_attempts=60, sleep_interval=10, expected_result=HostStatus.OFFLINE, title="Waiting for {node} to go offline" - ) + @retry(max_attempts=60, sleep_interval=10, expected_result=HostStatus.OFFLINE, title="Waiting for {node} to go offline") def _wait_for_host_offline(self, node: ClusterNode): try: ping_result = self._ping_host(node) diff --git a/src/frostfs_testlib/storage/dataclasses/acl.py b/src/frostfs_testlib/storage/dataclasses/acl.py index 1330618..362dee9 100644 --- a/src/frostfs_testlib/storage/dataclasses/acl.py +++ b/src/frostfs_testlib/storage/dataclasses/acl.py @@ -1,8 +1,8 @@ import logging from dataclasses import dataclass -from enum import Enum from typing import Any, Dict, List, Optional, Union +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.readable import HumanReadableEnum from frostfs_testlib.utils import wallet_utils @@ -65,11 +65,7 @@ class EACLFilters: def __str__(self): return ",".join( - [ - f"{filter.header_type.value}:" - f"{filter.key}{filter.match_type.value}{filter.value}" - for filter in self.filters - ] + [f"{filter.header_type.value}:" f"{filter.key}{filter.match_type.value}{filter.value}" for filter in self.filters] if self.filters else [] ) @@ -84,7 +80,7 @@ class EACLPubKey: class EACLRule: operation: 
Optional[EACLOperation] = None access: Optional[EACLAccess] = None - role: Optional[Union[EACLRole, str]] = None + role: Optional[Union[EACLRole, WalletInfo]] = None filters: Optional[EACLFilters] = None def to_dict(self) -> Dict[str, Any]: @@ -96,9 +92,9 @@ class EACLRule: } def __str__(self): - role = ( - self.role.value - if isinstance(self.role, EACLRole) - else f'pubkey:{wallet_utils.get_wallet_public_key(self.role, "")}' - ) + role = "" + if isinstance(self.role, EACLRole): + role = self.role.value + if isinstance(self.role, WalletInfo): + role = f"pubkey:{wallet_utils.get_wallet_public_key(self.role.path, self.role.password)}" return f'{self.access.value} {self.operation.value} {self.filters or ""} {role}' diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py index 63a3cf2..f4d729d 100644 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -1,6 +1,7 @@ from dataclasses import dataclass from typing import Optional +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.readable import HumanReadableEnum @@ -19,7 +20,7 @@ class LockObjectInfo(ObjectRef): @dataclass class StorageObjectInfo(ObjectRef): size: Optional[int] = None - wallet_file_path: Optional[str] = None + wallet: Optional[WalletInfo] = None file_path: Optional[str] = None file_hash: Optional[str] = None attributes: Optional[list[dict[str, str]]] = None diff --git a/src/frostfs_testlib/storage/dataclasses/wallet.py b/src/frostfs_testlib/storage/dataclasses/wallet.py index 1d66c4b..d053d29 100644 --- a/src/frostfs_testlib/storage/dataclasses/wallet.py +++ b/src/frostfs_testlib/storage/dataclasses/wallet.py @@ -1,13 +1,15 @@ import json import logging import os -import uuid from dataclasses import dataclass from typing import Optional -from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG, DEFAULT_WALLET_PASS +import yaml + +from frostfs_testlib import reporter +from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG, DEFAULT_WALLET_PASS from frostfs_testlib.shell import Shell -from frostfs_testlib.storage.cluster import Cluster, NodeBase +from frostfs_testlib.storage.cluster import NodeBase from frostfs_testlib.utils.wallet_utils import get_last_address_from_wallet, init_wallet logger = logging.getLogger("frostfs.testlib.utils") @@ -21,9 +23,13 @@ class WalletInfo: @staticmethod def from_node(node: NodeBase): - return WalletInfo( - node.get_wallet_path(), node.get_wallet_password(), node.get_wallet_config_path() - ) + wallet_path = node.get_wallet_path() + wallet_password = node.get_wallet_password() + wallet_config_file = os.path.join(ASSETS_DIR, os.path.basename(node.get_wallet_config_path())) + with open(wallet_config_file, "w") as file: + file.write(yaml.dump({"wallet": wallet_path, "password": wallet_password})) + + return WalletInfo(wallet_path, wallet_password, wallet_config_file) def get_address(self) -> str: """ @@ -47,22 +53,17 @@ class WalletInfo: """ with open(self.path, "r") as wallet: wallet_json = json.load(wallet) - assert abs(account_id) + 1 <= len( - wallet_json["accounts"] - ), f"There is no index '{account_id}' in wallet: {wallet_json}" + assert abs(account_id) + 1 <= len(wallet_json["accounts"]), f"There is no index '{account_id}' in wallet: {wallet_json}" return wallet_json["accounts"][account_id]["address"] class WalletFactory: - def __init__(self, 
wallets_dir: str, shell: Shell, cluster: Cluster) -> None: + def __init__(self, wallets_dir: str, shell: Shell) -> None: self.shell = shell self.wallets_dir = wallets_dir - self.cluster = cluster - def create_wallet( - self, file_name: Optional[str] = None, password: Optional[str] = None - ) -> WalletInfo: + def create_wallet(self, file_name: str, password: Optional[str] = None) -> WalletInfo: """ Creates new default wallet. @@ -74,8 +75,6 @@ class WalletFactory: WalletInfo object of new wallet. """ - if file_name is None: - file_name = str(uuid.uuid4()) if password is None: password = "" @@ -85,6 +84,8 @@ class WalletFactory: init_wallet(wallet_path, password) with open(wallet_config_path, "w") as config_file: - config_file.write(f'password: "{password}"') + config_file.write(f'wallet: {wallet_path}\npassword: "{password}"') + + reporter.attach(wallet_path, os.path.basename(wallet_path)) return WalletInfo(wallet_path, password, wallet_config_path) diff --git a/src/frostfs_testlib/utils/version_utils.py b/src/frostfs_testlib/utils/version_utils.py index 2c1f4ab..91b1d98 100644 --- a/src/frostfs_testlib/utils/version_utils.py +++ b/src/frostfs_testlib/utils/version_utils.py @@ -4,7 +4,6 @@ import re from frostfs_testlib.cli import FrostfsAdm, FrostfsCli from frostfs_testlib.hosting import Host, Hosting from frostfs_testlib.resources.cli import FROSTFS_ADM_EXEC, FROSTFS_AUTHMATE_EXEC, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE -from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG from frostfs_testlib.shell import Shell from frostfs_testlib.testing.parallel import parallel @@ -18,7 +17,7 @@ def get_local_binaries_versions(shell: Shell) -> dict[str, str]: out = shell.exec(f"{binary} --version").stdout versions[binary] = _parse_version(out) - frostfs_cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG) + frostfs_cli = FrostfsCli(shell, FROSTFS_CLI_EXEC) versions[FROSTFS_CLI_EXEC] = _parse_version(frostfs_cli.version.get().stdout) try: @@ -36,7 +35,7 @@ def get_local_binaries_versions(shell: Shell) -> dict[str, str]: def parallel_binary_verions(host: Host) -> dict[str, str]: versions_by_host = {} - + binary_path_by_name = {} # Maps binary name to executable path for service_config in host.config.services: exec_path = service_config.attributes.get("exec_path") @@ -65,7 +64,7 @@ def parallel_binary_verions(host: Host) -> dict[str, str]: versions_at_host[binary_name] = {"version": "Unknown", "check": binary["check"]} versions_by_host[host.config.address] = versions_at_host return versions_by_host - + def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]: versions_by_host = {} @@ -83,26 +82,27 @@ def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]: for host, binary_versions in versions_by_host.items(): for name, binary in binary_versions.items(): version = binary["version"] - if not cheak_versions.get(f'{name[:-2]}', None): - captured_version = cheak_versions.get(f'{name[:-2]}',{}).get(host, {}).get(captured_version) - cheak_versions[f'{name[:-2]}'] = {host: {version: name}} + if not cheak_versions.get(f"{name[:-2]}", None): + captured_version = cheak_versions.get(f"{name[:-2]}", {}).get(host, {}).get(captured_version) + cheak_versions[f"{name[:-2]}"] = {host: {version: name}} else: - captured_version = list(cheak_versions.get(f'{name[:-2]}',{}).get(previous_host).keys())[0] - cheak_versions[f'{name[:-2]}'].update({host:{version:name}}) - + captured_version = list(cheak_versions.get(f"{name[:-2]}", {}).get(previous_host).keys())[0] + 
cheak_versions[f"{name[:-2]}"].update({host: {version: name}}) + if captured_version and captured_version != version: exception.add(name[:-2]) - + versions[name] = {"version": version, "check": binary["check"]} previous_host = host if exception: for i in exception: for host in versions_by_host.keys(): for version, name in cheak_versions.get(i).get(host).items(): - exсeptions.append(f'Binary {name} has inconsistent version {version} on host {host}') - exсeptions.append('\n') + exсeptions.append(f"Binary {name} has inconsistent version {version} on host {host}") + exсeptions.append("\n") return versions, exсeptions + def _parse_version(version_output: str) -> str: version = re.search(r"version[:\s]*v?(.+)", version_output, re.IGNORECASE) return version.group(1).strip() if version else version_output diff --git a/src/frostfs_testlib/utils/wallet_utils.py b/src/frostfs_testlib/utils/wallet_utils.py index 0c5ab1a..d2b4229 100644 --- a/src/frostfs_testlib/utils/wallet_utils.py +++ b/src/frostfs_testlib/utils/wallet_utils.py @@ -9,6 +9,16 @@ from neo3.wallet import wallet as neo3_wallet logger = logging.getLogger("frostfs.testlib.utils") +def __fix_wallet_schema(wallet: dict) -> None: + # Temporary function to fix wallets that do not conform to the schema + # TODO: get rid of it once issue is solved + if "name" not in wallet: + wallet["name"] = None + for account in wallet["accounts"]: + if "extra" not in account: + account["extra"] = None + + def init_wallet(wallet_path: str, wallet_password: str): """ Create new wallet and new account. @@ -33,29 +43,15 @@ def get_last_address_from_wallet(wallet_path: str, wallet_password: str): Returns: The address for the wallet. """ - with open(wallet_path) as wallet_file: - wallet = neo3_wallet.Wallet.from_json(json.load(wallet_file), password=wallet_password) + wallet = load_wallet(wallet_path, wallet_password) address = wallet.accounts[-1].address logger.info(f"got address: {address}") return address def get_wallet_public_key(wallet_path: str, wallet_password: str, format: str = "hex") -> str: - def __fix_wallet_schema(wallet: dict) -> None: - # Temporary function to fix wallets that do not conform to the schema - # TODO: get rid of it once issue is solved - if "name" not in wallet: - wallet["name"] = None - for account in wallet["accounts"]: - if "extra" not in account: - account["extra"] = None - - # Get public key from wallet file - with open(wallet_path, "r") as file: - wallet_content = json.load(file) - __fix_wallet_schema(wallet_content) - wallet_from_json = neo3_wallet.Wallet.from_json(wallet_content, password=wallet_password) - public_key_hex = str(wallet_from_json.accounts[0].public_key) + wallet = load_wallet(wallet_path, wallet_password) + public_key_hex = str(wallet.accounts[0].public_key) # Convert public key to specified format if format == "hex": @@ -69,7 +65,9 @@ def get_wallet_public_key(wallet_path: str, wallet_password: str, format: str = raise ValueError(f"Invalid public key format: {format}") -def load_wallet(path: str, passwd: str = "") -> neo3_wallet.Wallet: - with open(path, "r") as wallet_file: - wlt_data = wallet_file.read() - return neo3_wallet.Wallet.from_json(json.loads(wlt_data), password=passwd) +def load_wallet(wallet_path: str, wallet_password: str) -> neo3_wallet.Wallet: + with open(wallet_path) as wallet_file: + wallet_content = json.load(wallet_file) + + __fix_wallet_schema(wallet_content) + return neo3_wallet.Wallet.from_json(wallet_content, password=wallet_password) From b323bcfd0ada57d1652fb44ec95a7cc43eaf64ba Mon Sep 
17 00:00:00 2001 From: Andrey Berezin Date: Thu, 14 Mar 2024 14:27:31 +0300 Subject: [PATCH 217/363] [#192] Fix param --- src/frostfs_testlib/credentials/wallet_factory_provider.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/credentials/wallet_factory_provider.py b/src/frostfs_testlib/credentials/wallet_factory_provider.py index 4d1ab7a..d00020f 100644 --- a/src/frostfs_testlib/credentials/wallet_factory_provider.py +++ b/src/frostfs_testlib/credentials/wallet_factory_provider.py @@ -10,5 +10,5 @@ class WalletFactoryProvider(GrpcCredentialsProvider): @reporter.step("Init gRPC Credentials using wallet generation") def provide(self, user: User, cluster_node: ClusterNode) -> WalletInfo: wallet_factory = WalletFactory(ASSETS_DIR, LocalShell()) - user.wallet = wallet_factory.create_wallet(file_name=user, password=DEFAULT_WALLET_PASS) + user.wallet = wallet_factory.create_wallet(file_name=user.name, password=DEFAULT_WALLET_PASS) return user.wallet From 0e247c2ff26327d1af03f2d21da2ff843888e694 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Thu, 14 Mar 2024 16:39:20 +0300 Subject: [PATCH 218/363] [#193] Fix auth provider Signed-off-by: Andrey Berezin --- src/frostfs_testlib/credentials/authmate_s3_provider.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/credentials/authmate_s3_provider.py b/src/frostfs_testlib/credentials/authmate_s3_provider.py index 6343b5a..66c5015 100644 --- a/src/frostfs_testlib/credentials/authmate_s3_provider.py +++ b/src/frostfs_testlib/credentials/authmate_s3_provider.py @@ -22,7 +22,7 @@ class AuthmateS3CredentialsProvider(S3CredentialsProvider): gate_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes] # unique short bucket name - bucket = f"bucket_{hex(int(datetime.now().timestamp()*1000000))}" + bucket = f"bucket-{hex(int(datetime.now().timestamp()*1000000))}" frostfs_authmate: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC) issue_secret_output = frostfs_authmate.secret.issue( @@ -40,7 +40,7 @@ class AuthmateS3CredentialsProvider(S3CredentialsProvider): ) cid = str(re.search(r"container_id.*:\s.(?P\w*)", issue_secret_output).group("container_id")) - containers_list = list_containers(wallet.path, shell, endpoint) + containers_list = list_containers(wallet, shell, endpoint) assert cid in containers_list, f"Expected cid {cid} in {containers_list}" user.s3_credentials = S3Credentials(aws_access_key_id, aws_secret_access_key) From f2bded64e4a7672ae038b09544567830cc76a81f Mon Sep 17 00:00:00 2001 From: Liza Date: Mon, 4 Mar 2024 17:01:24 +0300 Subject: [PATCH 219/363] [#189] Add setup step to check binaries versions Signed-off-by: Liza --- src/frostfs_testlib/utils/version_utils.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/frostfs_testlib/utils/version_utils.py b/src/frostfs_testlib/utils/version_utils.py index 91b1d98..f1b7e37 100644 --- a/src/frostfs_testlib/utils/version_utils.py +++ b/src/frostfs_testlib/utils/version_utils.py @@ -1,6 +1,7 @@ import logging import re +from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsAdm, FrostfsCli from frostfs_testlib.hosting import Host, Hosting from frostfs_testlib.resources.cli import FROSTFS_ADM_EXEC, FROSTFS_AUTHMATE_EXEC, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE @@ -10,6 +11,7 @@ from frostfs_testlib.testing.parallel import parallel logger = logging.getLogger("NeoLogger") +@reporter.step("Get local binaries versions") def 
get_local_binaries_versions(shell: Shell) -> dict[str, str]: versions = {} @@ -29,6 +31,7 @@ def get_local_binaries_versions(shell: Shell) -> dict[str, str]: out = shell.exec("aws --version").stdout out_lines = out.split("\n") versions["AWS"] = out_lines[0] if out_lines else "Unknown" + logger.info(f"Local binaries version: {out_lines[0]}") return versions @@ -66,6 +69,7 @@ def parallel_binary_verions(host: Host) -> dict[str, str]: return versions_by_host +@reporter.step("Get remote binaries versions") def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]: versions_by_host = {} future_binary_verions = parallel(parallel_binary_verions, parallel_items=hosting.hosts) @@ -94,6 +98,9 @@ def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]: versions[name] = {"version": version, "check": binary["check"]} previous_host = host + logger.info( + "Remote binaries versions:\n" + "\n".join([f"{key} ver: {value['version']}" for key, value in versions.items()]) + ) if exception: for i in exception: for host in versions_by_host.keys(): From 9c508c4f66f615c44f911f4de57492fc8e880ad6 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Fri, 15 Mar 2024 17:44:18 +0300 Subject: [PATCH 220/363] [#194] Fix shards watcher CLI usage Signed-off-by: Andrey Berezin --- src/frostfs_testlib/storage/controllers/shards_watcher.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/storage/controllers/shards_watcher.py b/src/frostfs_testlib/storage/controllers/shards_watcher.py index ad07ff4..3d313f1 100644 --- a/src/frostfs_testlib/storage/controllers/shards_watcher.py +++ b/src/frostfs_testlib/storage/controllers/shards_watcher.py @@ -79,9 +79,7 @@ class ShardsWatcher: assert self._is_shard_present(shard_id) shards_with_new_errors = self.get_shards_with_new_errors() - assert ( - shard_id in shards_with_new_errors - ), f"Expected shard {shard_id} to have new errors, but haven't {self.shards_snapshots[-1]}" + assert shard_id in shards_with_new_errors, f"Expected shard {shard_id} to have new errors, but haven't {self.shards_snapshots[-1]}" @wait_for_success(300, 5) def await_for_shards_have_no_new_errors(self): @@ -97,6 +95,8 @@ class ShardsWatcher: response = shards_cli.list( endpoint=self.storage_node.get_control_endpoint(), + wallet=self.storage_node.get_remote_wallet_path(), + wallet_password=self.storage_node.get_wallet_password(), json_mode=True, ) @@ -109,6 +109,8 @@ class ShardsWatcher: ) return shards_cli.set_mode( endpoint=self.storage_node.get_control_endpoint(), + wallet=self.storage_node.get_remote_wallet_path(), + wallet_password=self.storage_node.get_wallet_password(), mode=mode, id=[shard_id], clear_errors=clear_errors, From 11487e983da6e1cc2fae8ea266dac0960a89010d Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Mon, 18 Mar 2024 20:09:08 +0300 Subject: [PATCH 221/363] [#196] Removed profile name from Boto3 client --- src/frostfs_testlib/s3/boto3_client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index bdb177e..cb1ec28 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -46,7 +46,7 @@ class Boto3ClientWrapper(S3ClientWrapper): self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default" ) -> None: self.boto3_client: S3Client = None - self.session = boto3.Session(profile_name=profile) + self.session = boto3.Session() self.config = Config( 
retries={ "max_attempts": MAX_REQUEST_ATTEMPTS, From 2dc5aa8a1eb5f2a6900af6836e068d512d2f93a0 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 19 Mar 2024 15:47:18 +0300 Subject: [PATCH 222/363] [#195] Update netmap parser and status check message Signed-off-by: Andrey Berezin --- src/frostfs_testlib/cli/netmap_parser.py | 11 +++--- .../controllers/cluster_state_controller.py | 35 +++++++++---------- .../dataclasses/storage_object_info.py | 4 +-- 3 files changed, 26 insertions(+), 24 deletions(-) diff --git a/src/frostfs_testlib/cli/netmap_parser.py b/src/frostfs_testlib/cli/netmap_parser.py index 6d2eaaa..94d12b8 100644 --- a/src/frostfs_testlib/cli/netmap_parser.py +++ b/src/frostfs_testlib/cli/netmap_parser.py @@ -1,7 +1,7 @@ import re from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo +from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo, NodeStatus class NetmapParser: @@ -44,7 +44,7 @@ class NetmapParser: regexes = { "node_id": r"\d+: (?P\w+)", "node_data_ips": r"(?P/ip4/.+?)$", - "node_status": r"(?PONLINE|OFFLINE)", + "node_status": r"(?PONLINE|MAINTENANCE|OFFLINE)", "cluster_name": r"ClusterName: (?P\w+)", "continent": r"Continent: (?P\w+)", "country": r"Country: (?P\w+)", @@ -62,14 +62,17 @@ class NetmapParser: for node in netmap_nodes: for key, regex in regexes.items(): search_result = re.search(regex, node, flags=re.MULTILINE) + if search_result == None: + result_netmap[key] = None + continue if key == "node_data_ips": result_netmap[key] = search_result[key].strip().split(" ") continue if key == "external_address": result_netmap[key] = search_result[key].strip().split(",") continue - if search_result == None: - result_netmap[key] = None + if key == "node_status": + result_netmap[key] = NodeStatus(search_result[key].strip().lower()) continue result_netmap[key] = search_result[key].strip() diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 9e07914..03648f5 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -17,6 +17,7 @@ from frostfs_testlib.steps.network import IpHelper from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode from frostfs_testlib.storage.controllers.disk_controller import DiskController from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass +from frostfs_testlib.storage.dataclasses.storage_object_info import NodeStatus from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing import parallel from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for_success @@ -413,41 +414,39 @@ class ClusterStateController: ) frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}") - @reporter.step("Set mode node to {status}") - def set_mode_node(self, cluster_node: ClusterNode, wallet: WalletInfo, status: str, await_tick: bool = True) -> None: + @reporter.step("Set node status to {status} in CSC") + def set_node_status(self, cluster_node: ClusterNode, wallet: WalletInfo, status: NodeStatus, await_tick: bool = True) -> None: rpc_endpoint = cluster_node.storage_node.get_rpc_endpoint() control_endpoint = cluster_node.service(StorageNode).get_control_endpoint() - frostfs_adm, frostfs_cli, 
frostfs_cli_remote = self._get_cli(local_shell=self.shell, local_wallet=wallet, cluster_node=cluster_node) - node_netinfo = NetmapParser.netinfo(frostfs_cli.netmap.netinfo(rpc_endpoint=rpc_endpoint).stdout) + frostfs_adm, frostfs_cli, frostfs_cli_remote = self._get_cli(self.shell, wallet, cluster_node) + node_netinfo = NetmapParser.netinfo(frostfs_cli.netmap.netinfo(rpc_endpoint).stdout) - with reporter.step("If status maintenance, then check that the option is enabled"): - if node_netinfo.maintenance_mode_allowed == "false": - frostfs_adm.morph.set_config(set_key_value="MaintenanceModeAllowed=true") + if node_netinfo.maintenance_mode_allowed == "false": + with reporter.step("Enable maintenance mode"): + frostfs_adm.morph.set_config("MaintenanceModeAllowed=true") - with reporter.step(f"Change the status to {status}"): - frostfs_cli_remote.control.set_status(endpoint=control_endpoint, status=status) + with reporter.step(f"Set node status to {status} using FrostfsCli"): + frostfs_cli_remote.control.set_status(control_endpoint, status.value) if not await_tick: return - with reporter.step("Tick 1 epoch, and await 2 block"): + with reporter.step("Tick 1 epoch and await 2 block"): frostfs_adm.morph.force_new_epoch() time.sleep(parse_time(MORPH_BLOCK_TIME) * 2) - self.check_node_status(status=status, wallet=wallet, cluster_node=cluster_node) + self.await_node_status(status, wallet, cluster_node) - @wait_for_success(80, 8, title="Wait for storage status become {status}") - def check_node_status(self, status: str, wallet: WalletInfo, cluster_node: ClusterNode): + @wait_for_success(80, 8, title="Wait for node status become {status}") + def await_node_status(self, status: NodeStatus, wallet: WalletInfo, cluster_node: ClusterNode): frostfs_cli = FrostfsCli(self.shell, FROSTFS_CLI_EXEC, wallet.config_path) - netmap = NetmapParser.snapshot_all_nodes( - frostfs_cli.netmap.snapshot(rpc_endpoint=cluster_node.storage_node.get_rpc_endpoint()).stdout - ) + netmap = NetmapParser.snapshot_all_nodes(frostfs_cli.netmap.snapshot(cluster_node.storage_node.get_rpc_endpoint()).stdout) netmap = [node for node in netmap if cluster_node.host_ip == node.node] - if status == "offline": + if status == NodeStatus.OFFLINE: assert cluster_node.host_ip not in netmap, f"{cluster_node.host_ip} not in Offline" else: - assert netmap[0].node_status == status.upper(), f"Node state - {netmap[0].node_status} != {status} expect" + assert netmap[0].node_status == status, f"Node status should be '{status}', but was '{netmap[0].node_status}'" def _get_cli( self, local_shell: Shell, local_wallet: WalletInfo, cluster_node: ClusterNode diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py index f4d729d..28fdaa5 100644 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -28,7 +28,7 @@ class StorageObjectInfo(ObjectRef): locks: Optional[list[LockObjectInfo]] = None -class ModeNode(HumanReadableEnum): +class NodeStatus(HumanReadableEnum): MAINTENANCE: str = "maintenance" ONLINE: str = "online" OFFLINE: str = "offline" @@ -37,7 +37,7 @@ class ModeNode(HumanReadableEnum): @dataclass class NodeNetmapInfo: node_id: str = None - node_status: ModeNode = None + node_status: NodeStatus = None node_data_ips: list[str] = None cluster_name: str = None continent: str = None From 653621fb7eebce0fb6a9cf558eab1365d52012e0 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Wed, 20 Mar 
2024 17:06:38 +0300 Subject: [PATCH 223/363] [#197] Allow config_dir for local scenario --- src/frostfs_testlib/load/load_config.py | 42 +++++++------------------ 1 file changed, 11 insertions(+), 31 deletions(-) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index b859971..2a546c4 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -177,13 +177,9 @@ class Preset: @dataclass class PrometheusParams: # Prometheus server URL - server_url: Optional[str] = metadata_field( - all_load_scenarios, env_variable="K6_PROMETHEUS_RW_SERVER_URL", string_repr=False - ) + server_url: Optional[str] = metadata_field(all_load_scenarios, env_variable="K6_PROMETHEUS_RW_SERVER_URL", string_repr=False) # Prometheus trend stats - trend_stats: Optional[str] = metadata_field( - all_load_scenarios, env_variable="K6_PROMETHEUS_RW_TREND_STATS", string_repr=False - ) + trend_stats: Optional[str] = metadata_field(all_load_scenarios, env_variable="K6_PROMETHEUS_RW_TREND_STATS", string_repr=False) # Additional tags metrics_tags: Optional[str] = metadata_field(all_load_scenarios, None, "METRIC_TAGS", False) @@ -246,9 +242,7 @@ class LoadParams: # ------- COMMON SCENARIO PARAMS ------- # Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value. - load_time: Optional[int] = metadata_field( - all_load_scenarios, None, "DURATION", False, formatter=convert_time_to_seconds - ) + load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION", False, formatter=convert_time_to_seconds) # Object size in KB for load and preset. object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE", False) # For read operations, controls from which set get objects to read @@ -266,9 +260,7 @@ class LoadParams: # sleep for the remainder of the time until the specified minimum duration is reached. min_iteration_duration: Optional[str] = metadata_field(all_load_scenarios, None, "K6_MIN_ITERATION_DURATION", False) # Prepare/cut objects locally on client before sending - prepare_locally: Optional[bool] = metadata_field( - [LoadScenario.gRPC, LoadScenario.gRPC_CAR], None, "PREPARE_LOCALLY", False - ) + prepare_locally: Optional[bool] = metadata_field([LoadScenario.gRPC, LoadScenario.gRPC_CAR], None, "PREPARE_LOCALLY", False) # Specifies K6 setupTimeout time. Currently hardcoded in xk6 as 5 seconds for all scenarios # https://k6.io/docs/using-k6/k6-options/reference/#setup-timeout setup_timeout: Optional[str] = metadata_field(all_scenarios, None, "K6_SETUP_TIMEOUT", False) @@ -298,35 +290,25 @@ class LoadParams: delete_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "DELETE_RATE", True, True) # Amount of preAllocatedVUs for write operations. - preallocated_writers: Optional[int] = metadata_field( - constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True, True - ) + preallocated_writers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True, True) # Amount of maxVUs for write operations. max_writers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_WRITERS", False, True) # Amount of preAllocatedVUs for read operations. 
- preallocated_readers: Optional[int] = metadata_field( - constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True, True - ) + preallocated_readers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True, True) # Amount of maxVUs for read operations. max_readers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_READERS", False, True) # Amount of preAllocatedVUs for read operations. - preallocated_deleters: Optional[int] = metadata_field( - constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True, True - ) + preallocated_deleters: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True, True) # Amount of maxVUs for delete operations. max_deleters: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_DELETERS", False, True) # Multipart # Number of parts to upload in parallel - writers_multipart: Optional[int] = metadata_field( - [LoadScenario.S3_MULTIPART], None, "WRITERS_MULTIPART", False, True - ) + writers_multipart: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITERS_MULTIPART", False, True) # part size must be greater than (5 MB) - write_object_part_size: Optional[int] = metadata_field( - [LoadScenario.S3_MULTIPART], None, "WRITE_OBJ_PART_SIZE", False - ) + write_object_part_size: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITE_OBJ_PART_SIZE", False) # Period of time to apply the rate value. time_unit: Optional[str] = metadata_field(constant_arrival_rate_scenarios, None, "TIME_UNIT", False) @@ -341,7 +323,7 @@ class LoadParams: # Config file location (filled automatically) config_file: Optional[str] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "CONFIG_FILE", False) # Config directory location (filled automatically) - config_dir: Optional[str] = metadata_field([LoadScenario.S3_LOCAL], None, "CONFIG_DIR", False) + config_dir: Optional[str] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "CONFIG_DIR", False) def set_id(self, load_id): self.load_id = load_id @@ -474,9 +456,7 @@ class LoadParams: static_params = [f"{load_type_str}"] dynamic_params = [ - f"{meta_field.name}={meta_field.value}" - for meta_field in self._get_applicable_fields() - if meta_field.metadata["string_repr"] + f"{meta_field.name}={meta_field.value}" for meta_field in self._get_applicable_fields() if meta_field.metadata["string_repr"] ] params = ", ".join(static_params + dynamic_params) From 076e444f84edc546f8960cb0e1f4191803f91a13 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Fri, 22 Mar 2024 12:19:53 +0300 Subject: [PATCH 224/363] [#198] Check only data disks for local safe-stopper Signed-off-by: Andrey Berezin --- src/frostfs_testlib/load/k6.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/load/k6.py b/src/frostfs_testlib/load/k6.py index caf3cfe..3e62a16 100644 --- a/src/frostfs_testlib/load/k6.py +++ b/src/frostfs_testlib/load/k6.py @@ -4,6 +4,7 @@ import math import os from dataclasses import dataclass from datetime import datetime +from threading import Event from time import sleep from typing import Any from urllib.parse import urlparse @@ -69,7 +70,7 @@ class K6: self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, remote_user, process_id) def _get_fill_percents(self): - fill_percents = self.shell.exec("df -H --output=source,pcent,target | grep frostfs").stdout.split("\n") + fill_percents = 
self.shell.exec("df -H --output=source,pcent,target | grep frostfs | grep data").stdout.split("\n") return [line.split() for line in fill_percents][:-1] def check_fill_percent(self): @@ -149,7 +150,7 @@ class K6: with reporter.step(f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}"): self._k6_process.start() - def wait_until_finished(self, event, soft_timeout: int = 0) -> None: + def wait_until_finished(self, event: Event, soft_timeout: int = 0) -> None: with reporter.step(f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}"): if self.load_params.scenario == LoadScenario.VERIFY: timeout = self.load_params.verify_time or 0 From 9cfaf1a6187798900ca3069518b1f734a75afd3d Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Tue, 2 Apr 2024 19:40:18 +0300 Subject: [PATCH 225/363] [#201] Add more time for node return Signed-off-by: Andrey Berezin --- src/frostfs_testlib/steps/node_management.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/steps/node_management.py b/src/frostfs_testlib/steps/node_management.py index dd38279..ece674b 100644 --- a/src/frostfs_testlib/steps/node_management.py +++ b/src/frostfs_testlib/steps/node_management.py @@ -263,7 +263,7 @@ def check_node_not_in_map(node: StorageNode, shell: Shell, alive_node: Optional[ @reporter.step("Wait for node {node} is ready") def wait_for_node_to_be_ready(node: StorageNode) -> None: - timeout, attempts = 30, 6 + timeout, attempts = 60, 15 for _ in range(attempts): try: health_check = storage_node_healthcheck(node) From 338584069d8a99113e9e293cd4563ebb4ea45aa6 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Tue, 5 Mar 2024 12:51:15 +0300 Subject: [PATCH 226/363] [#190] Add PlacementPolicy dataclass Allow to parametrize tests with placement policy. Signed-off-by: Evgenii Stratonikov --- src/frostfs_testlib/storage/dataclasses/policy.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 src/frostfs_testlib/storage/dataclasses/policy.py diff --git a/src/frostfs_testlib/storage/dataclasses/policy.py b/src/frostfs_testlib/storage/dataclasses/policy.py new file mode 100644 index 0000000..872ee05 --- /dev/null +++ b/src/frostfs_testlib/storage/dataclasses/policy.py @@ -0,0 +1,13 @@ +from dataclasses import dataclass + + +@dataclass +class PlacementPolicy: + name: str + value: str + + def __str__(self) -> str: + return self.name + + def __repr__(self) -> str: + return self.__str__() From e2a170d66e827d1d41bd201ab93ba6abfba5920b Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Tue, 5 Mar 2024 12:52:19 +0300 Subject: [PATCH 227/363] [#190] Introduce default EC placement policy The default policy which is similar to REP 2, but uses EC instead. 
Signed-off-by: Evgenii Stratonikov --- src/frostfs_testlib/steps/cli/container.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py index fc643e2..fa739a8 100644 --- a/src/frostfs_testlib/steps/cli/container.py +++ b/src/frostfs_testlib/steps/cli/container.py @@ -95,6 +95,7 @@ class StorageContainer: DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X" SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X" REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X" +DEFAULT_EC_PLACEMENT_RULE = "EC 3.1" @reporter.step("Create Container") From 6629b9bbaa5dbfc87f48597639d725d9a669cb67 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 4 Apr 2024 11:55:27 +0300 Subject: [PATCH 228/363] [#202] .forgejo: Replace old DCO action Signed-off-by: Evgenii Stratonikov --- .forgejo/workflows/dco.yml | 21 +++++++++++++++++++++ .github/CODEOWNERS | 1 - .github/workflows/dco.yml | 21 --------------------- 3 files changed, 21 insertions(+), 22 deletions(-) create mode 100644 .forgejo/workflows/dco.yml delete mode 100644 .github/CODEOWNERS delete mode 100644 .github/workflows/dco.yml diff --git a/.forgejo/workflows/dco.yml b/.forgejo/workflows/dco.yml new file mode 100644 index 0000000..9aa0d31 --- /dev/null +++ b/.forgejo/workflows/dco.yml @@ -0,0 +1,21 @@ +name: DCO action +on: [pull_request] + +jobs: + dco: + name: DCO + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Setup Go + uses: actions/setup-go@v3 + with: + go-version: '1.21' + + - name: Run commit format checker + uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3 + with: + from: 'origin/${{ github.event.pull_request.base.ref }}' diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS deleted file mode 100644 index 1422062..0000000 --- a/.github/CODEOWNERS +++ /dev/null @@ -1 +0,0 @@ -* @aprasolova @vdomnich-yadro @dansingjulia @yadro-vavdeev @abereziny diff --git a/.github/workflows/dco.yml b/.github/workflows/dco.yml deleted file mode 100644 index 40ed8fc..0000000 --- a/.github/workflows/dco.yml +++ /dev/null @@ -1,21 +0,0 @@ -name: DCO check - -on: - pull_request: - branches: - - master - -jobs: - commits_check_job: - runs-on: ubuntu-latest - name: Commits Check - steps: - - name: Get PR Commits - id: 'get-pr-commits' - uses: tim-actions/get-pr-commits@master - with: - token: ${{ secrets.GITHUB_TOKEN }} - - name: DCO Check - uses: tim-actions/dco@master - with: - commits: ${{ steps.get-pr-commits.outputs.commits }} From 863e74f16153ec95b3d639efde879355b12806ef Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 8 Apr 2024 14:26:50 +0300 Subject: [PATCH 229/363] [#204] Fix custom_registry for verify scenario Signed-off-by: Andrey Berezin --- .../storage/controllers/background_load_controller.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py index e713f02..a8588ff 100644 --- a/src/frostfs_testlib/storage/controllers/background_load_controller.py +++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py @@ -187,6 +187,7 @@ class BackgroundLoadController: read_from=self.load_params.read_from, registry_file=self.load_params.registry_file, verify_time=self.load_params.verify_time, + custom_registry=self.load_params.custom_registry, load_type=self.load_params.load_type, 
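+            # custom_registry is carried over so the verify scenario reads the same
+            # registry file that the preceding load run wrote to (it is assigned to
+            # registry_file just below)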
load_id=self.load_params.load_id, vu_init_time=0, @@ -196,6 +197,9 @@ class BackgroundLoadController: setup_timeout="1s", ) + if self.verification_params.custom_registry: + self.verification_params.registry_file = self.load_params.custom_registry + if self.verification_params.verify_time is None: raise RuntimeError("verify_time should not be none") From 65ec50391ef3227635a465aead08421e27cab298 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Thu, 23 Nov 2023 15:53:03 +0300 Subject: [PATCH 230/363] Interfaces for IAM in S3 client --- src/frostfs_testlib/s3/aws_cli_client.py | 584 ++++++++++++++++++++++- src/frostfs_testlib/s3/boto3_client.py | 315 +++++++++++- src/frostfs_testlib/s3/interfaces.py | 153 +++++- src/frostfs_testlib/utils/cli_utils.py | 4 +- 4 files changed, 1039 insertions(+), 17 deletions(-) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index e4f2bb2..470e7a3 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -29,13 +29,17 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Configure S3 client (aws cli)") def __init__( - self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default" + self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1" ) -> None: self.s3gate_endpoint = s3gate_endpoint + self.access_key_id: str = access_key_id + self.secret_access_key: str = secret_access_key self.profile = profile self.local_shell = LocalShell() + self.region = region + self.iam_endpoint = None try: - _configure_aws_cli(f"aws configure --profile {profile}", access_key_id, secret_access_key) + _configure_aws_cli(f"aws configure --profile {profile}", access_key_id, secret_access_key, region) self.local_shell.exec(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS} --profile {profile}") self.local_shell.exec( f"aws configure set retry_mode {RETRY_MODE} --profile {profile}", @@ -43,10 +47,14 @@ class AwsCliClient(S3ClientWrapper): except Exception as err: raise RuntimeError("Error while configuring AwsCliClient") from err - @reporter.step("Set endpoint S3 to {s3gate_endpoint}") + @reporter.step("Set S3 endpoint to {s3gate_endpoint}") def set_endpoint(self, s3gate_endpoint: str): self.s3gate_endpoint = s3gate_endpoint + @reporter.step("Set IAM endpoint to {iam_endpoint}") + def set_iam_endpoint(self, iam_endpoint: str): + self.iam_endpoint = iam_endpoint + @reporter.step("Create bucket S3") def create_bucket( self, @@ -565,12 +573,13 @@ class AwsCliClient(S3ClientWrapper): self.local_shell.exec(cmd) @reporter.step("Put object tagging") - def put_object_tagging(self, bucket: str, key: str, tags: list) -> None: + def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = '') -> None: tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tagging = {"TagSet": tags} + version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api put-object-tagging --bucket {bucket} --key {key} " - f"--tagging '{json.dumps(tagging)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" + f"{version} --tagging '{json.dumps(tagging)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) @@ -586,10 +595,11 @@ class AwsCliClient(S3ClientWrapper): return response.get("TagSet") @reporter.step("Delete object tagging") - def delete_object_tagging(self, bucket: str, 
key: str) -> None: + def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None: + version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api delete-object-tagging --bucket {bucket} " - f"--key {key} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + f"--key {key} {version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) self.local_shell.exec(cmd) @@ -750,3 +760,563 @@ class AwsCliClient(S3ClientWrapper): json_output = json.loads(output[output.index("{") :]) return json_output + + # IAM METHODS # + # Some methods don't have checks because AWS is silent in some cases (delete, attach, etc.) + + @reporter.step("Adds the specified user to the specified group") + def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam add-user-to-group --user-name {user_name} --group-name {group_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + + @reporter.step("Attaches the specified managed policy to the specified IAM group") + def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam attach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + sleep(S3_SYNC_WAIT_TIME * 10) + + return response + + + @reporter.step("Attaches the specified managed policy to the specified user") + def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam attach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + sleep(S3_SYNC_WAIT_TIME * 10) + + return response + + + @reporter.step("Creates a new AWS secret access key and access key ID for the specified user") + def iam_create_access_key(self, user_name: Optional[str] = None) -> dict: + cmd = ( + f"aws {self.common_flags} iam create-access-key --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + if user_name: + cmd += f" --user-name {user_name}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + access_key_id = response["AccessKey"].get("AccessKeyId") + secret_access_key = response["AccessKey"].get("SecretAccessKey") + assert access_key_id, f"Expected AccessKeyId in response:\n{response}" + assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}" + + return access_key_id, secret_access_key + + + @reporter.step("Creates a new group") + def iam_create_group(self, group_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam create-group --group-name {group_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("Group"), f"Expected Group in response:\n{response}" + assert response["Group"].get("GroupName") == group_name, f"GroupName should be equal to {group_name}" + + return response + + + @reporter.step("Creates a new managed policy for 
your AWS account") + def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: + cmd = ( + f"aws {self.common_flags} iam create-policy --endpoint {self.iam_endpoint}" + f" --policy-name {policy_name} --policy-document '{json.dumps(policy_document)}'" + ) + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("Policy"), f"Expected Policy in response:\n{response}" + assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}" + + return response + + + @reporter.step("Creates a new IAM user for your AWS account") + def iam_create_user(self, user_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam create-user --user-name {user_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("User"), f"Expected User in response:\n{response}" + assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" + + return response + + + @reporter.step("Deletes the access key pair associated with the specified IAM user") + def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam delete-access-key --access-key-id {access_key_id} --user-name {user_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + + @reporter.step("Deletes the specified IAM group") + def iam_delete_group(self, group_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam delete-group --group-name {group_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + + @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group") + def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam delete-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + + @reporter.step("Deletes the specified managed policy") + def iam_delete_policy(self, policy_arn: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam delete-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + + @reporter.step("Deletes the specified IAM user") + def iam_delete_user(self, user_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam delete-user --user-name {user_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + + return response + + + @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user") + def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam 
delete-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
+        )
+        if self.profile:
+            cmd += f" --profile {self.profile}"
+        output = self.local_shell.exec(cmd).stdout
+        response = self._to_json(output)
+
+        return response
+
+
+    @reporter.step("Removes the specified managed policy from the specified IAM group")
+    def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict:
+        cmd = (
+            f"aws {self.common_flags} iam detach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
+        )
+        if self.profile:
+            cmd += f" --profile {self.profile}"
+        output = self.local_shell.exec(cmd).stdout
+        response = self._to_json(output)
+        sleep(S3_SYNC_WAIT_TIME * 10)
+
+        return response
+
+
+    @reporter.step("Removes the specified managed policy from the specified user")
+    def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict:
+        cmd = (
+            f"aws {self.common_flags} iam detach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
+        )
+        if self.profile:
+            cmd += f" --profile {self.profile}"
+        output = self.local_shell.exec(cmd).stdout
+        response = self._to_json(output)
+        sleep(S3_SYNC_WAIT_TIME * 10)
+
+        return response
+
+
+    @reporter.step("Returns a list of IAM users that are in the specified IAM group")
+    def iam_get_group(self, group_name: str) -> dict:
+        cmd = (
+            f"aws {self.common_flags} iam get-group --group-name {group_name} --endpoint {self.iam_endpoint}"
+        )
+        if self.profile:
+            cmd += f" --profile {self.profile}"
+        output = self.local_shell.exec(cmd).stdout
+        response = self._to_json(output)
+
+        assert "Users" in response.keys(), f"Expected Users in response:\n{response}"
+        assert response.get("Group").get("GroupName") == group_name, f"GroupName should be equal to {group_name}"
+
+        return response
+
+
+    @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group")
+    def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict:
+        cmd = (
+            f"aws {self.common_flags} iam get-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
+        )
+        if self.profile:
+            cmd += f" --profile {self.profile}"
+        output = self.local_shell.exec(cmd).stdout
+        response = self._to_json(output)
+
+        return response
+
+
+    @reporter.step("Retrieves information about the specified managed policy")
+    def iam_get_policy(self, policy_arn: str) -> dict:
+        cmd = (
+            f"aws {self.common_flags} iam get-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
+        )
+        if self.profile:
+            cmd += f" --profile {self.profile}"
+        output = self.local_shell.exec(cmd).stdout
+        response = self._to_json(output)
+
+        assert response.get("Policy"), f"Expected Policy in response:\n{response}"
+        assert response["Policy"].get("Arn") == policy_arn, f"Arn should be equal to {policy_arn}"
+
+        return response
+
+
+    @reporter.step("Retrieves information about the specified version of the specified managed policy")
+    def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict:
+        cmd = (
+            f"aws {self.common_flags} iam get-policy-version --policy-arn {policy_arn} --version-id {version_id} --endpoint {self.iam_endpoint}"
+        )
+        if self.profile:
+            cmd += f" --profile {self.profile}"
+        output = self.local_shell.exec(cmd).stdout
+        response = self._to_json(output)
+
+        assert response.get("PolicyVersion"), f"Expected PolicyVersion in response:\n{response}"
+        assert
response["PolicyVersion"].get("VersionId") == version_id, f"VersionId should be equal to {version_id}" + + return response + + + @reporter.step("Retrieves information about the specified IAM user") + def iam_get_user(self, user_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam get-user --user-name {user_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("User"), f"Expected User in response:\n{response}" + assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" + + return response + + + @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user") + def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam get-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("UserName"), f"Expected User in response:\n{response}" + + return response + + + @reporter.step("Returns information about the access key IDs associated with the specified IAM user") + def iam_list_access_keys(self, user_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam list-access-keys --user-name {user_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + + @reporter.step("Lists all managed policies that are attached to the specified IAM group") + def iam_list_attached_group_policies(self, group_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam list-attached-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" + + return response + + + @reporter.step("Lists all managed policies that are attached to the specified IAM user") + def iam_list_attached_user_policies(self, user_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam list-attached-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" + + return response + + + @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to") + def iam_list_entities_for_policy(self, policy_arn: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam list-entities-for-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("PolicyGroups"), f"Expected PolicyGroups in response:\n{response}" + assert response.get("PolicyUsers"), f"Expected PolicyUsers in response:\n{response}" + + return response + + + @reporter.step("Lists the names of the inline policies that are embedded in the specified IAM 
group") + def iam_list_group_policies(self, group_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam list-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" + + return response + + + @reporter.step("Lists the IAM groups") + def iam_list_groups(self) -> dict: + cmd = ( + f"aws {self.common_flags} iam list-groups --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("Groups"), f"Expected Groups in response:\n{response}" + + return response + + + @reporter.step("Lists the IAM groups that the specified IAM user belongs to") + def iam_list_groups_for_user(self, user_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam list-groups-for-user --user-name {user_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("Groups"), f"Expected Groups in response:\n{response}" + + return response + + + @reporter.step("Lists all the managed policies that are available in your AWS account") + def iam_list_policies(self) -> dict: + cmd = ( + f"aws {self.common_flags} iam list-policies --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert 'Policies' in response.keys(), f"Expected Policies in response:\n{response}" + + return response + + + @reporter.step("Lists information about the versions of the specified managed policy") + def iam_list_policy_versions(self, policy_arn: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam list-policy-versions --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("Versions"), f"Expected Versions in response:\n{response}" + + return response + + + @reporter.step("Lists the names of the inline policies embedded in the specified IAM user") + def iam_list_user_policies(self, user_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam list-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" + + return response + + + @reporter.step("Lists the IAM users") + def iam_list_users(self) -> dict: + cmd = ( + f"aws {self.common_flags} iam list-users --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + assert "Users" in response.keys(), f"Expected Users in response:\n{response}" + + return response + + + @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group") + def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: + cmd = ( + f"aws {self.common_flags} iam put-group-policy 
--endpoint {self.iam_endpoint}" + f" --group-name {group_name} --policy-name {policy_name} --policy-document \'{json.dumps(policy_document)}\'" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + sleep(S3_SYNC_WAIT_TIME * 10) + + return response + + + @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") + def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: + cmd = ( + f"aws {self.common_flags} iam put-user-policy --endpoint {self.iam_endpoint}" + f" --user-name {user_name} --policy-name {policy_name} --policy-document \'{json.dumps(policy_document)}\'" + ) + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + sleep(S3_SYNC_WAIT_TIME * 10) + + return response + + + @reporter.step("Removes the specified user from the specified group") + def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam remove-user-from-group --endpoint {self.iam_endpoint}" + f" --group-name {group_name} --user-name {user_name}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + + @reporter.step("Updates the name and/or the path of the specified IAM group") + def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: + cmd = ( + f"aws {self.common_flags} iam update-group --group-name {group_name} --endpoint {self.iam_endpoint}" + ) + if new_name: + cmd += f" --new-group-name {new_name}" + if new_path: + cmd += f" --new-path {new_path}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + + @reporter.step("Updates the name and/or the path of the specified IAM user") + def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: + cmd = ( + f"aws {self.common_flags} iam update-user --user-name {user_name} --endpoint {self.iam_endpoint}" + ) + if new_name: + cmd += f" --new-user-name {new_name}" + if new_path: + cmd += f" --new-path {new_path}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index cb1ec28..46cfe4b 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -18,6 +18,9 @@ from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, R from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.utils.cli_utils import log_command_execution +# TODO: Refactor this code to use shell instead of _cmd_run +from frostfs_testlib.utils.cli_utils import _configure_aws_cli + logger = logging.getLogger("NeoLogger") # Disable warnings on self-signed certificate which the @@ -43,10 +46,11 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("Configure S3 client (boto3)") @report_error def __init__( - self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default" + self, access_key_id: str, secret_access_key: 
str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1" ) -> None: self.boto3_client: S3Client = None self.session = boto3.Session() + self.region = region self.config = Config( retries={ "max_attempts": MAX_REQUEST_ATTEMPTS, @@ -56,6 +60,7 @@ class Boto3ClientWrapper(S3ClientWrapper): self.access_key_id: str = access_key_id self.secret_access_key: str = secret_access_key self.s3gate_endpoint: str = "" + self.boto3_iam_client: S3Client = None self.set_endpoint(s3gate_endpoint) @reporter.step("Set endpoint S3 to {s3gate_endpoint}") @@ -69,11 +74,23 @@ class Boto3ClientWrapper(S3ClientWrapper): service_name="s3", aws_access_key_id=self.access_key_id, aws_secret_access_key=self.secret_access_key, + region_name=self.region, config=self.config, endpoint_url=s3gate_endpoint, verify=False, ) + + @reporter.step("Set endpoint IAM to {iam_endpoint}") + def set_iam_endpoint(self, iam_endpoint: str): + self.boto3_iam_client = self.session.client( + service_name="iam", + aws_access_key_id=self.access_key_id, + aws_secret_access_key=self.secret_access_key, + endpoint_url=iam_endpoint, + verify=False,) + + def _to_s3_param(self, param: str): replacement_map = { "Acl": "ACL", @@ -118,7 +135,7 @@ class Boto3ClientWrapper(S3ClientWrapper): s3_bucket = self.boto3_client.create_bucket(**params) log_command_execution(f"Created S3 bucket {bucket}", s3_bucket) - sleep(S3_SYNC_WAIT_TIME) + sleep(S3_SYNC_WAIT_TIME * 10) return bucket @reporter.step("List buckets S3") @@ -139,7 +156,7 @@ class Boto3ClientWrapper(S3ClientWrapper): def delete_bucket(self, bucket: str) -> None: response = self.boto3_client.delete_bucket(Bucket=bucket) log_command_execution("S3 Delete bucket result", response) - sleep(S3_SYNC_WAIT_TIME) + sleep(S3_SYNC_WAIT_TIME * 10) @reporter.step("Head bucket S3") @report_error @@ -355,7 +372,7 @@ class Boto3ClientWrapper(S3ClientWrapper): } response = self.boto3_client.delete_object(**params) log_command_execution("S3 Delete object result", response) - sleep(S3_SYNC_WAIT_TIME) + sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Delete objects S3") @@ -366,7 +383,7 @@ class Boto3ClientWrapper(S3ClientWrapper): assert ( "Errors" not in response ), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Message: {response["Errors"]["Message"]}' - sleep(S3_SYNC_WAIT_TIME) + sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Delete object versions S3") @@ -592,10 +609,10 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("Put object tagging") @report_error - def put_object_tagging(self, bucket: str, key: str, tags: list) -> None: + def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = '') -> None: tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tagging = {"TagSet": tags} - response = self.boto3_client.put_object_tagging(Bucket=bucket, Key=key, Tagging=tagging) + response = self.boto3_client.put_object_tagging(Bucket=bucket, Key=key, Tagging=tagging, VersionId=version_id) log_command_execution("S3 Put object tagging", response) @reporter.step("Get object tagging") @@ -654,3 +671,287 @@ class Boto3ClientWrapper(S3ClientWrapper): raise NotImplementedError("Cp is not supported for boto3 client") # END OBJECT METHODS # + + + # IAM METHODS # + # Some methods don't have checks because boto3 is silent in some cases (delete, attach, etc.) 
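+    # Typical call sequence from a test, for orientation (an illustrative sketch;
+    # all methods referenced below are defined in this class):
+    #   client.set_iam_endpoint(iam_endpoint)
+    #   client.iam_create_user("test-user")
+    #   access_key_id, secret_access_key = client.iam_create_access_key("test-user")
+    #   client.iam_attach_user_policy("test-user", policy_arn)
+    # The sleep(S3_SYNC_WAIT_TIME * 10) calls below account for IAM changes
+    # being eventually consistent.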
+ + @reporter.step("Adds the specified user to the specified group") + def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: + response = self.boto3_iam_client.add_user_to_group(UserName=user_name, GroupName=group_name) + return response + + + @reporter.step("Attaches the specified managed policy to the specified IAM group") + def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: + response = self.boto3_iam_client.attach_group_policy(GroupName=group_name, PolicyArn=policy_arn) + sleep(S3_SYNC_WAIT_TIME * 10) + return response + + + @reporter.step("Attaches the specified managed policy to the specified user") + def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: + response = self.boto3_iam_client.attach_user_policy(UserName=user_name, PolicyArn=policy_arn) + sleep(S3_SYNC_WAIT_TIME * 10) + return response + + + @reporter.step("Creates a new AWS secret access key and access key ID for the specified user") + def iam_create_access_key(self, user_name: str) -> dict: + response = self.boto3_iam_client.create_access_key(UserName=user_name) + + access_key_id = response["AccessKey"].get("AccessKeyId") + secret_access_key = response["AccessKey"].get("SecretAccessKey") + assert access_key_id, f"Expected AccessKeyId in response:\n{response}" + assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}" + + return access_key_id, secret_access_key + + + @reporter.step("Creates a new group") + def iam_create_group(self, group_name: str) -> dict: + response = self.boto3_iam_client.create_group(GroupName=group_name) + assert response.get("Group"), f"Expected Group in response:\n{response}" + assert response["Group"].get("GroupName") == group_name, f"GroupName should be equal to {group_name}" + + return response + + + @reporter.step("Creates a new managed policy for your AWS account") + def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: + response = self.boto3_iam_client.create_policy(PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)) + assert response.get("Policy"), f"Expected Policy in response:\n{response}" + assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}" + + return response + + + @reporter.step("Creates a new IAM user for your AWS account") + def iam_create_user(self, user_name: str) -> dict: + response = self.boto3_iam_client.create_user(UserName=user_name) + assert response.get("User"), f"Expected User in response:\n{response}" + assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" + + return response + + + @reporter.step("Deletes the access key pair associated with the specified IAM user") + def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: + response = self.boto3_iam_client.delete_access_key(AccessKeyId=access_key_id, UserName=user_name) + return response + + + @reporter.step("Deletes the specified IAM group") + def iam_delete_group(self, group_name: str) -> dict: + response = self.boto3_iam_client.delete_group(GroupName=group_name) + return response + + + @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group") + def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: + response = self.boto3_iam_client.delete_group_policy(GroupName=group_name, PolicyName=policy_name) + return response + + + @reporter.step("Deletes the specified managed policy") + def iam_delete_policy(self, 
policy_arn: str) -> dict:
+        response = self.boto3_iam_client.delete_policy(PolicyArn=policy_arn)
+        return response
+
+
+    @reporter.step("Deletes the specified IAM user")
+    def iam_delete_user(self, user_name: str) -> dict:
+        response = self.boto3_iam_client.delete_user(UserName=user_name)
+        return response
+
+
+    @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user")
+    def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict:
+        response = self.boto3_iam_client.delete_user_policy(UserName=user_name, PolicyName=policy_name)
+        return response
+
+
+    @reporter.step("Removes the specified managed policy from the specified IAM group")
+    def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict:
+        response = self.boto3_iam_client.detach_group_policy(GroupName=group_name, PolicyArn=policy_arn)
+        sleep(S3_SYNC_WAIT_TIME * 10)
+        return response
+
+
+    @reporter.step("Removes the specified managed policy from the specified user")
+    def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict:
+        response = self.boto3_iam_client.detach_user_policy(UserName=user_name, PolicyArn=policy_arn)
+        sleep(S3_SYNC_WAIT_TIME * 10)
+        return response
+
+
+    @reporter.step("Returns a list of IAM users that are in the specified IAM group")
+    def iam_get_group(self, group_name: str) -> dict:
+        response = self.boto3_iam_client.get_group(GroupName=group_name)
+        assert response.get("Group").get("GroupName") == group_name, f"GroupName should be equal to {group_name}"
+
+        return response
+
+
+    @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group")
+    def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict:
+        response = self.boto3_iam_client.get_group_policy(GroupName=group_name, PolicyName=policy_name)
+
+        return response
+
+
+    @reporter.step("Retrieves information about the specified managed policy")
+    def iam_get_policy(self, policy_arn: str) -> dict:
+        response = self.boto3_iam_client.get_policy(PolicyArn=policy_arn)
+        assert response.get("Policy"), f"Expected Policy in response:\n{response}"
+        assert response["Policy"].get("Arn") == policy_arn, f"Arn should be equal to {policy_arn}"
+
+        return response
+
+
+    @reporter.step("Retrieves information about the specified version of the specified managed policy")
+    def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict:
+        response = self.boto3_iam_client.get_policy_version(PolicyArn=policy_arn, VersionId=version_id)
+        assert response.get("PolicyVersion"), f"Expected PolicyVersion in response:\n{response}"
+        assert response["PolicyVersion"].get("VersionId") == version_id, f"VersionId should be equal to {version_id}"
+
+        return response
+
+
+    @reporter.step("Retrieves information about the specified IAM user")
+    def iam_get_user(self, user_name: str) -> dict:
+        response = self.boto3_iam_client.get_user(UserName=user_name)
+        assert response.get("User"), f"Expected User in response:\n{response}"
+        assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}"
+
+        return response
+
+
+    @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user")
+    def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict:
+        response = self.boto3_iam_client.get_user_policy(UserName=user_name, PolicyName=policy_name)
+        assert response.get("UserName"), f"Expected UserName in response:\n{response}"
+
+        return response
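+    # For reference, the policy_document dict accepted by iam_create_policy and
+    # iam_put_*_policy follows the standard IAM policy grammar, e.g. (an
+    # illustrative value, not a fixture from this repo):
+    #   {
+    #       "Version": "2012-10-17",
+    #       "Statement": [{"Effect": "Allow", "Action": "s3:*", "Resource": "*"}],
+    #   }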
+ + + @reporter.step("Returns information about the access key IDs associated with the specified IAM user") + def iam_list_access_keys(self, user_name: str) -> dict: + response = self.boto3_iam_client.list_access_keys(UserName=user_name) + + return response + + + @reporter.step("Lists all managed policies that are attached to the specified IAM group") + def iam_list_attached_group_policies(self, group_name: str) -> dict: + response = self.boto3_iam_client.list_attached_group_policies(GroupName=group_name) + assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" + + return response + + + @reporter.step("Lists all managed policies that are attached to the specified IAM user") + def iam_list_attached_user_policies(self, user_name: str) -> dict: + response = self.boto3_iam_client.list_attached_user_policies(UserName=user_name) + assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" + + return response + + + @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to") + def iam_list_entities_for_policy(self, policy_arn: str) -> dict: + response = self.boto3_iam_client.list_entities_for_policy(PolicyArn=policy_arn) + + assert response.get("PolicyGroups"), f"Expected PolicyGroups in response:\n{response}" + assert response.get("PolicyUsers"), f"Expected PolicyUsers in response:\n{response}" + + return response + + + @reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group") + def iam_list_group_policies(self, group_name: str) -> dict: + response = self.boto3_iam_client.list_group_policies(GroupName=group_name) + assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" + + return response + + + @reporter.step("Lists the IAM groups") + def iam_list_groups(self) -> dict: + response = self.boto3_iam_client.list_groups() + assert response.get("Groups"), f"Expected Groups in response:\n{response}" + + return response + + + @reporter.step("Lists the IAM groups that the specified IAM user belongs to") + def iam_list_groups_for_user(self, user_name: str) -> dict: + response = self.boto3_iam_client.list_groups_for_user(UserName=user_name) + assert response.get("Groups"), f"Expected Groups in response:\n{response}" + + return response + + + @reporter.step("Lists all the managed policies that are available in your AWS account") + def iam_list_policies(self) -> dict: + response = self.boto3_iam_client.list_policies() + assert response.get("Policies"), f"Expected Policies in response:\n{response}" + + return response + + + @reporter.step("Lists information about the versions of the specified managed policy") + def iam_list_policy_versions(self, policy_arn: str) -> dict: + response = self.boto3_iam_client.list_policy_versions(PolicyArn=policy_arn) + assert response.get("Versions"), f"Expected Versions in response:\n{response}" + + return response + + + @reporter.step("Lists the names of the inline policies embedded in the specified IAM user") + def iam_list_user_policies(self, user_name: str) -> dict: + response = self.boto3_iam_client.list_user_policies(UserName=user_name) + assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" + + return response + + + @reporter.step("Lists the IAM users") + def iam_list_users(self) -> dict: + response = self.boto3_iam_client.list_users() + assert response.get("Users"), f"Expected Users in response:\n{response}" + + return response + + + 
@reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group") + def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: + response = self.boto3_iam_client.put_group_policy(GroupName=group_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)) + sleep(S3_SYNC_WAIT_TIME * 10) + return response + + + @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") + def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: + response = self.boto3_iam_client.put_user_policy(UserName=user_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)) + sleep(S3_SYNC_WAIT_TIME * 10) + return response + + + @reporter.step("Removes the specified user from the specified group") + def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: + response = self.boto3_iam_client.remove_user_from_group(GroupName=group_name, UserName=user_name) + return response + + + @reporter.step("Updates the name and/or the path of the specified IAM group") + def iam_update_group(self, group_name: str, new_name: str, new_path: Optional[str] = None) -> dict: + response = self.boto3_iam_client.update_group(GroupName=group_name, NewGroupName=new_name, NewPath='/') + + return response + + + @reporter.step("Updates the name and/or the path of the specified IAM user") + def iam_update_user(self, user_name: str, new_name: str, new_path: Optional[str] = None) -> dict: + response = self.boto3_iam_client.update_user(UserName=user_name, NewUserName=new_name, NewPath='/') + return response \ No newline at end of file diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index b6a10e3..6c2a8e5 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -50,7 +50,7 @@ class BucketContainerResolver(ABC): class S3ClientWrapper(HumanReadableABC): @abstractmethod - def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str) -> None: + def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str, region: str) -> None: pass @abstractmethod @@ -395,3 +395,154 @@ class S3ClientWrapper(HumanReadableABC): """cp directory TODO: Add proper description""" # END OF OBJECT METHODS # + + + # IAM METHODS # + + @abstractmethod + def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: + '''Adds the specified user to the specified group''' + + @abstractmethod + def iam_attach_group_policy(self, group: str, policy_arn: str) -> dict: + '''Attaches the specified managed policy to the specified IAM group''' + + @abstractmethod + def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: + '''Attaches the specified managed policy to the specified user''' + + @abstractmethod + def iam_create_access_key(self, user_name: str) -> dict: + '''Creates a new AWS secret access key and access key ID for the specified user''' + + @abstractmethod + def iam_create_group(self, group_name: str) -> dict: + '''Creates a new group''' + + @abstractmethod + def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: + '''Creates a new managed policy for your AWS account''' + + @abstractmethod + def iam_create_user(self, user_name: str) -> dict: + '''Creates a new IAM user for your AWS account''' + + @abstractmethod + def iam_delete_access_key(self, access_key_id: str, 
user_name: str) -> dict: + '''Deletes the access key pair associated with the specified IAM user''' + + @abstractmethod + def iam_delete_group(self, group_name: str) -> dict: + '''Deletes the specified IAM group''' + + @abstractmethod + def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: + '''Deletes the specified inline policy that is embedded in the specified IAM group''' + + @abstractmethod + def iam_delete_policy(self, policy_arn: str) -> dict: + '''Deletes the specified managed policy''' + + @abstractmethod + def iam_delete_user(self, user_name: str) -> dict: + '''Deletes the specified IAM user''' + + @abstractmethod + def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: + '''Deletes the specified inline policy that is embedded in the specified IAM user''' + + @abstractmethod + def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict: + '''Removes the specified managed policy from the specified IAM group''' + + @abstractmethod + def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: + '''Removes the specified managed policy from the specified user''' + + @abstractmethod + def iam_get_group(self, group_name: str) -> dict: + '''Returns a list of IAM users that are in the specified IAM group''' + + @abstractmethod + def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: + '''Retrieves the specified inline policy document that is embedded in the specified IAM group''' + + @abstractmethod + def iam_get_policy(self, policy_arn: str) -> dict: + '''Retrieves information about the specified managed policy''' + + @abstractmethod + def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: + '''Retrieves information about the specified version of the specified managed policy''' + + @abstractmethod + def iam_get_user(self, user_name: str) -> dict: + '''Retrieves information about the specified IAM user''' + + @abstractmethod + def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: + '''Retrieves the specified inline policy document that is embedded in the specified IAM user''' + + @abstractmethod + def iam_list_access_keys(self, user_name: str) -> dict: + '''Returns information about the access key IDs associated with the specified IAM user''' + + @abstractmethod + def iam_list_attached_group_policies(self, group_name: str) -> dict: + '''Lists all managed policies that are attached to the specified IAM group''' + + @abstractmethod + def iam_list_attached_user_policies(self, user_name: str) -> dict: + '''Lists all managed policies that are attached to the specified IAM user''' + + @abstractmethod + def iam_list_entities_for_policy(self, policy_arn: str) -> dict: + '''Lists all IAM users, groups, and roles that the specified managed policy is attached to''' + + @abstractmethod + def iam_list_group_policies(self, group_name: str) -> dict: + '''Lists the names of the inline policies that are embedded in the specified IAM group''' + + @abstractmethod + def iam_list_groups(self) -> dict: + '''Lists the IAM groups''' + + @abstractmethod + def iam_list_groups_for_user(self, user_name: str) -> dict: + '''Lists the IAM groups that the specified IAM user belongs to''' + + @abstractmethod + def iam_list_policies(self) -> dict: + '''Lists all the managed policies that are available in your AWS account''' + + @abstractmethod + def iam_list_policy_versions(self, policy_arn: str) -> dict: + '''Lists information about the versions of the specified managed policy''' + + 
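+    # Both AwsCliClient and Boto3ClientWrapper implement this interface, so tests
+    # can be written once against S3ClientWrapper and parametrized over the two
+    # implementations, e.g. (an illustrative sketch):
+    #   client: S3ClientWrapper = client_class(access_key_id, secret_access_key,
+    #                                          s3gate_endpoint, profile, region)
+    #   client.set_iam_endpoint(iam_endpoint)
+    #   client.iam_list_users()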
@abstractmethod
+    def iam_list_user_policies(self, user_name: str) -> dict:
+        '''Lists the names of the inline policies embedded in the specified IAM user'''
+
+    @abstractmethod
+    def iam_list_users(self) -> dict:
+        '''Lists the IAM users'''
+
+    @abstractmethod
+    def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict:
+        '''Adds or updates an inline policy document that is embedded in the specified IAM group'''
+
+    @abstractmethod
+    def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict:
+        '''Adds or updates an inline policy document that is embedded in the specified IAM user'''
+
+    @abstractmethod
+    def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict:
+        '''Removes the specified user from the specified group'''
+
+    @abstractmethod
+    def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict:
+        '''Updates the name and/or the path of the specified IAM group'''
+
+    @abstractmethod
+    def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict:
+        '''Updates the name and/or the path of the specified IAM user'''
diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py
index 41d52ab..0a1b5fd 100644
--- a/src/frostfs_testlib/utils/cli_utils.py
+++ b/src/frostfs_testlib/utils/cli_utils.py
@@ -41,7 +41,7 @@ def _run_with_passwd(cmd: str) -> str:
     return cmd.decode()
 
 
-def _configure_aws_cli(cmd: str, key_id: str, access_key: str, out_format: str = "json") -> str:
+def _configure_aws_cli(cmd: str, key_id: str, access_key: str, region: str, out_format: str = "json") -> str:
     child = pexpect.spawn(cmd)
     child.delaybeforesend = 1
 
@@ -52,7 +52,7 @@ def _configure_aws_cli(cmd: str, key_id: str, access_key: str, out_format: str =
     child.sendline(access_key)
 
     child.expect("Default region name.*")
-    child.sendline("")
+    child.sendline(region)
 
     child.expect("Default output format.*")
     child.sendline(out_format)

From 82a8f9bab3e7938102c90bbe402fe2a586051fdb Mon Sep 17 00:00:00 2001
From: "a.berezin"
Date: Thu, 11 Apr 2024 11:46:04 +0300
Subject: [PATCH 231/363] [#205] Propagate SETUP_TIMEOUT option

Signed-off-by: a.berezin
---
 .../storage/controllers/background_load_controller.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/frostfs_testlib/storage/controllers/background_load_controller.py b/src/frostfs_testlib/storage/controllers/background_load_controller.py
index a8588ff..5628282 100644
--- a/src/frostfs_testlib/storage/controllers/background_load_controller.py
+++ b/src/frostfs_testlib/storage/controllers/background_load_controller.py
@@ -194,7 +194,7 @@ class BackgroundLoadController:
             working_dir=self.load_params.working_dir,
             endpoint_selection_strategy=self.load_params.endpoint_selection_strategy,
             k6_process_allocation_strategy=self.load_params.k6_process_allocation_strategy,
-            setup_timeout="1s",
+            setup_timeout=self.load_params.setup_timeout,
         )
 
         if self.verification_params.custom_registry:

From a85070e957e4d6b00dcdd838ccffd443e5fa8e9e Mon Sep 17 00:00:00 2001
From: Dmitriy Zayakin
Date: Mon, 15 Apr 2024 12:35:33 +0300
Subject: [PATCH 232/363] [#206] Change epoch in func set status node, to 2

Signed-off-by: Dmitriy Zayakin
---
 .../storage/controllers/cluster_state_controller.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py
b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 03648f5..4003dfd 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -432,9 +432,10 @@ class ClusterStateController: if not await_tick: return - with reporter.step("Tick 1 epoch and await 2 block"): - frostfs_adm.morph.force_new_epoch() - time.sleep(parse_time(MORPH_BLOCK_TIME) * 2) + with reporter.step("Tick 2 epoch with 2 block await."): + for _ in range(2): + frostfs_adm.morph.force_new_epoch() + time.sleep(parse_time(MORPH_BLOCK_TIME) * 2) self.await_node_status(status, wallet, cluster_node) From 70f03579602b941a660b66a1f0a6e4978d657062 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Mon, 15 Apr 2024 16:50:54 +0300 Subject: [PATCH 233/363] [#207] Fix shards for disabled write_cache Signed-off-by: a.berezin --- src/frostfs_testlib/storage/dataclasses/shard.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/storage/dataclasses/shard.py b/src/frostfs_testlib/storage/dataclasses/shard.py index 170a477..bebdbf5 100644 --- a/src/frostfs_testlib/storage/dataclasses/shard.py +++ b/src/frostfs_testlib/storage/dataclasses/shard.py @@ -56,9 +56,7 @@ class Shard: var_prefix = f"{SHARD_PREFIX}{shard_id}" blobstor_count = Shard._get_blobstor_count_from_section(config_object, shard_id) - blobstors = [ - Blobstor.from_config_object(config_object, shard_id, blobstor_id) for blobstor_id in range(blobstor_count) - ] + blobstors = [Blobstor.from_config_object(config_object, shard_id, blobstor_id) for blobstor_id in range(blobstor_count)] write_cache_enabled = config_object.as_bool(f"{var_prefix}_WRITECACHE_ENABLED") @@ -71,7 +69,13 @@ class Shard: @staticmethod def from_object(shard): metabase = shard["metabase"]["path"] if "path" in shard["metabase"] else shard["metabase"] + writecache_enabled = True + if "enabled" in shard["writecache"]: + writecache_enabled = shard["writecache"]["enabled"] + writecache = shard["writecache"]["path"] if "path" in shard["writecache"] else shard["writecache"] + if not writecache_enabled: + writecache = "" # Currently due to issue we need to check if pilorama exists in keys # TODO: make pilorama mandatory after fix From 541a3e0636e3bb4f9ab4d31a5aec3826f2382467 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Wed, 17 Apr 2024 11:03:47 +0300 Subject: [PATCH 234/363] [#208] Add await for search func Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/steps/cli/object.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index 5fe6054..cd58ec3 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ -13,6 +13,7 @@ from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.shell import Shell from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.dataclasses.wallet import WalletInfo +from frostfs_testlib.testing import wait_for_success from frostfs_testlib.utils import json_utils from frostfs_testlib.utils.cli_utils import parse_cmd_table, parse_netmap_output @@ -695,6 +696,7 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict: } +@wait_for_success() @reporter.step("Search object nodes") def get_object_nodes( cluster: Cluster, From 80c65b454e08e9ee1957f25dd0ebefbe138218b4 Mon Sep 17 00:00:00 2001 From: Andrey Berezin Date: Mon, 8 Apr 2024 12:13:59 
+0300
Subject: [PATCH 235/363] [#203] Remove hostnames kludges

Signed-off-by: Andrey Berezin
---
 src/frostfs_testlib/steps/http/http_gate.py | 17 +++---------
 src/frostfs_testlib/storage/cluster.py | 26 +++----------------
 src/frostfs_testlib/storage/constants.py | 2 --
 .../storage/dataclasses/frostfs_services.py | 9 -------
 4 files changed, 8 insertions(+), 46 deletions(-)

diff --git a/src/frostfs_testlib/steps/http/http_gate.py b/src/frostfs_testlib/steps/http/http_gate.py
index 3f4d838..373283f 100644
--- a/src/frostfs_testlib/steps/http/http_gate.py
+++ b/src/frostfs_testlib/steps/http/http_gate.py
@@ -50,9 +50,7 @@ def get_via_http_gate(
     else:
         request = f"{node.http_gate.get_endpoint()}{request_path}"

-    resp = requests.get(
-        request, headers={"Host": node.storage_node.get_http_hostname()[0]}, stream=True, timeout=timeout, verify=False
-    )
+    resp = requests.get(request, stream=True, timeout=timeout, verify=False)

     if not resp.ok:
         raise Exception(
@@ -118,7 +116,6 @@ def get_via_http_gate_by_attribute(
     cid: CID to get object from
     attribute: attribute {name: attribute} value pair
     endpoint: http gate endpoint
-    http_hostname: http host name on the node
     request_path: (optional) http request path, if omitted - use default [{endpoint}/get_by_attribute/{Key}/{Value}]
     """
     attr_name = list(attribute.keys())[0]
@@ -129,9 +126,7 @@ def get_via_http_gate_by_attribute(
     else:
         request = f"{node.http_gate.get_endpoint()}{request_path}"

-    resp = requests.get(
-        request, stream=True, timeout=timeout, verify=False, headers={"Host": node.storage_node.get_http_hostname()[0]}
-    )
+    resp = requests.get(request, stream=True, timeout=timeout, verify=False)

     if not resp.ok:
         raise Exception(
@@ -151,11 +146,8 @@ def get_via_http_gate_by_attribute(
     return file_path


-# TODO: pass http_hostname as a header
 @reporter.step("Upload via HTTP Gate")
-def upload_via_http_gate(
-    cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300
-) -> str:
+def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300) -> str:
     """
     This function uploads a given object through HTTP gate
     cid: CID to get object from
@@ -198,7 +190,6 @@ def is_object_large(filepath: str) -> bool:
         return False


-# TODO: pass http_hostname as a header
 @reporter.step("Upload via HTTP Gate using Curl")
 def upload_via_http_gate_curl(
     cid: str,
@@ -259,7 +250,7 @@ def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> str:
     file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}")

     curl = GenericCli("curl", node.host)
-    curl(f'-k -H "Host: {node.storage_node.get_http_hostname()[0]}"', f"{request} > {file_path}", shell=local_shell)
+    curl(f"-k ", f"{request} > {file_path}", shell=local_shell)

     return file_path

diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py
index 23130cb..15827cf 100644
--- a/src/frostfs_testlib/storage/cluster.py
+++ b/src/frostfs_testlib/storage/cluster.py
@@ -141,30 +141,16 @@ class ClusterNode:
         return self.host.config.interfaces[interface.value]

     def get_data_interfaces(self) -> list[str]:
-        return [
-            ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "data" in name_interface
-        ]
+        return [ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "data" in name_interface]

     def get_data_interface(self, search_interface: str) -> list[str]:
-        return [
-            self.host.config.interfaces[interface]
-            for interface in
self.host.config.interfaces.keys() - if search_interface == interface - ] + return [self.host.config.interfaces[interface] for interface in self.host.config.interfaces.keys() if search_interface == interface] def get_internal_interfaces(self) -> list[str]: - return [ - ip_address - for name_interface, ip_address in self.host.config.interfaces.items() - if "internal" in name_interface - ] + return [ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "internal" in name_interface] def get_internal_interface(self, search_internal: str) -> list[str]: - return [ - self.host.config.interfaces[interface] - for interface in self.host.config.interfaces.keys() - if search_internal == interface - ] + return [self.host.config.interfaces[interface] for interface in self.host.config.interfaces.keys() if search_internal == interface] class Cluster: @@ -175,8 +161,6 @@ class Cluster: default_rpc_endpoint: str default_s3_gate_endpoint: str default_http_gate_endpoint: str - default_http_hostname: str - default_s3_hostname: str def __init__(self, hosting: Hosting) -> None: self._hosting = hosting @@ -185,8 +169,6 @@ class Cluster: self.default_rpc_endpoint = self.services(StorageNode)[0].get_rpc_endpoint() self.default_s3_gate_endpoint = self.services(S3Gate)[0].get_endpoint() self.default_http_gate_endpoint = self.services(HTTPGate)[0].get_endpoint() - self.default_http_hostname = self.services(StorageNode)[0].get_http_hostname() - self.default_s3_hostname = self.services(StorageNode)[0].get_s3_hostname() @property def hosts(self) -> list[Host]: diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 3d75988..66bf5cc 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -16,5 +16,3 @@ class ConfigAttributes: ENDPOINT_PROMETHEUS = "endpoint_prometheus" CONTROL_ENDPOINT = "control_endpoint" UN_LOCODE = "un_locode" - HTTP_HOSTNAME = "http_hostname" - S3_HOSTNAME = "s3_hostname" diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index 9e671d5..16efd72 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -154,15 +154,6 @@ class StorageNode(NodeBase): def get_data_directory(self) -> str: return self.host.get_data_directory(self.name) - def get_storage_config(self) -> str: - return self.host.get_storage_config(self.name) - - def get_http_hostname(self) -> list[str]: - return self._get_attribute(ConfigAttributes.HTTP_HOSTNAME) - - def get_s3_hostname(self) -> list[str]: - return self._get_attribute(ConfigAttributes.S3_HOSTNAME) - def delete_blobovnicza(self): self.host.delete_blobovnicza(self.name) From c0e37c8138c59b898f2a457876e6a1ac0e2f2523 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Tue, 23 Apr 2024 22:37:54 +0300 Subject: [PATCH 236/363] [#210] Return response in complete_multipart_upload function --- src/frostfs_testlib/s3/aws_cli_client.py | 5 ++++- src/frostfs_testlib/s3/boto3_client.py | 2 ++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 470e7a3..e9811a5 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -729,7 +729,10 @@ class AwsCliClient(S3ClientWrapper): f"--key {key} --upload-id {upload_id} --multipart-upload file://{file_path} " 
f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" ) - self.local_shell.exec(cmd) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response @reporter.step("Put object lock configuration") def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index 46cfe4b..f9b8b16 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -571,6 +571,8 @@ class Boto3ClientWrapper(S3ClientWrapper): ) log_command_execution("S3 Complete multipart upload", response) + return response + @reporter.step("Put object retention") @report_error def put_object_retention( From 5b715877b3257b91dd8a3f80a1fe1f61a50828a5 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Wed, 24 Apr 2024 14:58:30 +0300 Subject: [PATCH 237/363] [#214] Removed x10 wait in delete bucket function --- src/frostfs_testlib/s3/boto3_client.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index f9b8b16..9801dbd 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -135,7 +135,7 @@ class Boto3ClientWrapper(S3ClientWrapper): s3_bucket = self.boto3_client.create_bucket(**params) log_command_execution(f"Created S3 bucket {bucket}", s3_bucket) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME) return bucket @reporter.step("List buckets S3") @@ -156,7 +156,7 @@ class Boto3ClientWrapper(S3ClientWrapper): def delete_bucket(self, bucket: str) -> None: response = self.boto3_client.delete_bucket(Bucket=bucket) log_command_execution("S3 Delete bucket result", response) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME) @reporter.step("Head bucket S3") @report_error @@ -372,7 +372,7 @@ class Boto3ClientWrapper(S3ClientWrapper): } response = self.boto3_client.delete_object(**params) log_command_execution("S3 Delete object result", response) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME) return response @reporter.step("Delete objects S3") @@ -383,7 +383,7 @@ class Boto3ClientWrapper(S3ClientWrapper): assert ( "Errors" not in response ), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Message: {response["Errors"]["Message"]}' - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME) return response @reporter.step("Delete object versions S3") From a32bd120f23496f22f3aece52549605099d3d7d3 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Fri, 3 May 2024 17:12:54 +0300 Subject: [PATCH 238/363] [#218] Add ns attribute for container create Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/cli/frostfs_cli/container.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/frostfs_testlib/cli/frostfs_cli/container.py b/src/frostfs_testlib/cli/frostfs_cli/container.py index b5592e8..43c3ec6 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/container.py +++ b/src/frostfs_testlib/cli/frostfs_cli/container.py @@ -9,6 +9,8 @@ class FrostfsCliContainer(CliCommand): self, rpc_endpoint: str, wallet: Optional[str] = None, + nns_zone: Optional[str] = None, + nns_name: Optional[str] = None, address: Optional[str] = None, attributes: Optional[dict] = None, basic_acl: Optional[str] = None, @@ -45,6 +47,8 @@ class FrostfsCliContainer(CliCommand): wallet: WIF (NEP-2) string or path to the wallet or binary key. 
xhdr: Dict with request X-Headers. timeout: Timeout for the operation (default 15s). + nns_zone: Container nns zone attribute. + nns_name: Container nns name attribute. Returns: Command's result. From 0306c09bed17a5fae6fad9df9c72da11176c4dc5 Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Thu, 25 Apr 2024 20:50:33 +0300 Subject: [PATCH 239/363] [#216] Add parameter max_total_size_gb --- src/frostfs_testlib/load/interfaces/summarized.py | 2 +- src/frostfs_testlib/load/load_config.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/load/interfaces/summarized.py b/src/frostfs_testlib/load/interfaces/summarized.py index 54947b4..4be33ef 100644 --- a/src/frostfs_testlib/load/interfaces/summarized.py +++ b/src/frostfs_testlib/load/interfaces/summarized.py @@ -86,7 +86,7 @@ class SummarizedStats: target.latencies.by_node[node_key] = operation.latency target.throughput += operation.throughput target.errors.threshold = load_params.error_threshold - target.total_bytes = operation.total_bytes + target.total_bytes += operation.total_bytes if operation.failed_iterations: target.errors.by_node[node_key] = operation.failed_iterations diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 2a546c4..e0625a9 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -233,6 +233,8 @@ class LoadParams: ) # Percentage of filling of all data disks on all nodes fill_percent: Optional[float] = None + # if specified, max payload size in GB of the storage engine. If the storage engine is already full, no new objects will be saved. + max_total_size_gb: Optional[float] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "MAX_TOTAL_SIZE_GB") # if set, the payload is generated on the fly and is not read into memory fully. 
streaming: Optional[int] = metadata_field(all_load_scenarios, None, "STREAMING", False) # Output format From 3e64b523065828bcbb84ec9bede0959c03ffaec3 Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Mon, 13 May 2024 13:34:37 +0300 Subject: [PATCH 240/363] [#220] add container metrics --- src/frostfs_testlib/storage/cluster.py | 3 +++ .../storage/dataclasses/metrics.py | 22 +++++++++++++++++++ 2 files changed, 25 insertions(+) create mode 100644 src/frostfs_testlib/storage/dataclasses/metrics.py diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index 15827cf..9fcc4c9 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -14,6 +14,7 @@ from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, Inner from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces from frostfs_testlib.storage.service_registry import ServiceRegistry +from frostfs_testlib.storage.dataclasses.metrics import Metrics class ClusterNode: @@ -24,11 +25,13 @@ class ClusterNode: class_registry: ServiceRegistry id: int host: Host + metrics: Metrics def __init__(self, host: Host, id: int) -> None: self.host = host self.id = id self.class_registry = get_service_registry() + self.metrics = Metrics(host=self.host, metrics_endpoint=self.storage_node.get_metrics_endpoint()) @property def host_ip(self): diff --git a/src/frostfs_testlib/storage/dataclasses/metrics.py b/src/frostfs_testlib/storage/dataclasses/metrics.py new file mode 100644 index 0000000..49c59bc --- /dev/null +++ b/src/frostfs_testlib/storage/dataclasses/metrics.py @@ -0,0 +1,22 @@ +from frostfs_testlib.hosting import Host +from frostfs_testlib.shell.interfaces import CommandResult + + +class Metrics: + def __init__(self, host: Host, metrics_endpoint: str) -> None: + self.storage = StorageMetrics(host, metrics_endpoint) + + + +class StorageMetrics: + """ + Class represents storage metrics in a cluster + """ + def __init__(self, host: Host, metrics_endpoint: str) -> None: + self.host = host + self.metrics_endpoint = metrics_endpoint + + def get_metric_container(self, metric: str, cid: str) -> CommandResult: + shell = self.host.get_shell() + result = shell.exec(f"curl -s {self.metrics_endpoint} | grep {metric} |grep {cid}") + return result From 3fee7aa1976e243cc0d03efaf9780fcd4dc385ed Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Mon, 13 May 2024 16:01:35 +0300 Subject: [PATCH 241/363] [#221] Added new control command CLI --- .../cli/frostfs_cli/control.py | 155 +++++++++++++++++- 1 file changed, 153 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/cli/frostfs_cli/control.py b/src/frostfs_testlib/cli/frostfs_cli/control.py index 2cddfdf..957bca9 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/control.py +++ b/src/frostfs_testlib/cli/frostfs_cli/control.py @@ -69,7 +69,7 @@ class FrostfsCliControl(CliCommand): wallet: Path to the wallet or binary key address: Address of wallet account endpoint: Remote node control address (as 'multiaddr' or ':') - objects: List of object addresses to be removed in string format + objects: List of object addresses to be removed in string format timeout: Timeout for an operation (default 15s) Returns: @@ -78,4 +78,155 @@ class FrostfsCliControl(CliCommand): return self._execute( "control drop-objects", **{param: value for param, value in locals().items() if param not in ["self"]}, - ) \ No newline at end of file 
+    )
+
+    def add_rule(
+        self,
+        endpoint: str,
+        chain_id: str,
+        target_name: str,
+        target_type: str,
+        rule: Optional[list[str]] = None,
+        path: Optional[str] = None,
+        chain_id_hex: Optional[bool] = None,
+        wallet: Optional[str] = None,
+        address: Optional[str] = None,
+        timeout: Optional[str] = None,
+    ) -> CommandResult:
+        """Add a rule chain to the specified target
+
+        Args:
+            address: Address of wallet account
+            chain-id: Assign ID to the parsed chain
+            chain-id-hex: Flag to parse chain ID as hex
+            endpoint: Remote node control address (as 'multiaddr' or ':')
+            path: Path to encoded chain in JSON or binary format
+            rule: Rule statement
+            target-name: Resource name in APE resource name format
+            target-type: Resource type (container/namespace)
+            timeout: Timeout for an operation (default 15s)
+            wallet: Path to the wallet or binary key
+
+        Returns:
+            Command's result.
+        """
+        return self._execute(
+            "control add-rule",
+            **{param: value for param, value in locals().items() if param not in ["self"]},
+        )
+
+    def get_rule(
+        self,
+        endpoint: str,
+        chain_id: str,
+        target_name: str,
+        target_type: str,
+        chain_id_hex: Optional[bool] = None,
+        wallet: Optional[str] = None,
+        address: Optional[str] = None,
+        timeout: Optional[str] = None,
+    ) -> CommandResult:
+        """Get a rule chain of the specified target
+
+        Args:
+            address: Address of wallet account
+            chain-id: Chain id
+            chain-id-hex: Flag to parse chain ID as hex
+            endpoint: Remote node control address (as 'multiaddr' or ':')
+            target-name: Resource name in APE resource name format
+            target-type: Resource type (container/namespace)
+            timeout: Timeout for an operation (default 15s)
+            wallet: Path to the wallet or binary key
+
+        Returns:
+            Command's result.
+        """
+        return self._execute(
+            "control get-rule",
+            **{param: value for param, value in locals().items() if param not in ["self"]},
+        )
+
+    def list_rules(
+        self,
+        endpoint: str,
+        target_name: str,
+        target_type: str,
+        wallet: Optional[str] = None,
+        address: Optional[str] = None,
+        timeout: Optional[str] = None,
+    ) -> CommandResult:
+        """List rule chains of the specified target
+
+        Args:
+            address: Address of wallet account
+            endpoint: Remote node control address (as 'multiaddr' or ':')
+            target-name: Resource name in APE resource name format
+            target-type: Resource type (container/namespace)
+            timeout: Timeout for an operation (default 15s)
+            wallet: Path to the wallet or binary key
+
+        Returns:
+            Command's result.
+        """
+        return self._execute(
+            "control list-rules",
+            **{param: value for param, value in locals().items() if param not in ["self"]},
+        )
+
+    def list_targets(
+        self,
+        endpoint: str,
+        chain_name: str,
+        wallet: Optional[str] = None,
+        address: Optional[str] = None,
+        timeout: Optional[str] = None,
+    ) -> CommandResult:
+        """List targets of the specified rule chain name
+
+        Args:
+            address: Address of wallet account
+            chain-name: Chain name (ingress|s3)
+            endpoint: Remote node control address (as 'multiaddr' or ':')
+            timeout: Timeout for an operation (default 15s)
+            wallet: Path to the wallet or binary key
+
+        Returns:
+            Command's result.
+ """ + return self._execute( + "control list-targets", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def remove_rule( + self, + endpoint: str, + chain_id: str, + target_name: str, + target_type: str, + all: Optional[bool] = None, + chain_id_hex: Optional[bool] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Drop objects from the node's local storage + + Args: + address: Address of wallet account + all: Remove all chains + chain-id: Assign ID to the parsed chain + chain-id-hex: Flag to parse chain ID as hex + endpoint: Remote node control address (as 'multiaddr' or ':') + target-name: Resource name in APE resource name format + target-type: Resource type(container/namespace) + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command`s result. + """ + return self._execute( + "control remove-rule", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) From b8ce75b299b9a748ac0c040a31710a53cfdb5b30 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Thu, 16 May 2024 12:47:46 +0300 Subject: [PATCH 242/363] [#224] Restore invalid_obj check Signed-off-by: a.berezin --- src/frostfs_testlib/load/load_verifiers.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/frostfs_testlib/load/load_verifiers.py b/src/frostfs_testlib/load/load_verifiers.py index cbf6f64..97b0ffa 100644 --- a/src/frostfs_testlib/load/load_verifiers.py +++ b/src/frostfs_testlib/load/load_verifiers.py @@ -57,6 +57,8 @@ class LoadVerifier: invalid_objects = verify_metrics.read.failed_iterations total_left_objects = load_metrics.write.success_iterations - delete_success + if invalid_objects > 0: + issues.append(f"There were {invalid_objects} verification fails (hash mismatch).") # Due to interruptions we may see total verified objects to be less than written on writers count if abs(total_left_objects - verified_objects) > writers: issues.append( From 37a1177a3c5e18bca7ad63849787aafe09d0bd37 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Thu, 16 May 2024 10:13:11 +0300 Subject: [PATCH 243/363] Added delete bucket policy method to s3 client --- src/frostfs_testlib/s3/aws_cli_client.py | 10 ++++++++++ src/frostfs_testlib/s3/boto3_client.py | 7 +++++++ src/frostfs_testlib/s3/interfaces.py | 4 ++++ 3 files changed, 21 insertions(+) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index e9811a5..69a097b 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -489,6 +489,16 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response.get("Policy") + @reporter.step("Delete bucket policy") + def delete_bucket_policy(self, bucket: str) -> dict: + cmd = ( + f"aws {self.common_flags} s3api delete-bucket-policy --bucket {bucket} " + f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response + @reporter.step("Put bucket policy") def put_bucket_policy(self, bucket: str, policy: dict) -> None: # Leaving it as is was in test repo. 
diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py
index 9801dbd..59da55a 100644
--- a/src/frostfs_testlib/s3/boto3_client.py
+++ b/src/frostfs_testlib/s3/boto3_client.py
@@ -246,6 +246,13 @@ class Boto3ClientWrapper(S3ClientWrapper):
         log_command_execution("S3 get_bucket_policy result", response)
         return response.get("Policy")

+    @reporter.step("Delete bucket policy")
+    @report_error
+    def delete_bucket_policy(self, bucket: str) -> str:
+        response = self.boto3_client.delete_bucket_policy(Bucket=bucket)
+        log_command_execution("S3 delete_bucket_policy result", response)
+        return response
+
     @reporter.step("Put bucket policy")
     @report_error
     def put_bucket_policy(self, bucket: str, policy: dict) -> None:
diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py
index 6c2a8e5..8cfc2bb 100644
--- a/src/frostfs_testlib/s3/interfaces.py
+++ b/src/frostfs_testlib/s3/interfaces.py
@@ -152,6 +152,10 @@ class S3ClientWrapper(HumanReadableABC):
     def get_bucket_policy(self, bucket: str) -> str:
         """Returns the policy of a specified bucket."""

+    @abstractmethod
+    def delete_bucket_policy(self, bucket: str) -> str:
+        """Deletes the policy of a specified bucket."""
+
     @abstractmethod
     def put_bucket_policy(self, bucket: str, policy: dict) -> None:
         """Applies S3 bucket policy to an S3 bucket."""

From a563f089f605ba174a405e6f297735ce4da19077 Mon Sep 17 00:00:00 2001
From: Ilyas Niyazov
Date: Tue, 21 May 2024 09:16:35 +0300
Subject: [PATCH 244/363] [#228] metrics for object

---
 src/frostfs_testlib/storage/dataclasses/metrics.py | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/src/frostfs_testlib/storage/dataclasses/metrics.py b/src/frostfs_testlib/storage/dataclasses/metrics.py
index 49c59bc..c79dcf8 100644
--- a/src/frostfs_testlib/storage/dataclasses/metrics.py
+++ b/src/frostfs_testlib/storage/dataclasses/metrics.py
@@ -20,3 +20,17 @@ class StorageMetrics:
         shell = self.host.get_shell()
         result = shell.exec(f"curl -s {self.metrics_endpoint} | grep {metric} |grep {cid}")
         return result
+
+    def get_metrics_search_by_greps(self, **greps) -> CommandResult:
+        """
+        Get metrics, searching by greps such as cid, metric_type, shard_id, etc.
+        Args:
+            greps: dict of grep-command-name and value
+                for example, get_metrics_search_by_greps(command='container_objects_total', cid='123456')
+        Returns:
+            result of metrics
+        """
+        shell = self.host.get_shell()
+        additional_greps = " |grep ".join([grep_command for grep_command in greps.values()])
+        result = shell.exec(f"curl -s {self.metrics_endpoint} | grep {additional_greps}")
+        return result

From e7423938e95e3cc55129a9fc297d78292db5b40f Mon Sep 17 00:00:00 2001
From: Dmitriy Zayakin
Date: Thu, 30 May 2024 09:12:21 +0300
Subject: [PATCH 245/363] [#232] Change provide methods

---
 src/frostfs_testlib/credentials/interfaces.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/frostfs_testlib/credentials/interfaces.py b/src/frostfs_testlib/credentials/interfaces.py
index c863da0..b2ae6f1 100644
--- a/src/frostfs_testlib/credentials/interfaces.py
+++ b/src/frostfs_testlib/credentials/interfaces.py
@@ -26,7 +26,7 @@ class S3CredentialsProvider(ABC):
         self.cluster = cluster

     @abstractmethod
-    def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None) -> S3Credentials:
+    def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None, **kwargs) -> S3Credentials:
         raise NotImplementedError("Directly called abstract class?")

@@ -35,7 +35,7 @@ class GrpcCredentialsProvider(ABC):
         self.cluster = cluster

     @abstractmethod
-    def provide(self, user: User, cluster_node: ClusterNode) -> WalletInfo:
+    def provide(self, user: User, cluster_node: ClusterNode, **kwargs) -> WalletInfo:
         raise NotImplementedError("Directly called abstract class?")

From ea1b3481205d355f96e66816744e1a372c18c987 Mon Sep 17 00:00:00 2001
From: Ilyas Niyazov
Date: Fri, 31 May 2024 09:44:17 +0300
Subject: [PATCH 246/363] [#232] grpc metrics

---
 src/frostfs_testlib/cli/frostfs_cli/tree.py | 24 +++++++++++++
 .../healthcheck/basic_healthcheck.py | 8 +++++
 src/frostfs_testlib/healthcheck/interfaces.py | 4 +++
 src/frostfs_testlib/steps/cli/tree.py | 35 +++++++++++++++++++
 4 files changed, 71 insertions(+)
 create mode 100644 src/frostfs_testlib/steps/cli/tree.py

diff --git a/src/frostfs_testlib/cli/frostfs_cli/tree.py b/src/frostfs_testlib/cli/frostfs_cli/tree.py
index af330fe..c75b526 100644
--- a/src/frostfs_testlib/cli/frostfs_cli/tree.py
+++ b/src/frostfs_testlib/cli/frostfs_cli/tree.py
@@ -27,3 +27,27 @@ class FrostfsCliTree(CliCommand):
             "tree healthcheck",
             **{param: value for param, value in locals().items() if param not in ["self"]},
         )
+
+    def list(
+        self,
+        cid: str,
+        rpc_endpoint: Optional[str] = None,
+        wallet: Optional[str] = None,
+        timeout: Optional[str] = None,
+    ) -> CommandResult:
+        """Get Tree List
+
+        Args:
+            cid: Container ID.
+            rpc_endpoint: Remote node address (as 'multiaddr' or ':').
+            wallet: WIF (NEP-2) string or path to the wallet or binary key.
+            timeout: Timeout for the operation (default 15s).
+
+        Returns:
+            Command's result.
+ + """ + return self._execute( + "tree list", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/frostfs_testlib/healthcheck/basic_healthcheck.py b/src/frostfs_testlib/healthcheck/basic_healthcheck.py index 0443e28..fc7ba59 100644 --- a/src/frostfs_testlib/healthcheck/basic_healthcheck.py +++ b/src/frostfs_testlib/healthcheck/basic_healthcheck.py @@ -47,6 +47,14 @@ class BasicHealthcheck(Healthcheck): self._perform(cluster_node, checks) + @wait_for_success(900, 30, title="Wait for tree healthcheck on {cluster_node}") + def tree_healthcheck(self, cluster_node: ClusterNode) -> str | None: + checks = { + self._tree_healthcheck: {}, + } + + self._perform(cluster_node, checks) + @wait_for_success(120, 5, title="Wait for service healthcheck on {cluster_node}") def services_healthcheck(self, cluster_node: ClusterNode): svcs_to_check = cluster_node.services diff --git a/src/frostfs_testlib/healthcheck/interfaces.py b/src/frostfs_testlib/healthcheck/interfaces.py index c665b8a..cf17852 100644 --- a/src/frostfs_testlib/healthcheck/interfaces.py +++ b/src/frostfs_testlib/healthcheck/interfaces.py @@ -19,3 +19,7 @@ class Healthcheck(ABC): @abstractmethod def services_healthcheck(self, cluster_node: ClusterNode): """Perform service status check on target cluster node""" + + @abstractmethod + def tree_healthcheck(self, cluster_node: ClusterNode): + """Perform tree healthcheck on target cluster node""" diff --git a/src/frostfs_testlib/steps/cli/tree.py b/src/frostfs_testlib/steps/cli/tree.py new file mode 100644 index 0000000..4b0dfb3 --- /dev/null +++ b/src/frostfs_testlib/steps/cli/tree.py @@ -0,0 +1,35 @@ +import logging +from typing import Optional + +from frostfs_testlib import reporter +from frostfs_testlib.cli import FrostfsCli +from frostfs_testlib.plugins import load_plugin +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC +from frostfs_testlib.shell import Shell +from frostfs_testlib.storage.dataclasses.wallet import WalletInfo + +logger = logging.getLogger("NeoLogger") + + + +@reporter.step("Get Tree List") +def get_tree_list( + wallet: WalletInfo, + cid: str, + shell: Shell, + endpoint: str, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, +) -> None: + """ + A wrapper for `frostfs-cli tree list` call. + Args: + wallet (WalletInfo): path to a wallet on whose behalf we delete the container + cid (str): ID of the container to delete + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + timeout: Timeout for the operation. + This function doesn't return anything. 
+ """ + + cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) + cli.tree.list(cid=cid, rpc_endpoint=endpoint, timeout=timeout) From ec42b156ac8ea8c17e8a6f53aa5da7cdce986f5a Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Tue, 4 Jun 2024 12:46:32 +0300 Subject: [PATCH 247/363] [#236] Add EC logic this HEAD command CLI --- src/frostfs_testlib/steps/cli/object.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index cd58ec3..3e0806c 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ -615,6 +615,11 @@ def head_object( fst_line_idx = result.stdout.find("\n") decoded = json.loads(result.stdout[fst_line_idx:]) + # if response + if "chunks" in decoded.keys(): + logger.info("decoding ec chunks") + return decoded["chunks"] + # If response is Complex Object header, it has `splitId` key if "splitId" in decoded.keys(): logger.info("decoding split header") From a3b78559a961b738554aa1afb4bf199ea42b582f Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Wed, 5 Jun 2024 13:11:08 +0300 Subject: [PATCH 248/363] [#238] Update S3 acl verify method Signed-off-by: a.berezin --- .../resources/error_patterns.py | 5 +- .../resources/s3_acl_grants.py | 9 ++++ src/frostfs_testlib/steps/s3/s3_helper.py | 46 +++++++++---------- 3 files changed, 32 insertions(+), 28 deletions(-) create mode 100644 src/frostfs_testlib/resources/s3_acl_grants.py diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py index e2e4c48..e92b33d 100644 --- a/src/frostfs_testlib/resources/error_patterns.py +++ b/src/frostfs_testlib/resources/error_patterns.py @@ -23,6 +23,5 @@ INVALID_RANGE_OVERFLOW = "invalid '{range}' range: uint64 overflow" INVALID_OFFSET_SPECIFIER = "invalid '{range}' range offset specifier" INVALID_LENGTH_SPECIFIER = "invalid '{range}' range length specifier" -S3_MALFORMED_XML_REQUEST = ( - "The XML you provided was not well-formed or did not validate against our published schema." -) +S3_BUCKET_DOES_NOT_ALLOW_ACL = "The bucket does not allow ACLs" +S3_MALFORMED_XML_REQUEST = "The XML you provided was not well-formed or did not validate against our published schema." 
diff --git a/src/frostfs_testlib/resources/s3_acl_grants.py b/src/frostfs_testlib/resources/s3_acl_grants.py
new file mode 100644
index 0000000..37005e8
--- /dev/null
+++ b/src/frostfs_testlib/resources/s3_acl_grants.py
@@ -0,0 +1,9 @@
+ALL_USERS_GROUP_URI = "http://acs.amazonaws.com/groups/global/AllUsers"
+ALL_USERS_GROUP_WRITE_GRANT = {"Grantee": {"Type": "Group", "URI": ALL_USERS_GROUP_URI}, "Permission": "WRITE"}
+ALL_USERS_GROUP_READ_GRANT = {"Grantee": {"Type": "Group", "URI": ALL_USERS_GROUP_URI}, "Permission": "READ"}
+CANONICAL_USER_FULL_CONTROL_GRANT = {"Grantee": {"Type": "CanonicalUser"}, "Permission": "FULL_CONTROL"}
+
+# https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl
+PRIVATE_GRANTS = [CANONICAL_USER_FULL_CONTROL_GRANT]
+PUBLIC_READ_GRANTS = [CANONICAL_USER_FULL_CONTROL_GRANT, ALL_USERS_GROUP_READ_GRANT]
+PUBLIC_READ_WRITE_GRANTS = [CANONICAL_USER_FULL_CONTROL_GRANT, ALL_USERS_GROUP_WRITE_GRANT, ALL_USERS_GROUP_READ_GRANT]
diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py
index baf362b..ab0cee3 100644
--- a/src/frostfs_testlib/steps/s3/s3_helper.py
+++ b/src/frostfs_testlib/steps/s3/s3_helper.py
@@ -120,32 +120,28 @@ def assert_object_lock_mode(
     ).days == retain_period, f"Expected retention period is {retain_period} days"


-def assert_s3_acl(acl_grants: list, permitted_users: str):
-    if permitted_users == "AllUsers":
-        grantees = {"AllUsers": 0, "CanonicalUser": 0}
-        for acl_grant in acl_grants:
-            if acl_grant.get("Grantee", {}).get("Type") == "Group":
-                uri = acl_grant.get("Grantee", {}).get("URI")
-                permission = acl_grant.get("Permission")
-                assert (uri, permission) == (
-                    "http://acs.amazonaws.com/groups/global/AllUsers",
-                    "FULL_CONTROL",
-                ), "All Groups should have FULL_CONTROL"
-                grantees["AllUsers"] += 1
-            if acl_grant.get("Grantee", {}).get("Type") == "CanonicalUser":
-                permission = acl_grant.get("Permission")
-                assert permission == "FULL_CONTROL", "Canonical User should have FULL_CONTROL"
-                grantees["CanonicalUser"] += 1
-        assert grantees["AllUsers"] >= 1, "All Users should have FULL_CONTROL"
-        assert grantees["CanonicalUser"] >= 1, "Canonical User should have FULL_CONTROL"
+def _format_grants_as_strings(grants: list[dict]) -> set[str]:
+    grantee_format = "{g_type}::{uri}:{permission}"
+    return set(
+        [
+            grantee_format.format(
+                g_type=grant.get("Grantee", {}).get("Type", ""),
+                uri=grant.get("Grantee", {}).get("URI", ""),
+                permission=grant.get("Permission", ""),
+            )
+            for grant in grants
+        ]
+    )

-    if permitted_users == "CanonicalUser":
-        for acl_grant in acl_grants:
-            if acl_grant.get("Grantee", {}).get("Type") == "CanonicalUser":
-                permission = acl_grant.get("Permission")
-                assert permission == "FULL_CONTROL", "Only CanonicalUser should have FULL_CONTROL"
-            else:
-                logger.error("FULL_CONTROL is given to All Users")
+
+@reporter.step("Verify ACL permissions")
+def verify_acl_permissions(actual_acl_grants: list[dict], expected_acl_grants: list[dict], strict: bool = True):
+    actual_grants = _format_grants_as_strings(actual_acl_grants)
+    expected_grants = _format_grants_as_strings(expected_acl_grants)
+
+    assert expected_grants <= actual_grants, "Permissions mismatch"
+    if strict:
+        assert expected_grants == actual_grants, "Extra permissions found, must not be there"


 @reporter.step("Delete bucket with all objects")

From 5d192524a00b0327ccda0dc3898fe7045dc2976d Mon Sep 17 00:00:00 2001
From: Dmitriy Zayakin
Date: Thu, 6 Jun 2024 15:10:36 +0300
Subject: [PATCH 249/363] [#243] New error
patterns --- src/frostfs_testlib/resources/error_patterns.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py index e92b33d..5491a7a 100644 --- a/src/frostfs_testlib/resources/error_patterns.py +++ b/src/frostfs_testlib/resources/error_patterns.py @@ -25,3 +25,6 @@ INVALID_LENGTH_SPECIFIER = "invalid '{range}' range length specifier" S3_BUCKET_DOES_NOT_ALLOW_ACL = "The bucket does not allow ACLs" S3_MALFORMED_XML_REQUEST = "The XML you provided was not well-formed or did not validate against our published schema." + +RULE_ACCESS_DENIED_CONTAINER = "access to container operation {operation} is denied by access policy engine: Access denied" +RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied" From 10821f4c494d7ac6e7d9e380bc655423fa149e16 Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Wed, 5 Jun 2024 15:13:22 +0300 Subject: [PATCH 250/363] [#239] write cache metrics --- src/frostfs_testlib/load/load_config.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index e0625a9..1128096 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -119,6 +119,8 @@ class NodesSelectionStrategy(Enum): ALL_EXCEPT_UNDER_TEST = "ALL_EXCEPT_UNDER_TEST" # Select ONE random node except under test (useful for failover). RANDOM_SINGLE_EXCEPT_UNDER_TEST = "RANDOM_SINGLE_EXCEPT_UNDER_TEST" + # Select node under test + NODE_UNDER_TEST = "NODE_UNDER_TEST" class EndpointSelectionStrategy(Enum): From bfd7f70b6cc51703dc506ad55c009de6d172bd8c Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Wed, 5 Jun 2024 16:38:55 +0300 Subject: [PATCH 251/363] [#241] Methods for tag IAM user --- src/frostfs_testlib/s3/aws_cli_client.py | 43 ++++++++++++++++++++++++ src/frostfs_testlib/s3/boto3_client.py | 19 +++++++++++ src/frostfs_testlib/s3/interfaces.py | 12 +++++++ 3 files changed, 74 insertions(+) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 69a097b..3bf335e 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -1332,4 +1332,47 @@ class AwsCliClient(S3ClientWrapper): return response + @reporter.step("Adds one or more tags to an IAM user") + def iam_tag_user(self, user_name: str, tags: list) -> dict: + tags_json = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] + cmd = ( + f"aws {self.common_flags} iam tag-user --user-name {user_name} --tags '{json.dumps(tags_json)}' --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + + @reporter.step("List tags of IAM user") + def iam_list_user_tags(self, user_name: str) -> dict: + cmd = ( + f"aws {self.common_flags} iam list-user-tags --user-name {user_name} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + + @reporter.step("Removes the specified tags from the user") + def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: + tag_keys_joined = ' '.join(tag_keys) + cmd = ( + f"aws {self.common_flags} iam untag-user --user-name {user_name} --tag-keys 
{tag_keys_joined} --endpoint {self.iam_endpoint}" + ) + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index 59da55a..bed316b 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -963,4 +963,23 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("Updates the name and/or the path of the specified IAM user") def iam_update_user(self, user_name: str, new_name: str, new_path: Optional[str] = None) -> dict: response = self.boto3_iam_client.update_user(UserName=user_name, NewUserName=new_name, NewPath='/') + return response + + + @reporter.step("Adds one or more tags to an IAM user") + def iam_tag_user(self, user_name: str, tags: list) -> dict: + tags_json = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] + response = self.boto3_iam_client.tag_user(UserName=user_name, Tags=tags_json) + return response + + + @reporter.step("List tags of IAM user") + def iam_list_user_tags(self, user_name: str) -> dict: + response = self.boto3_iam_client.list_user_tags(UserName=user_name) + return response + + + @reporter.step("Removes the specified tags from the user") + def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: + response = self.boto3_iam_client.untag_user(UserName=user_name, TagKeys=tag_keys) return response \ No newline at end of file diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index 8cfc2bb..651be7a 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -550,3 +550,15 @@ class S3ClientWrapper(HumanReadableABC): @abstractmethod def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: '''Updates the name and/or the path of the specified IAM user''' + + @abstractmethod + def iam_tag_user(self, user_name: str, tags: list) -> dict: + '''Adds one or more tags to an IAM user''' + + @abstractmethod + def iam_list_user_tags(self, user_name: str) -> dict: + '''List tags of IAM user''' + + @abstractmethod + def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: + '''Removes the specified tags from the user''' \ No newline at end of file From 7a482152a8a067a10da645c8401ef46cfb55f363 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Fri, 7 Jun 2024 17:03:39 +0300 Subject: [PATCH 252/363] [#245] Update versions check Signed-off-by: a.berezin --- src/frostfs_testlib/utils/version_utils.py | 83 ++++++++-------------- 1 file changed, 29 insertions(+), 54 deletions(-) diff --git a/src/frostfs_testlib/utils/version_utils.py b/src/frostfs_testlib/utils/version_utils.py index f1b7e37..7fcc9de 100644 --- a/src/frostfs_testlib/utils/version_utils.py +++ b/src/frostfs_testlib/utils/version_utils.py @@ -1,5 +1,6 @@ import logging import re +from functools import lru_cache from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsAdm, FrostfsCli @@ -36,78 +37,52 @@ def get_local_binaries_versions(shell: Shell) -> dict[str, str]: return versions +@reporter.step("Collect binaries versions from host") def parallel_binary_verions(host: Host) -> dict[str, str]: versions_by_host = {} - binary_path_by_name = {} # Maps binary name to executable path - for service_config in host.config.services: - exec_path = service_config.attributes.get("exec_path") - requires_check = 
service_config.attributes.get("requires_version_check", "true") - if exec_path: - binary_path_by_name[service_config.name] = { - "exec_path": exec_path, - "check": requires_check.lower() == "true", + binary_path_by_name = { + **{ + svc.name[:-3]: { + "exec_path": svc.attributes.get("exec_path"), + "param": svc.attributes.get("custom_version_parameter", "--version"), } - for cli_config in host.config.clis: - requires_check = cli_config.attributes.get("requires_version_check", "true") - binary_path_by_name[cli_config.name] = { - "exec_path": cli_config.exec_path, - "check": requires_check.lower() == "true", - } + for svc in host.config.services + if svc.attributes.get("exec_path") and svc.attributes.get("requires_version_check", "true") == "true" + }, + **{ + cli.name: {"exec_path": cli.exec_path, "param": cli.attributes.get("custom_version_parameter", "--version")} + for cli in host.config.clis + if cli.attributes.get("requires_version_check", "true") == "true" + }, + } shell = host.get_shell() versions_at_host = {} for binary_name, binary in binary_path_by_name.items(): + binary_path = binary["exec_path"] try: - binary_path = binary["exec_path"] - result = shell.exec(f"{binary_path} --version") - versions_at_host[binary_name] = {"version": _parse_version(result.stdout), "check": binary["check"]} + result = shell.exec(f"{binary_path} {binary['param']}") + version = _parse_version(result.stdout) or _parse_version(result.stderr) or "Unknown" + versions_at_host[binary_name] = version except Exception as exc: logger.error(f"Cannot get version for {binary_path} because of\n{exc}") - versions_at_host[binary_name] = {"version": "Unknown", "check": binary["check"]} + versions_at_host[binary_name] = "Unknown" versions_by_host[host.config.address] = versions_at_host return versions_by_host -@reporter.step("Get remote binaries versions") -def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]: - versions_by_host = {} - future_binary_verions = parallel(parallel_binary_verions, parallel_items=hosting.hosts) +@lru_cache +def get_remote_binaries_versions(hosting: Hosting) -> dict[str, dict[str, str]]: + versions_by_host: dict[str, dict[str, str]] = {} + + with reporter.step("Get remote binaries versions"): + future_binary_verions = parallel(parallel_binary_verions, parallel_items=hosting.hosts) + for future in future_binary_verions: versions_by_host.update(future.result()) - # Consolidate versions across all hosts - cheak_versions = {} - exсeptions = [] - exception = set() - previous_host = None - versions = {} - captured_version = None - for host, binary_versions in versions_by_host.items(): - for name, binary in binary_versions.items(): - version = binary["version"] - if not cheak_versions.get(f"{name[:-2]}", None): - captured_version = cheak_versions.get(f"{name[:-2]}", {}).get(host, {}).get(captured_version) - cheak_versions[f"{name[:-2]}"] = {host: {version: name}} - else: - captured_version = list(cheak_versions.get(f"{name[:-2]}", {}).get(previous_host).keys())[0] - cheak_versions[f"{name[:-2]}"].update({host: {version: name}}) - - if captured_version and captured_version != version: - exception.add(name[:-2]) - - versions[name] = {"version": version, "check": binary["check"]} - previous_host = host - logger.info( - "Remote binaries versions:\n" + "\n".join([f"{key} ver: {value['version']}" for key, value in versions.items()]) - ) - if exception: - for i in exception: - for host in versions_by_host.keys(): - for version, name in cheak_versions.get(i).get(host).items(): - 
exсeptions.append(f"Binary {name} has inconsistent version {version} on host {host}") - exсeptions.append("\n") - return versions, exсeptions + return versions_by_host def _parse_version(version_output: str) -> str: From cb31d41f15c2ff54dd2efe4d6985e365e5bfaffe Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Tue, 18 Jun 2024 13:37:07 +0300 Subject: [PATCH 253/363] [#247] Use TestFiles which automatically deletes itself Signed-off-by: a.berezin --- src/frostfs_testlib/s3/aws_cli_client.py | 211 +++++--------------- src/frostfs_testlib/s3/boto3_client.py | 126 +++--------- src/frostfs_testlib/s3/interfaces.py | 84 ++++---- src/frostfs_testlib/steps/cli/object.py | 17 +- src/frostfs_testlib/steps/http/http_gate.py | 31 ++- src/frostfs_testlib/utils/file_utils.py | 86 ++++++-- 6 files changed, 209 insertions(+), 346 deletions(-) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 3bf335e..f6488f5 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -14,6 +14,7 @@ from frostfs_testlib.shell.local_shell import LocalShell # TODO: Refactor this code to use shell instead of _cmd_run from frostfs_testlib.utils.cli_utils import _configure_aws_cli +from frostfs_testlib.utils.file_utils import TestFile logger = logging.getLogger("NeoLogger") command_options = CommandOptions(timeout=480) @@ -153,8 +154,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get bucket acl") def get_bucket_acl(self, bucket: str) -> list: cmd = ( - f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -172,10 +172,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("List objects S3") def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - cmd = ( - f"aws {self.common_flags} s3api list-objects --bucket {bucket} " - f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" - ) + cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -319,18 +316,18 @@ class AwsCliClient(S3ClientWrapper): version_id: Optional[str] = None, object_range: Optional[tuple[int, int]] = None, full_output: bool = False, - ) -> Union[dict, str]: - file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) + ) -> dict | TestFile: + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))) version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api get-object --bucket {bucket} --key {key} " - f"{version} {file_path} --endpoint {self.s3gate_endpoint} --profile {self.profile}" + f"{version} {test_file} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) if object_range: cmd += f" --range bytes={object_range[0]}-{object_range[1]}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - return response if full_output else file_path + return response if full_output else test_file @reporter.step("Get object ACL") def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: @@ -583,7 +580,7 @@ class AwsCliClient(S3ClientWrapper): 
self.local_shell.exec(cmd) @reporter.step("Put object tagging") - def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = '') -> None: + def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None: tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tagging = {"TagSet": tags} version = f" --version-id {version_id}" if version_id else "" @@ -622,8 +619,7 @@ class AwsCliClient(S3ClientWrapper): metadata: Optional[dict] = None, ) -> dict: cmd = ( - f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " - f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" ) if metadata: cmd += " --metadata" @@ -779,9 +775,7 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Adds the specified user to the specified group") def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam add-user-to-group --user-name {user_name} --group-name {group_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam add-user-to-group --user-name {user_name} --group-name {group_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -789,12 +783,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Attaches the specified managed policy to the specified IAM group") def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam attach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam attach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -803,12 +794,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Attaches the specified managed policy to the specified user") def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam attach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam attach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -817,12 +805,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Creates a new AWS secret access key and access key ID for the specified user") def iam_create_access_key(self, user_name: Optional[str] = None) -> dict: - cmd = ( - f"aws {self.common_flags} iam create-access-key --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam create-access-key --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" if user_name: @@ -837,12 +822,9 @@ class AwsCliClient(S3ClientWrapper): return access_key_id, secret_access_key - @reporter.step("Creates a new group") def iam_create_group(self, group_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam create-group --group-name {group_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam create-group --group-name {group_name} --endpoint 
{self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -853,7 +835,6 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Creates a new managed policy for your AWS account") def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: cmd = ( @@ -871,12 +852,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Creates a new IAM user for your AWS account") def iam_create_user(self, user_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam create-user --user-name {user_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam create-user --user-name {user_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -887,12 +865,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Deletes the access key pair associated with the specified IAM user") def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam delete-access-key --access-key-id {access_key_id} --user-name {user_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam delete-access-key --access-key-id {access_key_id} --user-name {user_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" @@ -901,12 +876,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Deletes the specified IAM group") def iam_delete_group(self, group_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam delete-group --group-name {group_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam delete-group --group-name {group_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -914,12 +886,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group") def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam delete-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam delete-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -927,12 +896,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Deletes the specified managed policy") def iam_delete_policy(self, policy_arn: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam delete-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam delete-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -940,26 +906,19 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Deletes the specified IAM user") def iam_delete_user(self, user_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam delete-user --user-name {user_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam delete-user --user-name {user_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = 
self.local_shell.exec(cmd).stdout response = self._to_json(output) - return response - @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user") def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam delete-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam delete-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -967,12 +926,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Removes the specified managed policy from the specified IAM group") def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam detach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam detach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -981,12 +937,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Removes the specified managed policy from the specified user") def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam detach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam detach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -995,12 +948,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Returns a list of IAM users that are in the specified IAM group") def iam_get_group(self, group_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam get-group --group-name {group_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam get-group --group-name {group_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1011,12 +961,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group") def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam get-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam get-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1024,12 +971,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Retrieves information about the specified managed policy") def iam_get_policy(self, policy_arn: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam get-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam get-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = 
self.local_shell.exec(cmd).stdout @@ -1040,12 +984,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Retrieves information about the specified version of the specified managed policy") def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam get-policy-version --policy-arn {policy_arn} --version-id {version_id} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam get-policy-version --policy-arn {policy_arn} --version-id {version_id} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1056,12 +997,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Retrieves information about the specified IAM user") def iam_get_user(self, user_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam get-user --user-name {user_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam get-user --user-name {user_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1072,12 +1010,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user") def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam get-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam get-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1087,12 +1022,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Returns information about the access key IDs associated with the specified IAM user") def iam_list_access_keys(self, user_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam list-access-keys --user-name {user_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam list-access-keys --user-name {user_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1100,12 +1032,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Lists all managed policies that are attached to the specified IAM group") def iam_list_attached_group_policies(self, group_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam list-attached-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam list-attached-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1115,12 +1044,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Lists all managed policies that are attached to the specified IAM user") def iam_list_attached_user_policies(self, user_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam list-attached-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam list-attached-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ 
-1130,12 +1056,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to") def iam_list_entities_for_policy(self, policy_arn: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam list-entities-for-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam list-entities-for-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1146,12 +1069,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group") def iam_list_group_policies(self, group_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam list-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam list-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1161,12 +1081,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Lists the IAM groups") def iam_list_groups(self) -> dict: - cmd = ( - f"aws {self.common_flags} iam list-groups --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam list-groups --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1176,12 +1093,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Lists the IAM groups that the specified IAM user belongs to") def iam_list_groups_for_user(self, user_name: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam list-groups-for-user --user-name {user_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam list-groups-for-user --user-name {user_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1191,27 +1105,21 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Lists all the managed policies that are available in your AWS account") def iam_list_policies(self) -> dict: - cmd = ( - f"aws {self.common_flags} iam list-policies --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam list-policies --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert 'Policies' in response.keys(), f"Expected Policies in response:\n{response}" + assert "Policies" in response.keys(), f"Expected Policies in response:\n{response}" return response - @reporter.step("Lists information about the versions of the specified managed policy") def iam_list_policy_versions(self, policy_arn: str) -> dict: - cmd = ( - f"aws {self.common_flags} iam list-policy-versions --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam list-policy-versions --policy-arn {policy_arn} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1221,12 +1129,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Lists the names of the inline policies embedded in the specified IAM user") def iam_list_user_policies(self, user_name: str) -> dict: - 
cmd = ( - f"aws {self.common_flags} iam list-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam list-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1236,12 +1141,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Lists the IAM users") def iam_list_users(self) -> dict: - cmd = ( - f"aws {self.common_flags} iam list-users --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam list-users --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout @@ -1251,12 +1153,11 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group") def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: cmd = ( f"aws {self.common_flags} iam put-group-policy --endpoint {self.iam_endpoint}" - f" --group-name {group_name} --policy-name {policy_name} --policy-document \'{json.dumps(policy_document)}\'" + f" --group-name {group_name} --policy-name {policy_name} --policy-document '{json.dumps(policy_document)}'" ) if self.profile: cmd += f" --profile {self.profile}" @@ -1266,12 +1167,11 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: cmd = ( f"aws {self.common_flags} iam put-user-policy --endpoint {self.iam_endpoint}" - f" --user-name {user_name} --policy-name {policy_name} --policy-document \'{json.dumps(policy_document)}\'" + f" --user-name {user_name} --policy-name {policy_name} --policy-document '{json.dumps(policy_document)}'" ) if self.profile: cmd += f" --profile {self.profile}" @@ -1282,7 +1182,6 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Removes the specified user from the specified group") def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: cmd = ( @@ -1296,12 +1195,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Updates the name and/or the path of the specified IAM group") def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: - cmd = ( - f"aws {self.common_flags} iam update-group --group-name {group_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam update-group --group-name {group_name} --endpoint {self.iam_endpoint}" if new_name: cmd += f" --new-group-name {new_name}" if new_path: @@ -1314,12 +1210,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Updates the name and/or the path of the specified IAM user") def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: - cmd = ( - f"aws {self.common_flags} iam update-user --user-name {user_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam update-user --user-name {user_name} --endpoint {self.iam_endpoint}" if new_name: cmd += f" --new-user-name {new_name}" if new_path: @@ -1346,12 +1239,9 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("List tags of IAM user") def iam_list_user_tags(self, user_name: str) -> dict: - 
cmd = ( - f"aws {self.common_flags} iam list-user-tags --user-name {user_name} --endpoint {self.iam_endpoint}" - ) + cmd = f"aws {self.common_flags} iam list-user-tags --user-name {user_name} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" @@ -1360,13 +1250,10 @@ class AwsCliClient(S3ClientWrapper): return response - @reporter.step("Removes the specified tags from the user") def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: - tag_keys_joined = ' '.join(tag_keys) - cmd = ( - f"aws {self.common_flags} iam untag-user --user-name {user_name} --tag-keys {tag_keys_joined} --endpoint {self.iam_endpoint}" - ) + tag_keys_joined = " ".join(tag_keys) + cmd = f"aws {self.common_flags} iam untag-user --user-name {user_name} --tag-keys {tag_keys_joined} --endpoint {self.iam_endpoint}" if self.profile: cmd += f" --profile {self.profile}" @@ -1374,5 +1261,3 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response - - diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index bed316b..bdf7a9f 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -16,10 +16,10 @@ from mypy_boto3_s3 import S3Client from frostfs_testlib import reporter from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict -from frostfs_testlib.utils.cli_utils import log_command_execution # TODO: Refactor this code to use shell instead of _cmd_run -from frostfs_testlib.utils.cli_utils import _configure_aws_cli +from frostfs_testlib.utils.cli_utils import _configure_aws_cli, log_command_execution +from frostfs_testlib.utils.file_utils import TestFile logger = logging.getLogger("NeoLogger") @@ -80,7 +80,6 @@ class Boto3ClientWrapper(S3ClientWrapper): verify=False, ) - @reporter.step("Set endpoint IAM to {iam_endpoint}") def set_iam_endpoint(self, iam_endpoint: str): self.boto3_iam_client = self.session.client( @@ -88,8 +87,8 @@ class Boto3ClientWrapper(S3ClientWrapper): aws_access_key_id=self.access_key_id, aws_secret_access_key=self.secret_access_key, endpoint_url=iam_endpoint, - verify=False,) - + verify=False, + ) def _to_s3_param(self, param: str): replacement_map = { @@ -167,9 +166,7 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("Put bucket versioning status") @report_error def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: - response = self.boto3_client.put_bucket_versioning( - Bucket=bucket, VersioningConfiguration={"Status": status.value} - ) + response = self.boto3_client.put_bucket_versioning(Bucket=bucket, VersioningConfiguration={"Status": status.value}) log_command_execution("S3 Set bucket versioning to", response) @reporter.step("Get bucket versioning status") @@ -217,11 +214,7 @@ class Boto3ClientWrapper(S3ClientWrapper): grant_write: Optional[str] = None, grant_read: Optional[str] = None, ) -> None: - params = { - self._to_s3_param(param): value - for param, value in locals().items() - if param not in ["self"] and value is not None - } + params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} response = self.boto3_client.put_bucket_acl(**params) log_command_execution("S3 ACL bucket result", response) @@ -360,11 +353,7 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("Head object S3") 
@report_error def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - params = { - self._to_s3_param(param): value - for param, value in locals().items() - if param not in ["self"] and value is not None - } + params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} response = self.boto3_client.head_object(**params) log_command_execution("S3 Head object result", response) return response @@ -372,11 +361,7 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("Delete object S3") @report_error def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - params = { - self._to_s3_param(param): value - for param, value in locals().items() - if param not in ["self"] and value is not None - } + params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} response = self.boto3_client.delete_object(**params) log_command_execution("S3 Delete object result", response) sleep(S3_SYNC_WAIT_TIME) @@ -415,9 +400,7 @@ class Boto3ClientWrapper(S3ClientWrapper): def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: # Delete objects without creating delete markers for object_version in object_versions: - response = self.boto3_client.delete_object( - Bucket=bucket, Key=object_version["Key"], VersionId=object_version["VersionId"] - ) + response = self.boto3_client.delete_object(Bucket=bucket, Key=object_version["Key"], VersionId=object_version["VersionId"]) log_command_execution("S3 Delete object result", response) @reporter.step("Put object ACL") @@ -436,11 +419,7 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("Get object ACL") @report_error def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: - params = { - self._to_s3_param(param): value - for param, value in locals().items() - if param not in ["self"] and value is not None - } + params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} response = self.boto3_client.get_object_acl(**params) log_command_execution("S3 ACL objects result", response) return response.get("Grants") @@ -483,8 +462,7 @@ class Boto3ClientWrapper(S3ClientWrapper): version_id: Optional[str] = None, object_range: Optional[tuple[int, int]] = None, full_output: bool = False, - ) -> Union[dict, str]: - filename = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) + ) -> dict | TestFile: range_str = None if object_range: range_str = f"bytes={object_range[0]}-{object_range[1]}" @@ -497,12 +475,16 @@ class Boto3ClientWrapper(S3ClientWrapper): response = self.boto3_client.get_object(**params) log_command_execution("S3 Get objects result", response) - with open(f"{filename}", "wb") as get_file: + if full_output: + return response + + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))) + with open(test_file, "wb") as file: chunk = response["Body"].read(1024) while chunk: - get_file.write(chunk) + file.write(chunk) chunk = response["Body"].read(1024) - return response if full_output else filename + return test_file @reporter.step("Create multipart upload S3") @report_error @@ -573,9 +555,7 @@ class Boto3ClientWrapper(S3ClientWrapper): @report_error def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag 
in parts] - response = self.boto3_client.complete_multipart_upload( - Bucket=bucket, Key=key, UploadId=upload_id, MultipartUpload={"Parts": parts} - ) + response = self.boto3_client.complete_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id, MultipartUpload={"Parts": parts}) log_command_execution("S3 Complete multipart upload", response) return response @@ -590,11 +570,7 @@ class Boto3ClientWrapper(S3ClientWrapper): version_id: Optional[str] = None, bypass_governance_retention: Optional[bool] = None, ) -> None: - params = { - self._to_s3_param(param): value - for param, value in locals().items() - if param not in ["self"] and value is not None - } + params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} response = self.boto3_client.put_object_retention(**params) log_command_execution("S3 Put object retention ", response) @@ -618,7 +594,7 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("Put object tagging") @report_error - def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = '') -> None: + def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None: tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tagging = {"TagSet": tags} response = self.boto3_client.put_object_tagging(Bucket=bucket, Key=key, Tagging=tagging, VersionId=version_id) @@ -627,11 +603,7 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("Get object tagging") @report_error def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: - params = { - self._to_s3_param(param): value - for param, value in locals().items() - if param not in ["self"] and value is not None - } + params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} response = self.boto3_client.get_object_tagging(**params) log_command_execution("S3 Get object tagging", response) return response.get("TagSet") @@ -681,7 +653,6 @@ class Boto3ClientWrapper(S3ClientWrapper): # END OBJECT METHODS # - # IAM METHODS # # Some methods don't have checks because boto3 is silent in some cases (delete, attach, etc.) 
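Because boto3 answers these IAM calls with little more than HTTP response metadata, as the comment above notes, a test that needs positive confirmation of a delete or a detach has to read the state back itself. A minimal sketch of that pattern against a raw IAM client (illustrative only, not part of this patch; assumes botocore's standard NoSuchEntity error code):

from botocore.exceptions import ClientError

def assert_user_deleted(iam_client, user_name: str) -> None:
    # delete_user returns no useful body, so confirm by reading the user back
    iam_client.delete_user(UserName=user_name)
    try:
        iam_client.get_user(UserName=user_name)
    except ClientError as error:
        assert error.response["Error"]["Code"] == "NoSuchEntity"
    else:
        raise AssertionError(f"User {user_name} still exists after delete")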
@@ -690,21 +661,18 @@ class Boto3ClientWrapper(S3ClientWrapper): response = self.boto3_iam_client.add_user_to_group(UserName=user_name, GroupName=group_name) return response - @reporter.step("Attaches the specified managed policy to the specified IAM group") def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: response = self.boto3_iam_client.attach_group_policy(GroupName=group_name, PolicyArn=policy_arn) sleep(S3_SYNC_WAIT_TIME * 10) return response - @reporter.step("Attaches the specified managed policy to the specified user") def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: response = self.boto3_iam_client.attach_user_policy(UserName=user_name, PolicyArn=policy_arn) sleep(S3_SYNC_WAIT_TIME * 10) return response - @reporter.step("Creates a new AWS secret access key and access key ID for the specified user") def iam_create_access_key(self, user_name: str) -> dict: response = self.boto3_iam_client.create_access_key(UserName=user_name) @@ -716,7 +684,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return access_key_id, secret_access_key - @reporter.step("Creates a new group") def iam_create_group(self, group_name: str) -> dict: response = self.boto3_iam_client.create_group(GroupName=group_name) @@ -725,7 +692,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Creates a new managed policy for your AWS account") def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: response = self.boto3_iam_client.create_policy(PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)) @@ -734,7 +700,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Creates a new IAM user for your AWS account") def iam_create_user(self, user_name: str) -> dict: response = self.boto3_iam_client.create_user(UserName=user_name) @@ -743,57 +708,48 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Deletes the access key pair associated with the specified IAM user") def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: response = self.boto3_iam_client.delete_access_key(AccessKeyId=access_key_id, UserName=user_name) return response - @reporter.step("Deletes the specified IAM group") def iam_delete_group(self, group_name: str) -> dict: response = self.boto3_iam_client.delete_group(GroupName=group_name) return response - @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group") def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: response = self.boto3_iam_client.delete_group_policy(GroupName=group_name, PolicyName=policy_name) return response - @reporter.step("Deletes the specified managed policy") def iam_delete_policy(self, policy_arn: str) -> dict: response = self.boto3_iam_client.delete_policy(PolicyArn=policy_arn) return response - @reporter.step("Deletes the specified IAM user") def iam_delete_user(self, user_name: str) -> dict: response = self.boto3_iam_client.delete_user(UserName=user_name) return response - @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user") def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: response = self.boto3_iam_client.delete_user_policy(UserName=user_name, PolicyName=policy_name) return response - @reporter.step("Removes the specified managed policy from the specified IAM group") def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict: response = 
self.boto3_iam_client.detach_group_policy(GroupName=group_name, PolicyArn=policy_arn) sleep(S3_SYNC_WAIT_TIME * 10) return response - @reporter.step("Removes the specified managed policy from the specified user") def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: response = self.boto3_iam_client.detach_user_policy(UserName=user_name, PolicyArn=policy_arn) sleep(S3_SYNC_WAIT_TIME * 10) return response - @reporter.step("Returns a list of IAM users that are in the specified IAM group") def iam_get_group(self, group_name: str) -> dict: response = self.boto3_iam_client.get_group(GroupName=group_name) @@ -801,14 +757,12 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group") def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: response = self.boto3_iam_client.get_group_policy(GroupName=group_name, PolicyName=policy_name) return response - @reporter.step("Retrieves information about the specified managed policy") def iam_get_policy(self, policy_arn: str) -> dict: response = self.boto3_iam_client.get_policy(PolicyArn=policy_arn) @@ -817,7 +771,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Retrieves information about the specified version of the specified managed policy") def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: response = self.boto3_iam_client.get_policy_version(PolicyArn=policy_arn, VersionId=version_id) @@ -826,7 +779,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Retrieves information about the specified IAM user") def iam_get_user(self, user_name: str) -> dict: response = self.boto3_iam_client.get_user(UserName=user_name) @@ -835,7 +787,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user") def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: response = self.boto3_iam_client.get_user_policy(UserName=user_name, PolicyName=policy_name) @@ -843,14 +794,12 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Returns information about the access key IDs associated with the specified IAM user") def iam_list_access_keys(self, user_name: str) -> dict: response = self.boto3_iam_client.list_access_keys(UserName=user_name) return response - @reporter.step("Lists all managed policies that are attached to the specified IAM group") def iam_list_attached_group_policies(self, group_name: str) -> dict: response = self.boto3_iam_client.list_attached_group_policies(GroupName=group_name) @@ -858,7 +807,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Lists all managed policies that are attached to the specified IAM user") def iam_list_attached_user_policies(self, user_name: str) -> dict: response = self.boto3_iam_client.list_attached_user_policies(UserName=user_name) @@ -866,7 +814,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to") def iam_list_entities_for_policy(self, policy_arn: str) -> dict: response = self.boto3_iam_client.list_entities_for_policy(PolicyArn=policy_arn) @@ -876,7 +823,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Lists the names of the inline policies that are embedded in the 
specified IAM group") def iam_list_group_policies(self, group_name: str) -> dict: response = self.boto3_iam_client.list_group_policies(GroupName=group_name) @@ -884,7 +830,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Lists the IAM groups") def iam_list_groups(self) -> dict: response = self.boto3_iam_client.list_groups() @@ -892,7 +837,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Lists the IAM groups that the specified IAM user belongs to") def iam_list_groups_for_user(self, user_name: str) -> dict: response = self.boto3_iam_client.list_groups_for_user(UserName=user_name) @@ -900,7 +844,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Lists all the managed policies that are available in your AWS account") def iam_list_policies(self) -> dict: response = self.boto3_iam_client.list_policies() @@ -908,7 +851,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Lists information about the versions of the specified managed policy") def iam_list_policy_versions(self, policy_arn: str) -> dict: response = self.boto3_iam_client.list_policy_versions(PolicyArn=policy_arn) @@ -916,7 +858,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Lists the names of the inline policies embedded in the specified IAM user") def iam_list_user_policies(self, user_name: str) -> dict: response = self.boto3_iam_client.list_user_policies(UserName=user_name) @@ -924,7 +865,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Lists the IAM users") def iam_list_users(self) -> dict: response = self.boto3_iam_client.list_users() @@ -932,54 +872,50 @@ class Boto3ClientWrapper(S3ClientWrapper): return response - @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group") def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: - response = self.boto3_iam_client.put_group_policy(GroupName=group_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)) + response = self.boto3_iam_client.put_group_policy( + GroupName=group_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document) + ) sleep(S3_SYNC_WAIT_TIME * 10) return response - @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: - response = self.boto3_iam_client.put_user_policy(UserName=user_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)) + response = self.boto3_iam_client.put_user_policy( + UserName=user_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document) + ) sleep(S3_SYNC_WAIT_TIME * 10) return response - @reporter.step("Removes the specified user from the specified group") def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: response = self.boto3_iam_client.remove_user_from_group(GroupName=group_name, UserName=user_name) return response - @reporter.step("Updates the name and/or the path of the specified IAM group") def iam_update_group(self, group_name: str, new_name: str, new_path: Optional[str] = None) -> dict: - response = self.boto3_iam_client.update_group(GroupName=group_name, NewGroupName=new_name, NewPath='/') + response = self.boto3_iam_client.update_group(GroupName=group_name, NewGroupName=new_name, NewPath="/") return response - 
@reporter.step("Updates the name and/or the path of the specified IAM user") def iam_update_user(self, user_name: str, new_name: str, new_path: Optional[str] = None) -> dict: - response = self.boto3_iam_client.update_user(UserName=user_name, NewUserName=new_name, NewPath='/') + response = self.boto3_iam_client.update_user(UserName=user_name, NewUserName=new_name, NewPath="/") return response - @reporter.step("Adds one or more tags to an IAM user") def iam_tag_user(self, user_name: str, tags: list) -> dict: tags_json = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] response = self.boto3_iam_client.tag_user(UserName=user_name, Tags=tags_json) return response - @reporter.step("List tags of IAM user") def iam_list_user_tags(self, user_name: str) -> dict: response = self.boto3_iam_client.list_user_tags(UserName=user_name) return response - @reporter.step("Removes the specified tags from the user") def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: response = self.boto3_iam_client.untag_user(UserName=user_name, TagKeys=tag_keys) - return response \ No newline at end of file + return response diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index 651be7a..f3793e0 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -4,6 +4,7 @@ from typing import Literal, Optional, Union from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.testing.readable import HumanReadableABC, HumanReadableEnum +from frostfs_testlib.utils.file_utils import TestFile def _make_objs_dict(key_names): @@ -289,7 +290,7 @@ class S3ClientWrapper(HumanReadableABC): version_id: Optional[str] = None, object_range: Optional[tuple[int, int]] = None, full_output: bool = False, - ) -> Union[dict, str]: + ) -> dict | TestFile: """Retrieves objects from S3.""" @abstractmethod @@ -400,165 +401,164 @@ class S3ClientWrapper(HumanReadableABC): # END OF OBJECT METHODS # - # IAM METHODS # @abstractmethod def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: - '''Adds the specified user to the specified group''' + """Adds the specified user to the specified group""" @abstractmethod def iam_attach_group_policy(self, group: str, policy_arn: str) -> dict: - '''Attaches the specified managed policy to the specified IAM group''' + """Attaches the specified managed policy to the specified IAM group""" @abstractmethod def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: - '''Attaches the specified managed policy to the specified user''' + """Attaches the specified managed policy to the specified user""" @abstractmethod def iam_create_access_key(self, user_name: str) -> dict: - '''Creates a new AWS secret access key and access key ID for the specified user''' + """Creates a new AWS secret access key and access key ID for the specified user""" @abstractmethod def iam_create_group(self, group_name: str) -> dict: - '''Creates a new group''' + """Creates a new group""" @abstractmethod def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: - '''Creates a new managed policy for your AWS account''' + """Creates a new managed policy for your AWS account""" @abstractmethod def iam_create_user(self, user_name: str) -> dict: - '''Creates a new IAM user for your AWS account''' + """Creates a new IAM user for your AWS account""" @abstractmethod def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: - '''Deletes the access key pair associated 
with the specified IAM user''' + """Deletes the access key pair associated with the specified IAM user""" @abstractmethod def iam_delete_group(self, group_name: str) -> dict: - '''Deletes the specified IAM group''' + """Deletes the specified IAM group""" @abstractmethod def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: - '''Deletes the specified inline policy that is embedded in the specified IAM group''' + """Deletes the specified inline policy that is embedded in the specified IAM group""" @abstractmethod def iam_delete_policy(self, policy_arn: str) -> dict: - '''Deletes the specified managed policy''' + """Deletes the specified managed policy""" @abstractmethod def iam_delete_user(self, user_name: str) -> dict: - '''Deletes the specified IAM user''' + """Deletes the specified IAM user""" @abstractmethod def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: - '''Deletes the specified inline policy that is embedded in the specified IAM user''' + """Deletes the specified inline policy that is embedded in the specified IAM user""" @abstractmethod def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict: - '''Removes the specified managed policy from the specified IAM group''' + """Removes the specified managed policy from the specified IAM group""" @abstractmethod def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: - '''Removes the specified managed policy from the specified user''' + """Removes the specified managed policy from the specified user""" @abstractmethod def iam_get_group(self, group_name: str) -> dict: - '''Returns a list of IAM users that are in the specified IAM group''' + """Returns a list of IAM users that are in the specified IAM group""" @abstractmethod def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: - '''Retrieves the specified inline policy document that is embedded in the specified IAM group''' + """Retrieves the specified inline policy document that is embedded in the specified IAM group""" @abstractmethod def iam_get_policy(self, policy_arn: str) -> dict: - '''Retrieves information about the specified managed policy''' + """Retrieves information about the specified managed policy""" @abstractmethod def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: - '''Retrieves information about the specified version of the specified managed policy''' + """Retrieves information about the specified version of the specified managed policy""" @abstractmethod def iam_get_user(self, user_name: str) -> dict: - '''Retrieves information about the specified IAM user''' + """Retrieves information about the specified IAM user""" @abstractmethod def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: - '''Retrieves the specified inline policy document that is embedded in the specified IAM user''' + """Retrieves the specified inline policy document that is embedded in the specified IAM user""" @abstractmethod def iam_list_access_keys(self, user_name: str) -> dict: - '''Returns information about the access key IDs associated with the specified IAM user''' + """Returns information about the access key IDs associated with the specified IAM user""" @abstractmethod def iam_list_attached_group_policies(self, group_name: str) -> dict: - '''Lists all managed policies that are attached to the specified IAM group''' + """Lists all managed policies that are attached to the specified IAM group""" @abstractmethod def 
iam_list_attached_user_policies(self, user_name: str) -> dict: - '''Lists all managed policies that are attached to the specified IAM user''' + """Lists all managed policies that are attached to the specified IAM user""" @abstractmethod def iam_list_entities_for_policy(self, policy_arn: str) -> dict: - '''Lists all IAM users, groups, and roles that the specified managed policy is attached to''' + """Lists all IAM users, groups, and roles that the specified managed policy is attached to""" @abstractmethod def iam_list_group_policies(self, group_name: str) -> dict: - '''Lists the names of the inline policies that are embedded in the specified IAM group''' + """Lists the names of the inline policies that are embedded in the specified IAM group""" @abstractmethod def iam_list_groups(self) -> dict: - '''Lists the IAM groups''' + """Lists the IAM groups""" @abstractmethod def iam_list_groups_for_user(self, user_name: str) -> dict: - '''Lists the IAM groups that the specified IAM user belongs to''' + """Lists the IAM groups that the specified IAM user belongs to""" @abstractmethod def iam_list_policies(self) -> dict: - '''Lists all the managed policies that are available in your AWS account''' + """Lists all the managed policies that are available in your AWS account""" @abstractmethod def iam_list_policy_versions(self, policy_arn: str) -> dict: - '''Lists information about the versions of the specified managed policy''' + """Lists information about the versions of the specified managed policy""" @abstractmethod def iam_list_user_policies(self, user_name: str) -> dict: - '''Lists the names of the inline policies embedded in the specified IAM user''' + """Lists the names of the inline policies embedded in the specified IAM user""" @abstractmethod def iam_list_users(self) -> dict: - '''Lists the IAM users''' + """Lists the IAM users""" @abstractmethod def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: - '''Adds or updates an inline policy document that is embedded in the specified IAM group''' + """Adds or updates an inline policy document that is embedded in the specified IAM group""" @abstractmethod def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: - '''Adds or updates an inline policy document that is embedded in the specified IAM user''' + """Adds or updates an inline policy document that is embedded in the specified IAM user""" @abstractmethod def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: - '''Removes the specified user from the specified group''' + """Removes the specified user from the specified group""" @abstractmethod def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: - '''Updates the name and/or the path of the specified IAM group''' + """Updates the name and/or the path of the specified IAM group""" @abstractmethod def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict: - '''Updates the name and/or the path of the specified IAM user''' + """Updates the name and/or the path of the specified IAM user""" @abstractmethod def iam_tag_user(self, user_name: str, tags: list) -> dict: - '''Adds one or more tags to an IAM user''' + """Adds one or more tags to an IAM user""" @abstractmethod def iam_list_user_tags(self, user_name: str) -> dict: - '''List tags of IAM user''' + """List tags of IAM user""" @abstractmethod def iam_untag_user(self, 
user_name: str, tag_keys: list) -> dict: - '''Removes the specified tags from the user''' \ No newline at end of file + """Removes the specified tags from the user""" diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index 3e0806c..b84a3a2 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ -16,6 +16,7 @@ from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing import wait_for_success from frostfs_testlib.utils import json_utils from frostfs_testlib.utils.cli_utils import parse_cmd_table, parse_netmap_output +from frostfs_testlib.utils.file_utils import TestFile logger = logging.getLogger("NeoLogger") @@ -81,7 +82,7 @@ def get_object( no_progress: bool = True, session: Optional[str] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, -) -> str: +) -> TestFile: """ GET from FrostFS. @@ -103,14 +104,14 @@ def get_object( if not write_object: write_object = str(uuid.uuid4()) - file_path = os.path.join(ASSETS_DIR, write_object) + test_file = TestFile(os.path.join(ASSETS_DIR, write_object)) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli.object.get( rpc_endpoint=endpoint, cid=cid, oid=oid, - file=file_path, + file=test_file, bearer=bearer, no_progress=no_progress, xhdr=xhdr, @@ -118,7 +119,7 @@ def get_object( timeout=timeout, ) - return file_path + return test_file @reporter.step("Get Range Hash from {endpoint}") @@ -357,7 +358,7 @@ def get_range( Returns: (str, bytes) - path to the file with range content and content of this file as bytes """ - range_file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4())) + test_file = TestFile(os.path.join(ASSETS_DIR, str(uuid.uuid4()))) cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) cli.object.range( @@ -365,16 +366,16 @@ def get_range( cid=cid, oid=oid, range=range_cut, - file=range_file_path, + file=test_file, bearer=bearer, xhdr=xhdr, session=session, timeout=timeout, ) - with open(range_file_path, "rb") as file: + with open(test_file, "rb") as file: content = file.read() - return range_file_path, content + return test_file, content @reporter.step("Lock Object") diff --git a/src/frostfs_testlib/steps/http/http_gate.py b/src/frostfs_testlib/steps/http/http_gate.py index 373283f..117cded 100644 --- a/src/frostfs_testlib/steps/http/http_gate.py +++ b/src/frostfs_testlib/steps/http/http_gate.py @@ -12,7 +12,7 @@ import requests from frostfs_testlib import reporter from frostfs_testlib.cli import GenericCli -from frostfs_testlib.resources.common import SIMPLE_OBJECT_SIZE +from frostfs_testlib.resources.common import ASSETS_DIR, SIMPLE_OBJECT_SIZE from frostfs_testlib.s3.aws_cli_client import command_options from frostfs_testlib.shell import Shell from frostfs_testlib.shell.local_shell import LocalShell @@ -20,11 +20,10 @@ from frostfs_testlib.steps.cli.object import get_object from frostfs_testlib.steps.storage_policy import get_nodes_without_object from frostfs_testlib.storage.cluster import ClusterNode, StorageNode from frostfs_testlib.testing.test_control import retry -from frostfs_testlib.utils.file_utils import get_file_hash +from frostfs_testlib.utils.file_utils import TestFile, get_file_hash logger = logging.getLogger("NeoLogger") -ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/") local_shell = LocalShell() @@ -64,10 +63,10 @@ def get_via_http_gate( logger.info(f"Request: {request}") _attach_allure_step(request, resp.status_code) - file_path = os.path.join(os.getcwd(), ASSETS_DIR, 
f"{cid}_{oid}") - with open(file_path, "wb") as file: + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}")) + with open(test_file, "wb") as file: shutil.copyfileobj(resp.raw, file) - return file_path + return test_file @reporter.step("Get via Zip HTTP Gate") @@ -93,11 +92,11 @@ def get_via_zip_http_gate(cid: str, prefix: str, node: ClusterNode, timeout: Opt logger.info(f"Request: {request}") _attach_allure_step(request, resp.status_code) - file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_archive.zip") - with open(file_path, "wb") as file: + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_archive.zip")) + with open(test_file, "wb") as file: shutil.copyfileobj(resp.raw, file) - with zipfile.ZipFile(file_path, "r") as zip_ref: + with zipfile.ZipFile(test_file, "r") as zip_ref: zip_ref.extractall(ASSETS_DIR) return os.path.join(os.getcwd(), ASSETS_DIR, prefix) @@ -140,10 +139,10 @@ def get_via_http_gate_by_attribute( logger.info(f"Request: {request}") _attach_allure_step(request, resp.status_code) - file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{str(uuid.uuid4())}") - with open(file_path, "wb") as file: + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{str(uuid.uuid4())}")) + with open(test_file, "wb") as file: shutil.copyfileobj(resp.raw, file) - return file_path + return test_file @reporter.step("Upload via HTTP Gate") @@ -239,7 +238,7 @@ def upload_via_http_gate_curl( @retry(max_attempts=3, sleep_interval=1) @reporter.step("Get via HTTP Gate using Curl") -def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> str: +def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> TestFile: """ This function gets given object from HTTP gate using curl utility. cid: CID to get object from @@ -247,12 +246,12 @@ def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> str: node: node for request """ request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}" - file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}") + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}")) curl = GenericCli("curl", node.host) - curl(f"-k ", f"{request} > {file_path}", shell=local_shell) + curl(f"-k ", f"{request} > {test_file}", shell=local_shell) - return file_path + return test_file def _attach_allure_step(request: str, status_code: int, req_type="GET"): diff --git a/src/frostfs_testlib/utils/file_utils.py b/src/frostfs_testlib/utils/file_utils.py index d238106..e01ce31 100644 --- a/src/frostfs_testlib/utils/file_utils.py +++ b/src/frostfs_testlib/utils/file_utils.py @@ -10,7 +10,39 @@ from frostfs_testlib.resources.common import ASSETS_DIR logger = logging.getLogger("NeoLogger") -def generate_file(size: int) -> str: +class TestFile(os.PathLike): + def __init__(self, path: str): + self.path = path + + def __del__(self): + logger.debug(f"Removing file {self.path}") + if os.path.exists(self.path): + os.remove(self.path) + + def __str__(self): + return self.path + + def __repr__(self): + return self.path + + def __fspath__(self): + return self.path + + +def ensure_directory(path): + directory = os.path.dirname(path) + + if not os.path.exists(directory): + os.makedirs(directory) + + +def ensure_directory_opener(path, flags): + ensure_directory(path) + return os.open(path, flags) + + +@reporter.step("Generate file with size {size}") +def generate_file(size: int) -> TestFile: """Generates a binary file with the specified size in bytes. 
Args: @@ -19,19 +51,20 @@ def generate_file(size: int) -> str: Returns: The path to the generated file. """ - file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4())) - with open(file_path, "wb") as file: + test_file = TestFile(os.path.join(ASSETS_DIR, str(uuid.uuid4()))) + with open(test_file, "wb", opener=ensure_directory_opener) as file: file.write(os.urandom(size)) - logger.info(f"File with size {size} bytes has been generated: {file_path}") + logger.info(f"File with size {size} bytes has been generated: {test_file}") - return file_path + return test_file +@reporter.step("Generate file with content of size {size}") def generate_file_with_content( size: int, - file_path: Optional[str] = None, + file_path: Optional[str | TestFile] = None, content: Optional[str] = None, -) -> str: +) -> TestFile: """Creates a new file with specified content. Args: @@ -48,20 +81,22 @@ def generate_file_with_content( content = os.urandom(size) mode = "wb" + test_file = None if not file_path: - file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))) + elif isinstance(file_path, TestFile): + test_file = file_path else: - if not os.path.exists(os.path.dirname(file_path)): - os.makedirs(os.path.dirname(file_path)) + test_file = TestFile(file_path) - with open(file_path, mode) as file: + with open(test_file, mode, opener=ensure_directory_opener) as file: file.write(content) - return file_path + return test_file @reporter.step("Get File Hash") -def get_file_hash(file_path: str, len: Optional[int] = None, offset: Optional[int] = None) -> str: +def get_file_hash(file_path: str | TestFile, len: Optional[int] = None, offset: Optional[int] = None) -> str: """Generates hash for the specified file. Args: @@ -88,7 +123,7 @@ def get_file_hash(file_path: str, len: Optional[int] = None, offset: Optional[in @reporter.step("Concatenation set of files to one file") -def concat_files(file_paths: list, resulting_file_path: Optional[str] = None) -> str: +def concat_files(file_paths: list[str | TestFile], resulting_file_path: Optional[str | TestFile] = None) -> TestFile: """Concatenates several files into a single file. Args: @@ -98,16 +133,24 @@ def concat_files(file_paths: list, resulting_file_path: Optional[str] = None) -> Returns: Path to the resulting file. """ + + test_file = None if not resulting_file_path: - resulting_file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())) - with open(resulting_file_path, "wb") as f: + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))) + elif isinstance(resulting_file_path, TestFile): + test_file = resulting_file_path + else: + test_file = TestFile(resulting_file_path) + + with open(test_file, "wb", opener=ensure_directory_opener) as f: for file in file_paths: with open(file, "rb") as part_file: f.write(part_file.read()) - return resulting_file_path + return test_file -def split_file(file_path: str, parts: int) -> list[str]: +@reporter.step("Split file to {parts} parts") +def split_file(file_path: str | TestFile, parts: int) -> list[TestFile]: """Splits specified file into several specified number of parts. Each part is saved under name `{original_file}_part_{i}`. 
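The point of the TestFile wrapper threaded through these helpers is that file lifetime follows object lifetime: __fspath__ lets a TestFile go anywhere a plain path is accepted, and __del__ removes the backing file once the last reference dies. A short usage sketch under those assumptions (hypothetical test code, not part of this patch):

from frostfs_testlib.utils.file_utils import generate_file, get_file_hash

def roundtrip_example() -> str:
    # generate_file returns a TestFile backed by a random file under ASSETS_DIR
    test_file = generate_file(1024)
    # TestFile is os.PathLike, so any path consumer accepts it directly
    checksum = get_file_hash(test_file)
    # once test_file becomes unreachable, TestFile.__del__ deletes the file from disk
    return checksum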
@@ -129,7 +172,7 @@ def split_file(file_path: str, parts: int) -> list[str]: part_file_paths = [] for content_offset in range(0, content_size + 1, chunk_size): part_file_name = f"{file_path}_part_{part_id}" - part_file_paths.append(part_file_name) + part_file_paths.append(TestFile(part_file_name)) with open(part_file_name, "wb") as out_file: out_file.write(content[content_offset : content_offset + chunk_size]) part_id += 1 @@ -137,9 +180,8 @@ def split_file(file_path: str, parts: int) -> list[str]: return part_file_paths -def get_file_content( - file_path: str, content_len: Optional[int] = None, mode: str = "r", offset: Optional[int] = None -) -> Any: +@reporter.step("Get file content") +def get_file_content(file_path: str | TestFile, content_len: Optional[int] = None, mode: str = "r", offset: Optional[int] = None) -> Any: """Returns content of specified file. Args: From f1b2fbd47bb8fed982ac3aae2a9065aa14618e5e Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Tue, 25 Jun 2024 02:31:14 +0300 Subject: [PATCH 254/363] [#250] Adjustments for tests optimization Signed-off-by: a.berezin --- src/frostfs_testlib/s3/aws_cli_client.py | 5 ----- src/frostfs_testlib/s3/boto3_client.py | 10 ++++------ src/frostfs_testlib/steps/s3/s3_helper.py | 1 - .../storage/controllers/cluster_state_controller.py | 6 ++++-- src/frostfs_testlib/testing/parallel.py | 6 ++++-- 5 files changed, 12 insertions(+), 16 deletions(-) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index f6488f5..3568037 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -91,7 +91,6 @@ class AwsCliClient(S3ClientWrapper): if location_constraint: cmd += f" --create-bucket-configuration LocationConstraint={location_constraint}" self.local_shell.exec(cmd) - sleep(S3_SYNC_WAIT_TIME) return bucket @@ -106,7 +105,6 @@ class AwsCliClient(S3ClientWrapper): def delete_bucket(self, bucket: str) -> None: cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" self.local_shell.exec(cmd, command_options) - sleep(S3_SYNC_WAIT_TIME) @reporter.step("Head bucket S3") def head_bucket(self, bucket: str) -> None: @@ -397,7 +395,6 @@ class AwsCliClient(S3ClientWrapper): ) output = self.local_shell.exec(cmd, command_options).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME) return response @reporter.step("Delete object S3") @@ -408,7 +405,6 @@ class AwsCliClient(S3ClientWrapper): f"--key {key} {version} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd, command_options).stdout - sleep(S3_SYNC_WAIT_TIME) return self._to_json(output) @reporter.step("Delete object versions S3") @@ -435,7 +431,6 @@ class AwsCliClient(S3ClientWrapper): f"--delete file://{file_path} --endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd, command_options).stdout - sleep(S3_SYNC_WAIT_TIME) return self._to_json(output) @reporter.step("Delete object versions S3 without delete markers") diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index bdf7a9f..a8a7828 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -134,7 +134,6 @@ class Boto3ClientWrapper(S3ClientWrapper): s3_bucket = self.boto3_client.create_bucket(**params) log_command_execution(f"Created S3 bucket {bucket}", s3_bucket) - sleep(S3_SYNC_WAIT_TIME) return bucket 
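With the unconditional sleep(S3_SYNC_WAIT_TIME) pauses being stripped from these wrappers, a test that still has to tolerate S3 sync lag should poll for its condition instead of sleeping. A hypothetical helper built on the testlib's wait_for_success decorator (the list_objects call on the wrapper is assumed; names are illustrative, not part of this patch):

from frostfs_testlib.testing.test_control import wait_for_success

@wait_for_success(60, 5)  # retry the body for up to 60 seconds, every 5 seconds
def wait_until_object_listed(s3_client, bucket: str, key: str) -> None:
    # keeps failing and retrying until the object shows up in the listing or time runs out
    assert key in s3_client.list_objects(bucket), f"{key} is not listed in {bucket} yet"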
@reporter.step("List buckets S3") @@ -155,7 +154,6 @@ class Boto3ClientWrapper(S3ClientWrapper): def delete_bucket(self, bucket: str) -> None: response = self.boto3_client.delete_bucket(Bucket=bucket) log_command_execution("S3 Delete bucket result", response) - sleep(S3_SYNC_WAIT_TIME) @reporter.step("Head bucket S3") @report_error @@ -364,7 +362,6 @@ class Boto3ClientWrapper(S3ClientWrapper): params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} response = self.boto3_client.delete_object(**params) log_command_execution("S3 Delete object result", response) - sleep(S3_SYNC_WAIT_TIME) return response @reporter.step("Delete objects S3") @@ -375,7 +372,6 @@ class Boto3ClientWrapper(S3ClientWrapper): assert ( "Errors" not in response ), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Message: {response["Errors"]["Message"]}' - sleep(S3_SYNC_WAIT_TIME) return response @reporter.step("Delete object versions S3") @@ -413,8 +409,10 @@ class Boto3ClientWrapper(S3ClientWrapper): grant_write: Optional[str] = None, grant_read: Optional[str] = None, ) -> list: - # pytest.skip("Method put_object_acl is not supported by boto3 client") - raise NotImplementedError("Unsupported for boto3 client") + params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} + response = self.boto3_client.put_object_acl(**params) + log_command_execution("S3 put object ACL", response) + return response.get("Grants") @reporter.step("Get object ACL") @report_error diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py index ab0cee3..9b85766 100644 --- a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3/s3_helper.py @@ -47,7 +47,6 @@ def set_bucket_versioning(s3_client: S3ClientWrapper, bucket: str, status: Versi if status == VersioningStatus.UNDEFINED: return - s3_client.get_bucket_versioning_status(bucket) s3_client.put_bucket_versioning(bucket, status=status) bucket_status = s3_client.get_bucket_versioning_status(bucket) assert bucket_status == status.value, f"Expected {bucket_status} status. 
Got {status.value}" diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 4003dfd..3c6c268 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -440,9 +440,11 @@ class ClusterStateController: self.await_node_status(status, wallet, cluster_node) @wait_for_success(80, 8, title="Wait for node status become {status}") - def await_node_status(self, status: NodeStatus, wallet: WalletInfo, cluster_node: ClusterNode): + def await_node_status(self, status: NodeStatus, wallet: WalletInfo, cluster_node: ClusterNode, checker_node: ClusterNode = None): frostfs_cli = FrostfsCli(self.shell, FROSTFS_CLI_EXEC, wallet.config_path) - netmap = NetmapParser.snapshot_all_nodes(frostfs_cli.netmap.snapshot(cluster_node.storage_node.get_rpc_endpoint()).stdout) + if not checker_node: + checker_node = cluster_node + netmap = NetmapParser.snapshot_all_nodes(frostfs_cli.netmap.snapshot(checker_node.storage_node.get_rpc_endpoint()).stdout) netmap = [node for node in netmap if cluster_node.host_ip == node.node] if status == NodeStatus.OFFLINE: assert cluster_node.host_ip not in netmap, f"{cluster_node.host_ip} not in Offline" diff --git a/src/frostfs_testlib/testing/parallel.py b/src/frostfs_testlib/testing/parallel.py index 1c30cec..9c36118 100644 --- a/src/frostfs_testlib/testing/parallel.py +++ b/src/frostfs_testlib/testing/parallel.py @@ -2,6 +2,8 @@ import itertools from concurrent.futures import Future, ThreadPoolExecutor from typing import Callable, Collection, Optional, Union +MAX_WORKERS = 50 + def parallel( fn: Union[Callable, list[Callable]], @@ -54,7 +56,7 @@ def _run_by_fn_list(fn_list: list[Callable], *args, **kwargs) -> list[Future]: futures: list[Future] = [] - with ThreadPoolExecutor(max_workers=len(fn_list)) as executor: + with ThreadPoolExecutor(max_workers=min(len(fn_list), MAX_WORKERS)) as executor: for fn in fn_list: task_args = _get_args(*args) task_kwargs = _get_kwargs(**kwargs) @@ -67,7 +69,7 @@ def _run_by_fn_list(fn_list: list[Callable], *args, **kwargs) -> list[Future]: def _run_by_items(fn: Callable, parallel_items: Collection, *args, **kwargs) -> list[Future]: futures: list[Future] = [] - with ThreadPoolExecutor(max_workers=len(parallel_items)) as executor: + with ThreadPoolExecutor(max_workers=min(len(parallel_items), MAX_WORKERS)) as executor: for item in parallel_items: task_args = _get_args(*args) task_kwargs = _get_kwargs(**kwargs) From da16f3c3a52707a1c7e9c30835694f778f6c3aec Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Fri, 21 Jun 2024 10:41:28 +0300 Subject: [PATCH 255/363] [#248] add metrics methods --- src/frostfs_testlib/steps/metrics.py | 45 +++++++++++++++++++ .../storage/dataclasses/metrics.py | 10 ++--- 2 files changed, 50 insertions(+), 5 deletions(-) create mode 100644 src/frostfs_testlib/steps/metrics.py diff --git a/src/frostfs_testlib/steps/metrics.py b/src/frostfs_testlib/steps/metrics.py new file mode 100644 index 0000000..d999171 --- /dev/null +++ b/src/frostfs_testlib/steps/metrics.py @@ -0,0 +1,45 @@ +import re + +from frostfs_testlib import reporter +from frostfs_testlib.testing.test_control import wait_for_success +from frostfs_testlib.storage.cluster import ClusterNode + + +@reporter.step("Check metrics result") +@wait_for_success(interval=10) +def check_metrics_counter( + cluster_nodes: list[ClusterNode], + operator: str = "==", + counter_exp: int = 0, 
+ parse_from_command: bool = False, + **metrics_greps: str, +): + counter_act = 0 + for cluster_node in cluster_nodes: + counter_act += get_metrics_value(cluster_node, parse_from_command, **metrics_greps) + assert eval( + f"{counter_act} {operator} {counter_exp}" + ), f"Expected: {counter_exp}, Actual: {counter_act} in node: {cluster_node}" + + +@reporter.step("Get metrics value from node: {node}") +def get_metrics_value(node: ClusterNode, parse_from_command: bool = False, **metrics_greps: str): + try: + command_result = node.metrics.storage.get_metrics_search_by_greps(**metrics_greps) + if parse_from_command: + metrics_counter = calc_metrics_count_from_stdout(command_result.stdout, **metrics_greps) + else: + metrics_counter = calc_metrics_count_from_stdout(command_result.stdout) + except RuntimeError as e: + metrics_counter = 0 + + return metrics_counter + + +@reporter.step("Parse metrics count and calc sum of result") +def calc_metrics_count_from_stdout(metric_result_stdout: str, command: str = None): + if command: + result = re.findall(rf"{command}\s*([\d.e+-]+)", metric_result_stdout) + else: + result = re.findall(r"}\s*([\d.e+-]+)", metric_result_stdout) + return sum(map(lambda x: int(float(x)), result)) diff --git a/src/frostfs_testlib/storage/dataclasses/metrics.py b/src/frostfs_testlib/storage/dataclasses/metrics.py index c79dcf8..81e757c 100644 --- a/src/frostfs_testlib/storage/dataclasses/metrics.py +++ b/src/frostfs_testlib/storage/dataclasses/metrics.py @@ -16,11 +16,6 @@ class StorageMetrics: self.host = host self.metrics_endpoint = metrics_endpoint - def get_metric_container(self, metric: str, cid: str) -> CommandResult: - shell = self.host.get_shell() - result = shell.exec(f"curl -s {self.metrics_endpoint} | grep {metric} |grep {cid}") - return result - def get_metrics_search_by_greps(self, **greps) -> CommandResult: """ Get a metrics, search by: cid, metric_type, shard_id etc. 
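For patch 255's `calc_metrics_count_from_stdout` above: it sums every numeric sample in Prometheus-style scrape output, matching either after a named metric or after each label set's closing brace. A self-contained illustration with made-up sample lines:

```python
import re

stdout = (
    'frostfs_node_objects_total{cid="abc",shard="1"} 42\n'
    'frostfs_node_objects_total{cid="abc",shard="2"} 1.3e+01\n'
)

# Same pattern the helper uses when no command name is given:
values = re.findall(r"}\s*([\d.e+-]+)", stdout)
print(sum(int(float(value)) for value in values))  # 55
```

The `int(float(...))` round-trip is what lets the helper accept scientific notation such as `1.3e+01` while still returning an integer counter.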
@@ -34,3 +29,8 @@ class StorageMetrics: additional_greps = " |grep ".join([grep_command for grep_command in greps.values()]) result = shell.exec(f"curl -s {self.metrics_endpoint} | grep {additional_greps}") return result + + def get_all_metrics(self) -> CommandResult: + shell = self.host.get_shell() + result = shell.exec(f"curl -s {self.metrics_endpoint}") + return result From c9e4c2c7bbded6a745e981e75f7cd1d234e74b22 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Tue, 25 Jun 2024 18:56:10 +0300 Subject: [PATCH 256/363] [#251] Update get object nodes command call Signed-off-by: a.berezin --- src/frostfs_testlib/cli/frostfs_cli/object.py | 1 + src/frostfs_testlib/steps/cli/object.py | 25 +++++++++++-------- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py index 5d5bd91..55c92be 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/object.py +++ b/src/frostfs_testlib/cli/frostfs_cli/object.py @@ -362,6 +362,7 @@ class FrostfsCliObject(CliCommand): trace: bool = False, root: bool = False, verify_presence_all: bool = False, + json: bool = False, ttl: Optional[int] = None, xhdr: Optional[dict] = None, timeout: Optional[str] = None, diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index b84a3a2..7de7a71 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ -722,21 +722,27 @@ def get_object_nodes( cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config) - result_object_nodes = cli.object.nodes( + response = cli.object.nodes( rpc_endpoint=endpoint, cid=cid, oid=oid, bearer=bearer, ttl=1 if is_direct else None, + json=True, xhdr=xhdr, timeout=timeout, verify_presence_all=verify_presence_all, ) - parsing_output = parse_cmd_table(result_object_nodes.stdout, "|") - list_object_nodes = [ - node for node in parsing_output if node["should_contain_object"] == "true" and node["actually_contains_object"] == "true" - ] + response_json = json.loads(response.stdout) + # Currently, the command will show expected and confirmed nodes. 
+ # And we (currently) count only nodes which are both expected and confirmed + object_nodes_id = { + required_node + for data_object in response_json["data_objects"] + for required_node in data_object["required_nodes"] + if required_node in data_object["confirmed_nodes"] + } netmap_nodes_list = parse_netmap_output( cli.netmap.snapshot( @@ -745,14 +751,11 @@ def get_object_nodes( ).stdout ) netmap_nodes = [ - netmap_node - for object_node in list_object_nodes - for netmap_node in netmap_nodes_list - if object_node["node_id"] == netmap_node.node_id + netmap_node for object_node in object_nodes_id for netmap_node in netmap_nodes_list if object_node == netmap_node.node_id ] - result = [ + object_nodes = [ cluster_node for netmap_node in netmap_nodes for cluster_node in cluster.cluster_nodes if netmap_node.node == cluster_node.host_ip ] - return result + return object_nodes From 3a4204f2e4d9180b32f79a630d4d7ed48ef79657 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Fri, 28 Jun 2024 15:18:20 +0300 Subject: [PATCH 257/363] [#253] Update S3 clients and permission matrixes Signed-off-by: a.berezin --- src/frostfs_testlib/resources/s3_acl_grants.py | 6 +++--- src/frostfs_testlib/s3/aws_cli_client.py | 8 ++++---- src/frostfs_testlib/s3/boto3_client.py | 8 ++++---- src/frostfs_testlib/utils/file_utils.py | 11 ++++++++--- src/frostfs_testlib/utils/string_utils.py | 14 ++++++++++++++ 5 files changed, 33 insertions(+), 14 deletions(-) diff --git a/src/frostfs_testlib/resources/s3_acl_grants.py b/src/frostfs_testlib/resources/s3_acl_grants.py index 37005e8..a716bc5 100644 --- a/src/frostfs_testlib/resources/s3_acl_grants.py +++ b/src/frostfs_testlib/resources/s3_acl_grants.py @@ -4,6 +4,6 @@ ALL_USERS_GROUP_READ_GRANT = {"Grantee": {"Type": "Group", "URI": ALL_USERS_GROU CANONICAL_USER_FULL_CONTROL_GRANT = {"Grantee": {"Type": "CanonicalUser"}, "Permission": "FULL_CONTROL"} # https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl -PRIVATE_GRANTS = [CANONICAL_USER_FULL_CONTROL_GRANT] -PUBLIC_READ_GRANTS = [CANONICAL_USER_FULL_CONTROL_GRANT, ALL_USERS_GROUP_READ_GRANT] -PUBLIC_READ_WRITE_GRANTS = [CANONICAL_USER_FULL_CONTROL_GRANT, ALL_USERS_GROUP_WRITE_GRANT, ALL_USERS_GROUP_READ_GRANT] +PRIVATE_GRANTS = [] +PUBLIC_READ_GRANTS = [ALL_USERS_GROUP_READ_GRANT] +PUBLIC_READ_WRITE_GRANTS = [ALL_USERS_GROUP_WRITE_GRANT, ALL_USERS_GROUP_READ_GRANT] diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 3568037..ae9254c 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -1,7 +1,6 @@ import json import logging import os -import uuid from datetime import datetime from time import sleep from typing import Literal, Optional, Union @@ -11,6 +10,7 @@ from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, R from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.shell import CommandOptions from frostfs_testlib.shell.local_shell import LocalShell +from frostfs_testlib.utils import string_utils # TODO: Refactor this code to use shell instead of _cmd_run from frostfs_testlib.utils.cli_utils import _configure_aws_cli @@ -68,7 +68,7 @@ class AwsCliClient(S3ClientWrapper): location_constraint: Optional[str] = None, ) -> str: if bucket is None: - bucket = str(uuid.uuid4()) + bucket = string_utils.unique_name("bucket-") if object_lock_enabled_for_bucket is None: object_lock = "" @@ -229,7 +229,7 @@ class 
AwsCliClient(S3ClientWrapper): if bucket is None: bucket = source_bucket if key is None: - key = os.path.join(os.getcwd(), str(uuid.uuid4())) + key = string_utils.unique_name("copy-object-") copy_source = f"{source_bucket}/{source_key}" cmd = ( @@ -315,7 +315,7 @@ class AwsCliClient(S3ClientWrapper): object_range: Optional[tuple[int, int]] = None, full_output: bool = False, ) -> dict | TestFile: - test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))) + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, string_utils.unique_name("dl-object-"))) version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api get-object --bucket {bucket} --key {key} " diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index a8a7828..150570c 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -1,7 +1,6 @@ import json import logging import os -import uuid from datetime import datetime from functools import wraps from time import sleep @@ -16,6 +15,7 @@ from mypy_boto3_s3 import S3Client from frostfs_testlib import reporter from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict +from frostfs_testlib.utils import string_utils # TODO: Refactor this code to use shell instead of _cmd_run from frostfs_testlib.utils.cli_utils import _configure_aws_cli, log_command_execution @@ -115,7 +115,7 @@ class Boto3ClientWrapper(S3ClientWrapper): location_constraint: Optional[str] = None, ) -> str: if bucket is None: - bucket = str(uuid.uuid4()) + bucket = string_utils.unique_name("bucket-") params = {"Bucket": bucket} if object_lock_enabled_for_bucket is not None: @@ -439,7 +439,7 @@ class Boto3ClientWrapper(S3ClientWrapper): if bucket is None: bucket = source_bucket if key is None: - key = os.path.join(os.getcwd(), str(uuid.uuid4())) + key = string_utils.unique_name("copy-object-") copy_source = f"{source_bucket}/{source_key}" params = { @@ -476,7 +476,7 @@ class Boto3ClientWrapper(S3ClientWrapper): if full_output: return response - test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))) + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, string_utils.unique_name("dl-object-"))) with open(test_file, "wb") as file: chunk = response["Body"].read(1024) while chunk: diff --git a/src/frostfs_testlib/utils/file_utils.py b/src/frostfs_testlib/utils/file_utils.py index e01ce31..c2b497f 100644 --- a/src/frostfs_testlib/utils/file_utils.py +++ b/src/frostfs_testlib/utils/file_utils.py @@ -6,6 +6,7 @@ from typing import Any, Optional from frostfs_testlib import reporter from frostfs_testlib.resources.common import ASSETS_DIR +from frostfs_testlib.utils import string_utils logger = logging.getLogger("NeoLogger") @@ -41,7 +42,9 @@ def ensure_directory_opener(path, flags): return os.open(path, flags) -@reporter.step("Generate file with size {size}") +# TODO: Do not add {size} to title yet, since it produces dynamic info in top level steps +# Use object_size dt in future as argument +@reporter.step("Generate file") def generate_file(size: int) -> TestFile: """Generates a binary file with the specified size in bytes. @@ -51,7 +54,7 @@ def generate_file(size: int) -> TestFile: Returns: The path to the generated file. 
""" - test_file = TestFile(os.path.join(ASSETS_DIR, str(uuid.uuid4()))) + test_file = TestFile(os.path.join(ASSETS_DIR, string_utils.unique_name("object-"))) with open(test_file, "wb", opener=ensure_directory_opener) as file: file.write(os.urandom(size)) logger.info(f"File with size {size} bytes has been generated: {test_file}") @@ -59,7 +62,9 @@ def generate_file(size: int) -> TestFile: return test_file -@reporter.step("Generate file with content of size {size}") +# TODO: Do not add {size} to title yet, since it produces dynamic info in top level steps +# Use object_size dt in future as argument +@reporter.step("Generate file with content") def generate_file_with_content( size: int, file_path: Optional[str | TestFile] = None, diff --git a/src/frostfs_testlib/utils/string_utils.py b/src/frostfs_testlib/utils/string_utils.py index a80192c..d8e91a4 100644 --- a/src/frostfs_testlib/utils/string_utils.py +++ b/src/frostfs_testlib/utils/string_utils.py @@ -1,12 +1,26 @@ import random import re import string +from datetime import datetime ONLY_ASCII_LETTERS = string.ascii_letters DIGITS_AND_ASCII_LETTERS = string.ascii_letters + string.digits NON_DIGITS_AND_LETTERS = string.punctuation +def unique_name(prefix: str = ""): + """ + Generate unique short name of anything with prefix. + This should be unique in scope of multiple runs + + Args: + prefix: prefix for unique name generation + Returns: + unique name string + """ + return f"{prefix}{hex(int(datetime.now().timestamp() * 1000000))}" + + def random_string(length: int = 5, source: str = ONLY_ASCII_LETTERS): """ Generate random string from source letters list From f4460194bcc24d22d698d0409e5336fe25390b1f Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Fri, 28 Jun 2024 17:13:35 +0300 Subject: [PATCH 258/363] [#252] add filter priority to get_filtered_logs method --- src/frostfs_testlib/hosting/docker_host.py | 1 + src/frostfs_testlib/hosting/interfaces.py | 3 +++ src/frostfs_testlib/steps/metrics.py | 2 +- 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 3c9883a..0fb5af0 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -240,6 +240,7 @@ class DockerHost(Host): until: Optional[datetime] = None, unit: Optional[str] = None, exclude_filter: Optional[str] = None, + priority: Optional[str] = None ) -> str: client = self._get_docker_client() filtered_logs = "" diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index 13051e2..36c2804 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -297,6 +297,7 @@ class Host(ABC): until: Optional[datetime] = None, unit: Optional[str] = None, exclude_filter: Optional[str] = None, + priority: Optional[str] = None ) -> str: """Get logs from host filtered by regex. @@ -305,6 +306,8 @@ class Host(ABC): since: If set, limits the time from which logs should be collected. Must be in UTC. until: If set, limits the time until which logs should be collected. Must be in UTC. unit: required unit. + priority: logs level, 0 - emergency, 7 - debug. All messages with that code and higher. + For example, if we specify the -p 2 option, journalctl will show all messages with levels 2, 1 and 0. Returns: Found entries as str if any found. 
diff --git a/src/frostfs_testlib/steps/metrics.py b/src/frostfs_testlib/steps/metrics.py index d999171..29e49d4 100644 --- a/src/frostfs_testlib/steps/metrics.py +++ b/src/frostfs_testlib/steps/metrics.py @@ -19,7 +19,7 @@ def check_metrics_counter( counter_act += get_metrics_value(cluster_node, parse_from_command, **metrics_greps) assert eval( f"{counter_act} {operator} {counter_exp}" - ), f"Expected: {counter_exp}, Actual: {counter_act} in node: {cluster_node}" + ), f"Expected: {counter_exp} {operator} Actual: {counter_act} in node: {cluster_node}" @reporter.step("Get metrics value from node: {node}") From 376499a7e8c5dc2381da834b4c2cd7221da04371 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Fri, 28 Jun 2024 16:41:57 +0300 Subject: [PATCH 259/363] [#254] Added change for EC policy Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/cli/netmap_parser.py | 2 ++ src/frostfs_testlib/storage/dataclasses/storage_object_info.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/src/frostfs_testlib/cli/netmap_parser.py b/src/frostfs_testlib/cli/netmap_parser.py index 94d12b8..23ac4da 100644 --- a/src/frostfs_testlib/cli/netmap_parser.py +++ b/src/frostfs_testlib/cli/netmap_parser.py @@ -15,6 +15,8 @@ class NetmapParser: "epoch_duration": r"Epoch duration: (?P\d+)", "inner_ring_candidate_fee": r"Inner Ring candidate fee: (?P\d+)", "maximum_object_size": r"Maximum object size: (?P\d+)", + "maximum_count_of_data_shards": r"Maximum count of data shards: (?P\d+)", + "maximum_count_of_parity_shards": r"Maximum count of parity shards: (?P\d+)", "withdrawal_fee": r"Withdrawal fee: (?P\d+)", "homomorphic_hashing_disabled": r"Homomorphic hashing disabled: (?Ptrue|false)", "maintenance_mode_allowed": r"Maintenance mode allowed: (?Ptrue|false)", diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py index 28fdaa5..1ecb300 100644 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -70,6 +70,8 @@ class NodeNetInfo: epoch_duration: str = None inner_ring_candidate_fee: str = None maximum_object_size: str = None + maximum_count_of_data_shards: str = None + maximum_count_of_parity_shards: str = None withdrawal_fee: str = None homomorphic_hashing_disabled: str = None maintenance_mode_allowed: str = None From 429698944e94cc7bcc40e603aa868e9ba3d12481 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Wed, 3 Jul 2024 12:02:40 +0300 Subject: [PATCH 260/363] [#256] Allow to set mix of policies for containers and buckets Signed-off-by: a.berezin --- src/frostfs_testlib/load/load_config.py | 65 ++++++++++++------- tests/test_load_config.py | 84 ++++++++++++++++--------- 2 files changed, 95 insertions(+), 54 deletions(-) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 1128096..767e9f2 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -25,6 +25,16 @@ def convert_time_to_seconds(time: int | str | None) -> int: return seconds +def force_list(input: str | list[str]): + if input is None: + return None + + if isinstance(input, list): + return list(map(str.strip, input)) + + return [input.strip()] + + class LoadType(Enum): gRPC = "grpc" S3 = "s3" @@ -142,8 +152,29 @@ class K6ProcessAllocationStrategy(Enum): PER_ENDPOINT = "PER_ENDPOINT" +class MetaConfig: + def _get_field_formatter(self, field_name: str) -> Callable | None: + data_fields = 
fields(self) + formatters = [ + field.metadata["formatter"] + for field in data_fields + if field.name == field_name and "formatter" in field.metadata and field.metadata["formatter"] != None + ] + if formatters: + return formatters[0] + + return None + + def __setattr__(self, field_name, value): + formatter = self._get_field_formatter(field_name) + if formatter: + value = formatter(value) + + super().__setattr__(field_name, value) + + @dataclass -class Preset: +class Preset(MetaConfig): # ------ COMMON ------ # Amount of objects which should be created objects_count: Optional[int] = metadata_field(all_load_scenarios, "preload_obj", None, False) @@ -158,13 +189,13 @@ class Preset: # Amount of containers which should be created containers_count: Optional[int] = metadata_field(grpc_preset_scenarios, "containers", None, False) # Container placement policy for containers for gRPC - container_placement_policy: Optional[str] = metadata_field(grpc_preset_scenarios, "policy", None, False) + container_placement_policy: Optional[list[str]] = metadata_field(grpc_preset_scenarios, "policy", None, False, formatter=force_list) # ------ S3 ------ # Amount of buckets which should be created buckets_count: Optional[int] = metadata_field(s3_preset_scenarios, "buckets", None, False) # S3 region (AKA placement policy for S3 buckets) - s3_location: Optional[str] = metadata_field(s3_preset_scenarios, "location", None, False) + s3_location: Optional[list[str]] = metadata_field(s3_preset_scenarios, "location", None, False, formatter=force_list) # Delay between containers creation and object upload for preset object_upload_delay: Optional[int] = metadata_field(all_load_scenarios, "sleep", None, False) @@ -177,7 +208,7 @@ class Preset: @dataclass -class PrometheusParams: +class PrometheusParams(MetaConfig): # Prometheus server URL server_url: Optional[str] = metadata_field(all_load_scenarios, env_variable="K6_PROMETHEUS_RW_SERVER_URL", string_repr=False) # Prometheus trend stats @@ -187,7 +218,7 @@ class PrometheusParams: @dataclass -class LoadParams: +class LoadParams(MetaConfig): # ------- CONTROL PARAMS ------- # Load type can be gRPC, HTTP, S3. 
load_type: LoadType @@ -412,6 +443,11 @@ class LoadParams: # For preset calls, bool values are passed with just -- if the value is True return f"--{meta_field.metadata['preset_argument']}" if meta_field.value else "" + if isinstance(meta_field.value, list): + return ( + " ".join(f"--{meta_field.metadata['preset_argument']} '{value}'" for value in meta_field.value) if meta_field.value else "" + ) + return f"--{meta_field.metadata['preset_argument']} '{meta_field.value}'" @staticmethod @@ -431,25 +467,6 @@ class LoadParams: return fields_with_data or [] - def _get_field_formatter(self, field_name: str) -> Callable | None: - data_fields = fields(self) - formatters = [ - field.metadata["formatter"] - for field in data_fields - if field.name == field_name and "formatter" in field.metadata and field.metadata["formatter"] != None - ] - if formatters: - return formatters[0] - - return None - - def __setattr__(self, field_name, value): - formatter = self._get_field_formatter(field_name) - if formatter: - value = formatter(value) - - super().__setattr__(field_name, value) - def __str__(self) -> str: load_type_str = self.scenario.value if self.scenario else self.load_type.value # TODO: migrate load_params defaults to testlib diff --git a/tests/test_load_config.py b/tests/test_load_config.py index 62339f6..883b1f2 100644 --- a/tests/test_load_config.py +++ b/tests/test_load_config.py @@ -3,14 +3,7 @@ from typing import Any, get_args import pytest -from frostfs_testlib.load.load_config import ( - EndpointSelectionStrategy, - LoadParams, - LoadScenario, - LoadType, - Preset, - ReadFrom, -) +from frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType, Preset, ReadFrom from frostfs_testlib.load.runners import DefaultRunner from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME from frostfs_testlib.storage.cluster import ClusterNode @@ -99,9 +92,7 @@ class TestLoadConfig: def test_load_controller_string_representation(self, load_params: LoadParams): load_params.endpoint_selection_strategy = EndpointSelectionStrategy.ALL load_params.object_size = 512 - background_load_controller = BackgroundLoadController( - "tmp", load_params, "wallet", None, None, DefaultRunner(None) - ) + background_load_controller = BackgroundLoadController("tmp", load_params, None, None, DefaultRunner(None)) expected = "grpc 512 KiB, writers=7, readers=7, deleters=8" assert f"{background_load_controller}" == expected assert repr(background_load_controller) == expected @@ -141,7 +132,7 @@ class TestLoadConfig: "--out 'pregen_json'", "--workers '7'", "--containers '16'", - "--policy 'container_placement_policy'", + "--policy 'container_placement_policy' --policy 'container_placement_policy_2'", "--ignore-errors", "--sleep '19'", "--local", @@ -173,7 +164,7 @@ class TestLoadConfig: "--out 'pregen_json'", "--workers '7'", "--containers '16'", - "--policy 'container_placement_policy'", + "--policy 'container_placement_policy' --policy 'container_placement_policy_2'", "--ignore-errors", "--sleep '19'", "--local", @@ -214,7 +205,7 @@ class TestLoadConfig: "--out 'pregen_json'", "--workers '7'", "--buckets '13'", - "--location 's3_location'", + "--location 's3_location' --location 's3_location_2'", "--ignore-errors", "--sleep '19'", "--acl 'acl'", @@ -248,7 +239,7 @@ class TestLoadConfig: "--out 'pregen_json'", "--workers '7'", "--buckets '13'", - "--location 's3_location'", + "--location 's3_location' --location 's3_location_2'", "--ignore-errors", "--sleep '19'", 
"--acl 'acl'", @@ -288,7 +279,7 @@ class TestLoadConfig: "--out 'pregen_json'", "--workers '7'", "--buckets '13'", - "--location 's3_location'", + "--location 's3_location' --location 's3_location_2'", "--ignore-errors", "--sleep '19'", "--acl 'acl'", @@ -329,7 +320,7 @@ class TestLoadConfig: "--out 'pregen_json'", "--workers '7'", "--containers '16'", - "--policy 'container_placement_policy'", + "--policy 'container_placement_policy' --policy 'container_placement_policy_2'", "--ignore-errors", "--sleep '19'", "--acl 'acl'", @@ -362,12 +353,13 @@ class TestLoadConfig: "--out 'pregen_json'", "--workers '7'", "--containers '16'", - "--policy 'container_placement_policy'", + "--policy 'container_placement_policy' --policy 'container_placement_policy_2'", "--ignore-errors", "--sleep '19'", "--acl 'acl'", ] expected_env_vars = { + "CONFIG_DIR": "config_dir", "CONFIG_FILE": "config_file", "DURATION": 9, "WRITE_OBJ_SIZE": 11, @@ -380,12 +372,49 @@ class TestLoadConfig: "DELETERS": 8, "READ_AGE": 8, "STREAMING": 9, + "MAX_TOTAL_SIZE_GB": 17, "PREGEN_JSON": "pregen_json", } self._check_preset_params(load_params, expected_preset_args) self._check_env_vars(load_params, expected_env_vars) + @pytest.mark.parametrize( + "input, value, params", + [ + (["A C ", " B"], ["A C", "B"], [f"--policy 'A C' --policy 'B'"]), + (" A ", ["A"], ["--policy 'A'"]), + (" A , B ", ["A , B"], ["--policy 'A , B'"]), + ([" A", "B "], ["A", "B"], ["--policy 'A' --policy 'B'"]), + (None, None, []), + ], + ) + def test_grpc_list_parsing_formatter(self, input, value, params): + load_params = LoadParams(LoadType.gRPC) + load_params.preset = Preset() + load_params.preset.container_placement_policy = input + assert load_params.preset.container_placement_policy == value + + self._check_preset_params(load_params, params) + + @pytest.mark.parametrize( + "input, value, params", + [ + (["A C ", " B"], ["A C", "B"], [f"--location 'A C' --location 'B'"]), + (" A ", ["A"], ["--location 'A'"]), + (" A , B ", ["A , B"], ["--location 'A , B'"]), + ([" A", "B "], ["A", "B"], ["--location 'A' --location 'B'"]), + (None, None, []), + ], + ) + def test_s3_list_parsing_formatter(self, input, value, params): + load_params = LoadParams(LoadType.S3) + load_params.preset = Preset() + load_params.preset.s3_location = input + assert load_params.preset.s3_location == value + + self._check_preset_params(load_params, params) + @pytest.mark.parametrize("load_params, load_type", [(LoadScenario.VERIFY, LoadType.S3)], indirect=True) def test_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams): expected_env_vars = { @@ -592,6 +621,7 @@ class TestLoadConfig: "--acl ''", ] expected_env_vars = { + "CONFIG_DIR": "", "CONFIG_FILE": "", "DURATION": 0, "WRITE_OBJ_SIZE": 0, @@ -599,6 +629,7 @@ class TestLoadConfig: "K6_OUT": "", "K6_MIN_ITERATION_DURATION": "", "K6_SETUP_TIMEOUT": "", + "MAX_TOTAL_SIZE_GB": 0, "WRITERS": 0, "READERS": 0, "DELETERS": 0, @@ -689,9 +720,7 @@ class TestLoadConfig: value = getattr(dataclass, field.name) assert value is not None, f"{field.name} is not None" - def _get_filled_load_params( - self, load_type: LoadType, load_scenario: LoadScenario, set_emtpy: bool = False - ) -> LoadParams: + def _get_filled_load_params(self, load_type: LoadType, load_scenario: LoadScenario, set_emtpy: bool = False) -> LoadParams: load_type_map = { LoadScenario.S3: LoadType.S3, LoadScenario.S3_CAR: LoadType.S3, @@ -708,13 +737,12 @@ class TestLoadConfig: meta_fields = self._get_meta_fields(load_params) for field in meta_fields: - if ( - 
getattr(field.instance, field.field.name) is None - and load_params.scenario in field.field.metadata["applicable_scenarios"] - ): + if getattr(field.instance, field.field.name) is None and load_params.scenario in field.field.metadata["applicable_scenarios"]: value_to_set_map = { int: 0 if set_emtpy else len(field.field.name), + float: 0 if set_emtpy else len(field.field.name), str: "" if set_emtpy else field.field.name, + list[str]: "" if set_emtpy else [field.field.name, f"{field.field.name}_2"], bool: False if set_emtpy else True, } value_to_set = value_to_set_map[field.field_type] @@ -727,11 +755,7 @@ class TestLoadConfig: def _get_meta_fields(self, instance): data_fields = fields(instance) - fields_with_data = [ - MetaTestField(field, self._get_actual_field_type(field), instance) - for field in data_fields - if field.metadata - ] + fields_with_data = [MetaTestField(field, self._get_actual_field_type(field), instance) for field in data_fields if field.metadata] for field in data_fields: actual_field_type = self._get_actual_field_type(field) From 996f92ffa79668d479070e3793d488722b9a9db2 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Wed, 10 Jul 2024 17:17:27 +0300 Subject: [PATCH 261/363] [#259] Improve logging of boto3 client requests Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/s3/boto3_client.py | 221 +++++++++++++------------ src/frostfs_testlib/utils/cli_utils.py | 15 +- 2 files changed, 128 insertions(+), 108 deletions(-) diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index 150570c..5686b78 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -18,7 +18,7 @@ from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _ma from frostfs_testlib.utils import string_utils # TODO: Refactor this code to use shell instead of _cmd_run -from frostfs_testlib.utils.cli_utils import _configure_aws_cli, log_command_execution +from frostfs_testlib.utils.cli_utils import log_command_execution from frostfs_testlib.utils.file_utils import TestFile logger = logging.getLogger("NeoLogger") @@ -34,7 +34,15 @@ def report_error(func): try: return func(*a, **kw) except ClientError as err: - log_command_execution("Result", str(err)) + url = None + params = {"args": a, "kwargs": kw} + + if isinstance(a[0], Boto3ClientWrapper): + client: Boto3ClientWrapper = a[0] + url = client.s3gate_endpoint + params = {"args": a[1:], "kwargs": kw} + + log_command_execution(url, f"Failed {err.operation_name}", err.response, params) raise return deco @@ -90,7 +98,7 @@ class Boto3ClientWrapper(S3ClientWrapper): verify=False, ) - def _to_s3_param(self, param: str): + def _to_s3_param(self, param: str) -> str: replacement_map = { "Acl": "ACL", "Cors": "CORS", @@ -101,6 +109,11 @@ class Boto3ClientWrapper(S3ClientWrapper): result = result.replace(find, replace) return result + def _convert_to_s3_params(self, scope: dict, exclude: Optional[list[str]] = None) -> dict: + if not exclude: + exclude = ["self"] + return {self._to_s3_param(param): value for param, value in scope if param not in exclude and value is not None} + # BUCKET METHODS # @reporter.step("Create bucket S3") @report_error @@ -133,7 +146,7 @@ class Boto3ClientWrapper(S3ClientWrapper): params.update({"CreateBucketConfiguration": {"LocationConstraint": location_constraint}}) s3_bucket = self.boto3_client.create_bucket(**params) - log_command_execution(f"Created S3 bucket {bucket}", s3_bucket) + log_command_execution(self.s3gate_endpoint, 
f"Created S3 bucket {bucket}", s3_bucket, params) return bucket @reporter.step("List buckets S3") @@ -142,7 +155,7 @@ class Boto3ClientWrapper(S3ClientWrapper): found_buckets = [] response = self.boto3_client.list_buckets() - log_command_execution("S3 List buckets result", response) + log_command_execution(self.s3gate_endpoint, "S3 List buckets result", response) for bucket in response["Buckets"]: found_buckets.append(bucket["Name"]) @@ -153,26 +166,27 @@ class Boto3ClientWrapper(S3ClientWrapper): @report_error def delete_bucket(self, bucket: str) -> None: response = self.boto3_client.delete_bucket(Bucket=bucket) - log_command_execution("S3 Delete bucket result", response) + log_command_execution(self.s3gate_endpoint, "S3 Delete bucket result", response, {"Bucket": bucket}) @reporter.step("Head bucket S3") @report_error def head_bucket(self, bucket: str) -> None: response = self.boto3_client.head_bucket(Bucket=bucket) - log_command_execution("S3 Head bucket result", response) + log_command_execution(self.s3gate_endpoint, "S3 Head bucket result", response, {"Bucket": bucket}) @reporter.step("Put bucket versioning status") @report_error def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: - response = self.boto3_client.put_bucket_versioning(Bucket=bucket, VersioningConfiguration={"Status": status.value}) - log_command_execution("S3 Set bucket versioning to", response) + params = {"Bucket": bucket, "VersioningConfiguration": {"Status": status.value}} + response = self.boto3_client.put_bucket_versioning(**params) + log_command_execution(self.s3gate_endpoint, "S3 Set bucket versioning to", response, params) @reporter.step("Get bucket versioning status") @report_error def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: response = self.boto3_client.get_bucket_versioning(Bucket=bucket) status = response.get("Status") - log_command_execution("S3 Got bucket versioning status", response) + log_command_execution(self.s3gate_endpoint, "S3 Got bucket versioning status", response, {"Bucket": bucket}) return status @reporter.step("Put bucket tagging") @@ -180,28 +194,29 @@ class Boto3ClientWrapper(S3ClientWrapper): def put_bucket_tagging(self, bucket: str, tags: list) -> None: tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tagging = {"TagSet": tags} - response = self.boto3_client.put_bucket_tagging(Bucket=bucket, Tagging=tagging) - log_command_execution("S3 Put bucket tagging", response) + params = self._convert_to_s3_params(locals().items(), exclude=["self", "tags"]) + response = self.boto3_client.put_bucket_tagging(**params) + log_command_execution(self.s3gate_endpoint, "S3 Put bucket tagging", response, params) @reporter.step("Get bucket tagging") @report_error def get_bucket_tagging(self, bucket: str) -> list: response = self.boto3_client.get_bucket_tagging(Bucket=bucket) - log_command_execution("S3 Get bucket tagging", response) + log_command_execution(self.s3gate_endpoint, "S3 Get bucket tagging", response, {"Bucket": bucket}) return response.get("TagSet") @reporter.step("Get bucket acl") @report_error def get_bucket_acl(self, bucket: str) -> list: response = self.boto3_client.get_bucket_acl(Bucket=bucket) - log_command_execution("S3 Get bucket acl", response) + log_command_execution(self.s3gate_endpoint, "S3 Get bucket acl", response, {"Bucket": bucket}) return response.get("Grants") @reporter.step("Delete bucket tagging") @report_error def delete_bucket_tagging(self, bucket: str) -> None: response = 
self.boto3_client.delete_bucket_tagging(Bucket=bucket) - log_command_execution("S3 Delete bucket tagging", response) + log_command_execution(self.s3gate_endpoint, "S3 Delete bucket tagging", response, {"Bucket": bucket}) @reporter.step("Put bucket ACL") @report_error @@ -212,71 +227,74 @@ class Boto3ClientWrapper(S3ClientWrapper): grant_write: Optional[str] = None, grant_read: Optional[str] = None, ) -> None: - params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} + params = self._convert_to_s3_params(locals().items()) response = self.boto3_client.put_bucket_acl(**params) - log_command_execution("S3 ACL bucket result", response) + log_command_execution(self.s3gate_endpoint, "S3 ACL bucket result", response, params) @reporter.step("Put object lock configuration") @report_error def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: - response = self.boto3_client.put_object_lock_configuration(Bucket=bucket, ObjectLockConfiguration=configuration) - log_command_execution("S3 put_object_lock_configuration result", response) + params = {"Bucket": bucket, "ObjectLockConfiguration": configuration} + response = self.boto3_client.put_object_lock_configuration(**params) + log_command_execution(self.s3gate_endpoint, "S3 put_object_lock_configuration result", response, params) return response @reporter.step("Get object lock configuration") @report_error def get_object_lock_configuration(self, bucket: str) -> dict: response = self.boto3_client.get_object_lock_configuration(Bucket=bucket) - log_command_execution("S3 get_object_lock_configuration result", response) + log_command_execution(self.s3gate_endpoint, "S3 get_object_lock_configuration result", response, {"Bucket": bucket}) return response.get("ObjectLockConfiguration") @reporter.step("Get bucket policy") @report_error def get_bucket_policy(self, bucket: str) -> str: response = self.boto3_client.get_bucket_policy(Bucket=bucket) - log_command_execution("S3 get_bucket_policy result", response) + log_command_execution(self.s3gate_endpoint, "S3 get_bucket_policy result", response, {"Bucket": bucket}) return response.get("Policy") @reporter.step("Delete bucket policy") @report_error def delete_bucket_policy(self, bucket: str) -> str: response = self.boto3_client.delete_bucket_policy(Bucket=bucket) - log_command_execution("S3 delete_bucket_policy result", response) + log_command_execution(self.s3gate_endpoint, "S3 delete_bucket_policy result", response, {"Bucket": bucket}) return response @reporter.step("Put bucket policy") @report_error def put_bucket_policy(self, bucket: str, policy: dict) -> None: - response = self.boto3_client.put_bucket_policy(Bucket=bucket, Policy=json.dumps(policy)) - log_command_execution("S3 put_bucket_policy result", response) + params = {"Bucket": bucket, "Policy": json.dumps(policy)} + response = self.boto3_client.put_bucket_policy(**params) + log_command_execution(self.s3gate_endpoint, "S3 put_bucket_policy result", response, params) return response @reporter.step("Get bucket cors") @report_error def get_bucket_cors(self, bucket: str) -> dict: response = self.boto3_client.get_bucket_cors(Bucket=bucket) - log_command_execution("S3 get_bucket_cors result", response) + log_command_execution(self.s3gate_endpoint, "S3 get_bucket_cors result", response, {"Bucket": bucket}) return response.get("CORSRules") @reporter.step("Get bucket location") @report_error def get_bucket_location(self, bucket: str) -> str: response = 
self.boto3_client.get_bucket_location(Bucket=bucket) - log_command_execution("S3 get_bucket_location result", response) + log_command_execution(self.s3gate_endpoint, "S3 get_bucket_location result", response, {"Bucket": bucket}) return response.get("LocationConstraint") @reporter.step("Put bucket cors") @report_error def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: - response = self.boto3_client.put_bucket_cors(Bucket=bucket, CORSConfiguration=cors_configuration) - log_command_execution("S3 put_bucket_cors result", response) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_client.put_bucket_cors(**params) + log_command_execution(self.s3gate_endpoint, "S3 put_bucket_cors result", response, params) return response @reporter.step("Delete bucket cors") @report_error def delete_bucket_cors(self, bucket: str) -> None: response = self.boto3_client.delete_bucket_cors(Bucket=bucket) - log_command_execution("S3 delete_bucket_cors result", response) + log_command_execution(self.s3gate_endpoint, "S3 delete_bucket_cors result", response, {"Bucket": bucket}) # END OF BUCKET METHODS # # OBJECT METHODS # @@ -284,8 +302,9 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("List objects S3 v2") @report_error def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + params = self._convert_to_s3_params(locals().items()) response = self.boto3_client.list_objects_v2(Bucket=bucket) - log_command_execution("S3 v2 List objects result", response) + log_command_execution(self.s3gate_endpoint, "S3 v2 List objects result", response, params) obj_list = [obj["Key"] for obj in response.get("Contents", [])] logger.info(f"Found s3 objects: {obj_list}") @@ -295,8 +314,9 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("List objects S3") @report_error def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + params = self._convert_to_s3_params(locals().items()) response = self.boto3_client.list_objects(Bucket=bucket) - log_command_execution("S3 List objects result", response) + log_command_execution(self.s3gate_endpoint, "S3 List objects result", response, params) obj_list = [obj["Key"] for obj in response.get("Contents", [])] logger.info(f"Found s3 objects: {obj_list}") @@ -306,15 +326,17 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("List objects versions S3") @report_error def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: + params = self._convert_to_s3_params(locals().items()) response = self.boto3_client.list_object_versions(Bucket=bucket) - log_command_execution("S3 List objects versions result", response) + log_command_execution(self.s3gate_endpoint, "S3 List objects versions result", response, params) return response if full_output else response.get("Versions", []) @reporter.step("List objects delete markers S3") @report_error def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: + params = self._convert_to_s3_params(locals().items()) response = self.boto3_client.list_object_versions(Bucket=bucket) - log_command_execution("S3 List objects delete markers result", response) + log_command_execution(self.s3gate_endpoint, "S3 List objects delete markers result", response, params) return response if full_output else response.get("DeleteMarkers", []) @reporter.step("Put object S3") @@ -339,36 +361,33 @@ class Boto3ClientWrapper(S3ClientWrapper): with open(filepath, "rb") as put_file: body = put_file.read() - 
params = { - self._to_s3_param(param): value - for param, value in locals().items() - if param not in ["self", "filepath", "put_file"] and value is not None - } - response = self.boto3_client.put_object(**params) - log_command_execution("S3 Put object result", response) + params = self._convert_to_s3_params(locals().items(), exclude=["self", "filepath", "put_file", "body"]) + response = self.boto3_client.put_object(Body=body, **params) + log_command_execution(self.s3gate_endpoint, "S3 Put object result", response, params) return response.get("VersionId") @reporter.step("Head object S3") @report_error def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} + params = self._convert_to_s3_params(locals().items()) response = self.boto3_client.head_object(**params) - log_command_execution("S3 Head object result", response) + log_command_execution(self.s3gate_endpoint, "S3 Head object result", response, params) return response @reporter.step("Delete object S3") @report_error def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} + params = self._convert_to_s3_params(locals().items()) response = self.boto3_client.delete_object(**params) - log_command_execution("S3 Delete object result", response) + log_command_execution(self.s3gate_endpoint, "S3 Delete object result", response, params) return response @reporter.step("Delete objects S3") @report_error def delete_objects(self, bucket: str, keys: list[str]) -> dict: - response = self.boto3_client.delete_objects(Bucket=bucket, Delete=_make_objs_dict(keys)) - log_command_execution("S3 Delete objects result", response) + params = {"Bucket": bucket, "Delete": _make_objs_dict(keys)} + response = self.boto3_client.delete_objects(**params) + log_command_execution(self.s3gate_endpoint, "S3 Delete objects result", response, params) assert ( "Errors" not in response ), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Message: {response["Errors"]["Message"]}' @@ -387,8 +406,9 @@ class Boto3ClientWrapper(S3ClientWrapper): for object_version in object_versions ] } - response = self.boto3_client.delete_objects(Bucket=bucket, Delete=delete_list) - log_command_execution("S3 Delete objects result", response) + params = {"Bucket": bucket, "Delete": delete_list} + response = self.boto3_client.delete_objects(**params) + log_command_execution(self.s3gate_endpoint, "S3 Delete objects result", response, params) return response @reporter.step("Delete object versions S3 without delete markers") @@ -396,8 +416,9 @@ class Boto3ClientWrapper(S3ClientWrapper): def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: # Delete objects without creating delete markers for object_version in object_versions: - response = self.boto3_client.delete_object(Bucket=bucket, Key=object_version["Key"], VersionId=object_version["VersionId"]) - log_command_execution("S3 Delete object result", response) + params = {"Bucket": bucket, "Key": object_version["Key"], "VersionId": object_version["VersionId"]} + response = self.boto3_client.delete_object(**params) + log_command_execution(self.s3gate_endpoint, "S3 Delete object result", response, params) @reporter.step("Put object ACL") 
@report_error @@ -409,17 +430,17 @@ class Boto3ClientWrapper(S3ClientWrapper): grant_write: Optional[str] = None, grant_read: Optional[str] = None, ) -> list: - params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} + params = self._convert_to_s3_params(locals().items()) response = self.boto3_client.put_object_acl(**params) - log_command_execution("S3 put object ACL", response) + log_command_execution(self.s3gate_endpoint, "S3 put object ACL", response, params) return response.get("Grants") @reporter.step("Get object ACL") @report_error def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: - params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} + params = self._convert_to_s3_params(locals().items()) response = self.boto3_client.get_object_acl(**params) - log_command_execution("S3 ACL objects result", response) + log_command_execution(self.s3gate_endpoint, "S3 ACL objects result", response, params) return response.get("Grants") @reporter.step("Copy object S3") @@ -442,13 +463,9 @@ class Boto3ClientWrapper(S3ClientWrapper): key = string_utils.unique_name("copy-object-") copy_source = f"{source_bucket}/{source_key}" - params = { - self._to_s3_param(param): value - for param, value in locals().items() - if param not in ["self", "source_bucket", "source_key"] and value is not None - } + params = self._convert_to_s3_params(locals().items(), exclude=["self", "source_bucket", "source_key"]) response = self.boto3_client.copy_object(**params) - log_command_execution("S3 Copy objects result", response) + log_command_execution(self.s3gate_endpoint, "S3 Copy objects result", response, params) return key @reporter.step("Get object S3") @@ -465,13 +482,12 @@ class Boto3ClientWrapper(S3ClientWrapper): if object_range: range_str = f"bytes={object_range[0]}-{object_range[1]}" - params = { - self._to_s3_param(param): value - for param, value in {**locals(), **{"Range": range_str}}.items() - if param not in ["self", "object_range", "full_output", "range_str", "filename"] and value is not None - } + params = self._convert_to_s3_params( + {**locals(), **{"Range": range_str}}.items(), + exclude=["self", "object_range", "full_output", "range_str"], + ) response = self.boto3_client.get_object(**params) - log_command_execution("S3 Get objects result", response) + log_command_execution(self.s3gate_endpoint, "S3 Get objects result", response, params) if full_output: return response @@ -487,8 +503,9 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("Create multipart upload S3") @report_error def create_multipart_upload(self, bucket: str, key: str) -> str: - response = self.boto3_client.create_multipart_upload(Bucket=bucket, Key=key) - log_command_execution("S3 Created multipart upload", response) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_client.create_multipart_upload(**params) + log_command_execution(self.s3gate_endpoint, "S3 Created multipart upload", response, params) assert response.get("UploadId"), f"Expected UploadId in response:\n{response}" return response["UploadId"] @@ -497,15 +514,16 @@ class Boto3ClientWrapper(S3ClientWrapper): @report_error def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: response = self.boto3_client.list_multipart_uploads(Bucket=bucket) - log_command_execution("S3 List multipart upload", response) + 
log_command_execution(self.s3gate_endpoint, "S3 List multipart upload", response, {"Bucket": bucket}) return response.get("Uploads") @reporter.step("Abort multipart upload S3") @report_error def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: - response = self.boto3_client.abort_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id) - log_command_execution("S3 Abort multipart upload", response) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_client.abort_multipart_upload(**params) + log_command_execution(self.s3gate_endpoint, "S3 Abort multipart upload", response, params) @reporter.step("Upload part S3") @report_error @@ -513,14 +531,10 @@ class Boto3ClientWrapper(S3ClientWrapper): with open(filepath, "rb") as put_file: body = put_file.read() - response = self.boto3_client.upload_part( - UploadId=upload_id, - Bucket=bucket, - Key=key, - PartNumber=part_num, - Body=body, - ) - log_command_execution("S3 Upload part", response) + params = self._convert_to_s3_params(locals().items(), exclude=["self", "put_file", "part_num", "filepath", "body"]) + params["PartNumber"] = part_num + response = self.boto3_client.upload_part(Body=body, **params) + log_command_execution(self.s3gate_endpoint, "S3 Upload part", response, params) assert response.get("ETag"), f"Expected ETag in response:\n{response}" return response["ETag"] @@ -528,14 +542,10 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("Upload copy part S3") @report_error def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: - response = self.boto3_client.upload_part_copy( - UploadId=upload_id, - Bucket=bucket, - Key=key, - PartNumber=part_num, - CopySource=copy_source, - ) - log_command_execution("S3 Upload copy part", response) + params = self._convert_to_s3_params(locals().items(), exclude=["self", "put_file", "part_num", "filepath"]) + params["PartNumber"] = part_num + response = self.boto3_client.upload_part_copy(**params) + log_command_execution(self.s3gate_endpoint, "S3 Upload copy part", response, params) assert response.get("CopyPartResult", []).get("ETag"), f"Expected ETag in response:\n{response}" return response["CopyPartResult"]["ETag"] @@ -543,8 +553,9 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("List parts S3") @report_error def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: - response = self.boto3_client.list_parts(UploadId=upload_id, Bucket=bucket, Key=key) - log_command_execution("S3 List part", response) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_client.list_parts(**params) + log_command_execution(self.s3gate_endpoint, "S3 List part", response, params) assert response.get("Parts"), f"Expected Parts in response:\n{response}" return response["Parts"] @@ -553,8 +564,10 @@ class Boto3ClientWrapper(S3ClientWrapper): @report_error def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts] - response = self.boto3_client.complete_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id, MultipartUpload={"Parts": parts}) - log_command_execution("S3 Complete multipart upload", response) + params = self._convert_to_s3_params(locals().items(), exclude=["self", "parts"]) + params["MultipartUpload"] = {"Parts": parts} + response = self.boto3_client.complete_multipart_upload(**params) + 
log_command_execution(self.s3gate_endpoint, "S3 Complete multipart upload", response, params) return response @@ -568,9 +581,9 @@ class Boto3ClientWrapper(S3ClientWrapper): version_id: Optional[str] = None, bypass_governance_retention: Optional[bool] = None, ) -> None: - params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} + params = self._convert_to_s3_params(locals().items()) response = self.boto3_client.put_object_retention(**params) - log_command_execution("S3 Put object retention ", response) + log_command_execution(self.s3gate_endpoint, "S3 Put object retention ", response, params) @reporter.step("Put object legal hold") @report_error @@ -582,35 +595,33 @@ class Boto3ClientWrapper(S3ClientWrapper): version_id: Optional[str] = None, ) -> None: legal_hold = {"Status": legal_hold_status} - params = { - self._to_s3_param(param): value - for param, value in locals().items() - if param not in ["self", "legal_hold_status"] and value is not None - } + params = self._convert_to_s3_params(locals().items(), exclude=["self", "legal_hold_status"]) response = self.boto3_client.put_object_legal_hold(**params) - log_command_execution("S3 Put object legal hold ", response) + log_command_execution(self.s3gate_endpoint, "S3 Put object legal hold ", response, params) @reporter.step("Put object tagging") @report_error def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None: tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tagging = {"TagSet": tags} - response = self.boto3_client.put_object_tagging(Bucket=bucket, Key=key, Tagging=tagging, VersionId=version_id) - log_command_execution("S3 Put object tagging", response) + params = self._convert_to_s3_params(locals().items(), exclude=["self", "tags"]) + response = self.boto3_client.put_object_tagging(**params) + log_command_execution(self.s3gate_endpoint, "S3 Put object tagging", response, params) @reporter.step("Get object tagging") @report_error def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: - params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None} + params = self._convert_to_s3_params(locals().items()) response = self.boto3_client.get_object_tagging(**params) - log_command_execution("S3 Get object tagging", response) + log_command_execution(self.s3gate_endpoint, "S3 Get object tagging", response, params) return response.get("TagSet") @reporter.step("Delete object tagging") @report_error def delete_object_tagging(self, bucket: str, key: str) -> None: - response = self.boto3_client.delete_object_tagging(Bucket=bucket, Key=key) - log_command_execution("S3 Delete object tagging", response) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_client.delete_object_tagging(**params) + log_command_execution(self.s3gate_endpoint, "S3 Delete object tagging", response, params) @reporter.step("Get object attributes") @report_error diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index 0a1b5fd..d22f5c1 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -15,7 +15,7 @@ from contextlib import suppress from datetime import datetime from io import StringIO from textwrap import shorten -from typing import Dict, List, TypedDict, Union +from typing import Dict, List, Optional, TypedDict, Union 
import pexpect @@ -75,12 +75,21 @@ def _attach_allure_log(cmd: str, output: str, return_code: int, start_time: date reporter.attach(command_attachment, "Command execution") -def log_command_execution(cmd: str, output: Union[str, TypedDict]) -> None: +def log_command_execution(url: str, cmd: str, output: Union[str, TypedDict], params: Optional[dict] = None) -> None: logger.info(f"{cmd}: {output}") + with suppress(Exception): json_output = json.dumps(output, indent=4, sort_keys=True) output = json_output - command_attachment = f"COMMAND: '{cmd}'\n" f"OUTPUT:\n {output}\n" + + try: + json_params = json.dumps(params, indent=4, sort_keys=True) + except TypeError as err: + logger.warning(f"Failed to serialize '{cmd}' request parameters:\n{params}\nException: {err}") + else: + params = json_params + + command_attachment = f"COMMAND: '{cmd}'\n" f"URL: {url}\n" f"PARAMS:\n{params}\n" f"OUTPUT:\n{output}\n" with reporter.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'): reporter.attach(command_attachment, "Command execution") From 6f99aef4065a8ab045cc7ea483252bbd63284954 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Tue, 16 Jul 2024 13:36:02 +0300 Subject: [PATCH 262/363] [#263] Unify version parsing Function `_parse_version` renamed to `parse_version` and changed regex for version parsing Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/utils/version_utils.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/frostfs_testlib/utils/version_utils.py b/src/frostfs_testlib/utils/version_utils.py index 7fcc9de..490abb0 100644 --- a/src/frostfs_testlib/utils/version_utils.py +++ b/src/frostfs_testlib/utils/version_utils.py @@ -18,14 +18,14 @@ def get_local_binaries_versions(shell: Shell) -> dict[str, str]: for binary in [NEOGO_EXECUTABLE, FROSTFS_AUTHMATE_EXEC]: out = shell.exec(f"{binary} --version").stdout - versions[binary] = _parse_version(out) + versions[binary] = parse_version(out) frostfs_cli = FrostfsCli(shell, FROSTFS_CLI_EXEC) - versions[FROSTFS_CLI_EXEC] = _parse_version(frostfs_cli.version.get().stdout) + versions[FROSTFS_CLI_EXEC] = parse_version(frostfs_cli.version.get().stdout) try: frostfs_adm = FrostfsAdm(shell, FROSTFS_ADM_EXEC) - versions[FROSTFS_ADM_EXEC] = _parse_version(frostfs_adm.version.get().stdout) + versions[FROSTFS_ADM_EXEC] = parse_version(frostfs_adm.version.get().stdout) except RuntimeError: logger.info(f"{FROSTFS_ADM_EXEC} not installed") @@ -63,7 +63,7 @@ def parallel_binary_verions(host: Host) -> dict[str, str]: binary_path = binary["exec_path"] try: result = shell.exec(f"{binary_path} {binary['param']}") - version = _parse_version(result.stdout) or _parse_version(result.stderr) or "Unknown" + version = parse_version(result.stdout) or parse_version(result.stderr) or "Unknown" versions_at_host[binary_name] = version except Exception as exc: logger.error(f"Cannot get version for {binary_path} because of\n{exc}") @@ -85,6 +85,6 @@ def get_remote_binaries_versions(hosting: Hosting) -> dict[str, dict[str, str]]: return versions_by_host -def _parse_version(version_output: str) -> str: - version = re.search(r"version[:\s]*v?(.+)", version_output, re.IGNORECASE) - return version.group(1).strip() if version else version_output +def parse_version(version_output: str) -> str: + version = re.search(r"(?<=version[:=])\s?[\"\']?v?(.+)", version_output, re.IGNORECASE) + return version.group(1).strip("\"'\n\t ") if version else version_output From b6a657e76c03818bf0f663284a6c5036ab713687 Mon Sep 17 00:00:00 2001 From: 
Ilyas Niyazov Date: Tue, 9 Jul 2024 14:47:32 +0300 Subject: [PATCH 263/363] [#258] add tests for preupgrade --- src/frostfs_testlib/cli/frostfs_adm/morph.py | 2 +- .../storage/controllers/cluster_state_controller.py | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index 1d753d9..d8fd61c 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -110,7 +110,7 @@ class FrostfsAdmMorph(CliCommand): **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) - def dump_hashes(self, rpc_endpoint: str) -> CommandResult: + def dump_hashes(self, rpc_endpoint: str, domain: Optional[str] = None) -> CommandResult: """Dump deployed contract hashes. Args: diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 3c6c268..cec5ed3 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -531,3 +531,11 @@ class ClusterStateController: except Exception as err: logger.warning(f"Host ping fails with error {err}") return HostStatus.ONLINE + + @reporter.step("Get contract by domain - {domain_name}") + def get_domain_contracts(self, cluster_node: ClusterNode, domain_name: str): + frostfs_adm = FrostfsAdm( + shell=cluster_node.host.get_shell(), + frostfs_adm_exec_path=FROSTFS_ADM_EXEC, + ) + return frostfs_adm.morph.dump_hashes(cluster_node.morph_chain.get_endpoint(), domain_name).stdout From f472d7e1ce93c1e1fecbd612150cf650c5a95123 Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Tue, 16 Jul 2024 13:00:50 +0300 Subject: [PATCH 264/363] [#261] Add error pattern no rule --- src/frostfs_testlib/resources/error_patterns.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py index 5491a7a..3b9231e 100644 --- a/src/frostfs_testlib/resources/error_patterns.py +++ b/src/frostfs_testlib/resources/error_patterns.py @@ -28,3 +28,4 @@ S3_MALFORMED_XML_REQUEST = "The XML you provided was not well-formed or did not RULE_ACCESS_DENIED_CONTAINER = "access to container operation {operation} is denied by access policy engine: Access denied" RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied" +NO_RULE_FOUND_CONTAINER = "access to container operation {operation} is denied by access policy engine: NoRuleFound" From 40dfd015a854d1d34cd00acdac2a6cc12b1cd8a0 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Thu, 18 Jul 2024 00:00:48 +0300 Subject: [PATCH 265/363] [#264] Add APE related commands Signed-off-by: a.berezin --- .../cli/frostfs_cli/ape_manager.py | 70 +++++++++++ src/frostfs_testlib/cli/frostfs_cli/bearer.py | 54 ++++++++ src/frostfs_testlib/cli/frostfs_cli/cli.py | 4 + src/frostfs_testlib/cli/frostfs_cli/util.py | 8 ++ .../storage/dataclasses/ape.py | 115 ++++++++++++++++++ .../testing/cluster_test_base.py | 5 +- src/frostfs_testlib/utils/string_utils.py | 4 +- 7 files changed, 257 insertions(+), 3 deletions(-) create mode 100644 src/frostfs_testlib/cli/frostfs_cli/ape_manager.py create mode 100644 src/frostfs_testlib/cli/frostfs_cli/bearer.py create mode 100644 src/frostfs_testlib/storage/dataclasses/ape.py diff --git 
a/src/frostfs_testlib/cli/frostfs_cli/ape_manager.py b/src/frostfs_testlib/cli/frostfs_cli/ape_manager.py new file mode 100644 index 0000000..525a9be --- /dev/null +++ b/src/frostfs_testlib/cli/frostfs_cli/ape_manager.py @@ -0,0 +1,70 @@ +from typing import Optional + +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult + + +class FrostfsCliApeManager(CliCommand): + """Operations with APE manager.""" + + def add( + self, + rpc_endpoint: str, + chain_id: Optional[str] = None, + chain_id_hex: Optional[str] = None, + path: Optional[str] = None, + rule: Optional[str] | Optional[list[str]] = None, + target_name: Optional[str] = None, + target_type: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Add rule chain for a target.""" + + return self._execute( + "ape-manager add", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def list( + self, + rpc_endpoint: str, + target_name: Optional[str] = None, + target_type: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Generate APE override by target and APE chains. Util command. + + Generated APE override can be dumped to a file in JSON format that is passed to + "create" command. + """ + + return self._execute( + "ape-manager list", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def remove( + self, + rpc_endpoint: str, + chain_id: Optional[str] = None, + chain_id_hex: Optional[str] = None, + target_name: Optional[str] = None, + target_type: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Generate APE override by target and APE chains. Util command. + + Generated APE override can be dumped to a file in JSON format that is passed to + "create" command. + """ + + return self._execute( + "ape-manager remove", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/bearer.py b/src/frostfs_testlib/cli/frostfs_cli/bearer.py new file mode 100644 index 0000000..e21a6c8 --- /dev/null +++ b/src/frostfs_testlib/cli/frostfs_cli/bearer.py @@ -0,0 +1,54 @@ +from typing import Optional + +from frostfs_testlib.cli.cli_command import CliCommand +from frostfs_testlib.shell import CommandResult + + +class FrostfsCliBearer(CliCommand): + def create( + self, + rpc_endpoint: str, + out: str, + issued_at: Optional[str] = None, + expire_at: Optional[str] = None, + not_valid_before: Optional[str] = None, + ape: Optional[str] = None, + eacl: Optional[str] = None, + owner: Optional[str] = None, + json: Optional[bool] = False, + impersonate: Optional[bool] = False, + wallet: Optional[str] = None, + address: Optional[str] = None, + ) -> CommandResult: + """Create bearer token. + + All epoch flags can be specified relative to the current epoch with the +n syntax. + In this case --rpc-endpoint flag should be specified and the epoch in bearer token + is set to current epoch + n. 
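The ape-manager and bearer wrappers above follow the library's usual convention of forwarding `locals()` to `_execute`, so each keyword argument becomes the matching CLI flag. (Note that the docstrings on `list` and `remove` appear to be copy-pasted from the bearer override helper rather than describing those subcommands.) A hypothetical end-to-end call, where the endpoint, container ID and rule text are placeholders and the rule string merely mirrors the output format of `Rule.as_string()` from the `ape.py` dataclasses added later in this patch:

    # Sketch: deny object.put for role "others" on one container, assuming an
    # existing FrostfsCli instance named `cli`.
    endpoint = "s01.frostfs.devenv:8080"
    cli.ape_manager.add(
        rpc_endpoint=endpoint,
        chain_id="deny-put-chain",
        rule='deny object.put RequestCondition:"\\$Actor:role"=others *',
        target_name="<container-id>",
        target_type="container",
    )
    # Inspect the chains now attached to the target.
    print(cli.ape_manager.list(rpc_endpoint=endpoint, target_name="<container-id>", target_type="container").stdout)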
+ """ + return self._execute( + "bearer create", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def generate_ape_override( + self, + chain_id: Optional[str] = None, + chain_id_hex: Optional[str] = None, + cid: Optional[str] = None, + output: Optional[str] = None, + path: Optional[str] = None, + rule: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + ) -> CommandResult: + """Generate APE override by target and APE chains. Util command. + + Generated APE override can be dumped to a file in JSON format that is passed to + "create" command. + """ + + return self._execute( + "bearer generate-ape-override", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/frostfs_testlib/cli/frostfs_cli/cli.py b/src/frostfs_testlib/cli/frostfs_cli/cli.py index c20a987..d83b7ae 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/cli.py +++ b/src/frostfs_testlib/cli/frostfs_cli/cli.py @@ -2,6 +2,8 @@ from typing import Optional from frostfs_testlib.cli.frostfs_cli.accounting import FrostfsCliAccounting from frostfs_testlib.cli.frostfs_cli.acl import FrostfsCliACL +from frostfs_testlib.cli.frostfs_cli.ape_manager import FrostfsCliApeManager +from frostfs_testlib.cli.frostfs_cli.bearer import FrostfsCliBearer from frostfs_testlib.cli.frostfs_cli.container import FrostfsCliContainer from frostfs_testlib.cli.frostfs_cli.control import FrostfsCliControl from frostfs_testlib.cli.frostfs_cli.netmap import FrostfsCliNetmap @@ -41,3 +43,5 @@ class FrostfsCli: self.version = FrostfsCliVersion(shell, frostfs_cli_exec_path, config=config_file) self.tree = FrostfsCliTree(shell, frostfs_cli_exec_path, config=config_file) self.control = FrostfsCliControl(shell, frostfs_cli_exec_path, config=config_file) + self.bearer = FrostfsCliBearer(shell, frostfs_cli_exec_path, config=config_file) + self.ape_manager = FrostfsCliApeManager(shell, frostfs_cli_exec_path, config=config_file) diff --git a/src/frostfs_testlib/cli/frostfs_cli/util.py b/src/frostfs_testlib/cli/frostfs_cli/util.py index 7914169..37347a5 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/util.py +++ b/src/frostfs_testlib/cli/frostfs_cli/util.py @@ -54,3 +54,11 @@ class FrostfsCliUtil(CliCommand): "util sign session-token", **{param: value for param, value in locals().items() if param not in ["self"]}, ) + + def convert_eacl(self, from_file: str, to_file: str, json: Optional[bool] = False, ape: Optional[bool] = False): + """Convert representation of extended ACL table.""" + + return self._execute( + "util convert eacl", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/frostfs_testlib/storage/dataclasses/ape.py b/src/frostfs_testlib/storage/dataclasses/ape.py new file mode 100644 index 0000000..84b3033 --- /dev/null +++ b/src/frostfs_testlib/storage/dataclasses/ape.py @@ -0,0 +1,115 @@ +import logging +from dataclasses import dataclass +from enum import Enum +from typing import Optional + +from frostfs_testlib.testing.readable import HumanReadableEnum +from frostfs_testlib.utils import string_utils + +logger = logging.getLogger("NeoLogger") +EACL_LIFETIME = 100500 +FROSTFS_CONTRACT_CACHE_TIMEOUT = 30 + + +class ObjectOperations(HumanReadableEnum): + PUT = "object.put" + GET = "object.get" + HEAD = "object.head" + GET_RANGE = "object.range" + GET_RANGE_HASH = "object.hash" + SEARCH = "object.search" + DELETE = "object.delete" + WILDCARD_ALL = "object.*" + + @staticmethod + def get_all(): + 
return [op for op in ObjectOperations if op != ObjectOperations.WILDCARD_ALL] + + +class Verb(HumanReadableEnum): + ALLOW = "allow" + DENY = "deny" + + +class Role(HumanReadableEnum): + OWNER = "owner" + IR = "ir" + CONTAINER = "container" + OTHERS = "others" + + +class ConditionType(HumanReadableEnum): + RESOURCE = "ResourceCondition" + REQUEST = "RequestCondition" + + +# See https://git.frostfs.info/TrueCloudLab/policy-engine/src/branch/master/schema/native/consts.go#L40-L53 +class ConditionKey(HumanReadableEnum): + ROLE = '"\\$Actor:role"' + PUBLIC_KEY = '"\\$Actor:publicKey"' + + +class MatchType(HumanReadableEnum): + EQUAL = "=" + NOT_EQUAL = "!=" + + +@dataclass +class Condition: + condition_key: ConditionKey | str + condition_value: str + condition_type: ConditionType = ConditionType.REQUEST + match_type: MatchType = MatchType.EQUAL + + def as_string(self): + key = self.condition_key.value if isinstance(self.condition_key, ConditionKey) else self.condition_key + value = self.condition_value.value if isinstance(self.condition_value, Enum) else self.condition_value + + return f"{self.condition_type.value}:{key}{self.match_type.value}{value}" + + @staticmethod + def by_role(*args, **kwargs) -> "Condition": + return Condition(ConditionKey.ROLE, *args, **kwargs) + + @staticmethod + def by_key(*args, **kwargs) -> "Condition": + return Condition(ConditionKey.PUBLIC_KEY, *args, **kwargs) + + +class Rule: + def __init__( + self, + access: Verb, + operations: list[ObjectOperations] | ObjectOperations, + conditions: list[Condition] | Condition = None, + chain_id: Optional[str] = None, + ) -> None: + self.access = access + self.operations = operations + + if not conditions: + self.conditions = [] + elif isinstance(conditions, Condition): + self.conditions = [conditions] + else: + self.conditions = conditions + + if not isinstance(self.conditions, list): + raise RuntimeError("Conditions must be a list") + + if not operations: + self.operations = [] + elif isinstance(operations, ObjectOperations): + self.operations = [operations] + else: + self.operations = operations + + if not isinstance(self.operations, list): + raise RuntimeError("Operations must be a list") + + self.chain_id = chain_id if chain_id else string_utils.unique_name("chain-id-") + + def as_string(self): + conditions = " ".join([cond.as_string() for cond in self.conditions]) + operations = " ".join([op.value for op in self.operations]) + return f"{self.access.value} {operations} {conditions} *" diff --git a/src/frostfs_testlib/testing/cluster_test_base.py b/src/frostfs_testlib/testing/cluster_test_base.py index 49c6afd..f2e10ad 100644 --- a/src/frostfs_testlib/testing/cluster_test_base.py +++ b/src/frostfs_testlib/testing/cluster_test_base.py @@ -32,7 +32,7 @@ class ClusterTestBase: ): epoch.tick_epoch(self.shell, self.cluster, alive_node=alive_node) if wait_block: - time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * wait_block) + self.wait_for_blocks(wait_block) def wait_for_epochs_align(self): epoch.wait_for_epochs_align(self.shell, self.cluster) @@ -42,3 +42,6 @@ class ClusterTestBase: def ensure_fresh_epoch(self): return epoch.ensure_fresh_epoch(self.shell, self.cluster) + + def wait_for_blocks(self, blocks_count: int = 1): + time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * blocks_count) diff --git a/src/frostfs_testlib/utils/string_utils.py b/src/frostfs_testlib/utils/string_utils.py index d8e91a4..80efa65 100644 --- a/src/frostfs_testlib/utils/string_utils.py +++ b/src/frostfs_testlib/utils/string_utils.py @@ 
-8,7 +8,7 @@ DIGITS_AND_ASCII_LETTERS = string.ascii_letters + string.digits NON_DIGITS_AND_LETTERS = string.punctuation -def unique_name(prefix: str = ""): +def unique_name(prefix: str = "", postfix: str = ""): """ Generate unique short name of anything with prefix. This should be unique in scope of multiple runs @@ -18,7 +18,7 @@ def unique_name(prefix: str = ""): Returns: unique name string """ - return f"{prefix}{hex(int(datetime.now().timestamp() * 1000000))}" + return f"{prefix}{hex(int(datetime.now().timestamp() * 1000000))}{postfix}" def random_string(length: int = 5, source: str = ONLY_ASCII_LETTERS): From 4c0d76408cf7eade7fc5ac1a460b12c2335bdb5c Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Thu, 18 Jul 2024 18:21:46 +0300 Subject: [PATCH 266/363] [#265] Update codeowners Signed-off-by: a.berezin --- CODEOWNERS | 1 + 1 file changed, 1 insertion(+) create mode 100644 CODEOWNERS diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 0000000..4a621d3 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1 @@ +* @JuliaKovshova @abereziny @d.zayakin @anikeev-yadro @anurindm @ylukoyan @i.niyazov From 166e44da9c3bcaf22ce1896ff829bbbc9819614a Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Thu, 18 Jul 2024 19:48:38 +0300 Subject: [PATCH 267/363] [#266] Remove duplicate messages in logs Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/shell/local_shell.py | 49 ++++++++++------------ src/frostfs_testlib/steps/cli/container.py | 1 - src/frostfs_testlib/utils/cli_utils.py | 5 +-- 3 files changed, 23 insertions(+), 32 deletions(-) diff --git a/src/frostfs_testlib/shell/local_shell.py b/src/frostfs_testlib/shell/local_shell.py index acf01ff..2fb6631 100644 --- a/src/frostfs_testlib/shell/local_shell.py +++ b/src/frostfs_testlib/shell/local_shell.py @@ -28,10 +28,10 @@ class LocalShell(Shell): for inspector in [*self.command_inspectors, *extra_inspectors]: command = inspector.inspect(original_command, command) - logger.info(f"Executing command: {command}") - if options.interactive_inputs: - return self._exec_interactive(command, options) - return self._exec_non_interactive(command, options) + with reporter.step(f"Executing command: {command}"): + if options.interactive_inputs: + return self._exec_interactive(command, options) + return self._exec_non_interactive(command, options) def _exec_interactive(self, command: str, options: CommandOptions) -> CommandResult: start_time = datetime.utcnow() @@ -60,9 +60,7 @@ class LocalShell(Shell): if options.check and result.return_code != 0: raise RuntimeError( - f"Command: {command}\nreturn code: {result.return_code}\n" - f"Output: {result.stdout}\n" - f"Stderr: {result.stderr}\n" + f"Command: {command}\nreturn code: {result.return_code}\n" f"Output: {result.stdout}\n" f"Stderr: {result.stderr}\n" ) return result @@ -93,9 +91,7 @@ class LocalShell(Shell): stderr="", return_code=exc.returncode, ) - raise RuntimeError( - f"Command: {command}\nError:\n" f"return code: {exc.returncode}\n" f"output: {exc.output}" - ) from exc + raise RuntimeError(f"Command: {command}\nError with retcode: {exc.returncode}\n Output: {exc.output}") from exc except OSError as exc: raise RuntimeError(f"Command: {command}\nOutput: {exc.strerror}") from exc finally: @@ -129,22 +125,19 @@ class LocalShell(Shell): end_time: datetime, result: Optional[CommandResult], ) -> None: - # TODO: increase logging level if return code is non 0, should be warning at least - logger.info( - f"Command: {command}\n" - f"{'Success:' if result and result.return_code == 0 else 'Error:'}\n" 
- f"return code: {result.return_code if result else ''} " - f"\nOutput: {result.stdout if result else ''}" - ) + if not result: + logger.warning(f"Command: {command}\n" f"Error: result is None") + return - if result: - elapsed_time = end_time - start_time - command_attachment = ( - f"COMMAND: {command}\n" - f"RETCODE: {result.return_code}\n\n" - f"STDOUT:\n{result.stdout}\n" - f"STDERR:\n{result.stderr}\n" - f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}" - ) - with reporter.step(f"COMMAND: {command}"): - reporter.attach(command_attachment, "Command execution.txt") + status, log_method = ("Success", logger.info) if result.return_code == 0 else ("Error", logger.warning) + log_method(f"Command: {command}\n" f"{status} with retcode {result.return_code}\n" f"Output: \n{result.stdout}") + + elapsed_time = end_time - start_time + command_attachment = ( + f"COMMAND: {command}\n" + f"RETCODE: {result.return_code}\n\n" + f"STDOUT:\n{result.stdout}\n" + f"STDERR:\n{result.stderr}\n" + f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}" + ) + reporter.attach(command_attachment, "Command execution.txt") diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py index fa739a8..641b321 100644 --- a/src/frostfs_testlib/steps/cli/container.py +++ b/src/frostfs_testlib/steps/cli/container.py @@ -200,7 +200,6 @@ def list_containers(wallet: WalletInfo, shell: Shell, endpoint: str, timeout: Op """ cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path) result = cli.container.list(rpc_endpoint=endpoint, timeout=timeout) - logger.info(f"Containers: \n{result}") return result.stdout.split() diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index d22f5c1..8e019ea 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -75,7 +75,7 @@ def _attach_allure_log(cmd: str, output: str, return_code: int, start_time: date reporter.attach(command_attachment, "Command execution") -def log_command_execution(url: str, cmd: str, output: Union[str, TypedDict], params: Optional[dict] = None) -> None: +def log_command_execution(url: str, cmd: str, output: Union[str, dict], params: Optional[dict] = None) -> None: logger.info(f"{cmd}: {output}") with suppress(Exception): @@ -90,8 +90,7 @@ def log_command_execution(url: str, cmd: str, output: Union[str, TypedDict], par params = json_params command_attachment = f"COMMAND: '{cmd}'\n" f"URL: {url}\n" f"PARAMS:\n{params}\n" f"OUTPUT:\n{output}\n" - with reporter.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'): - reporter.attach(command_attachment, "Command execution") + reporter.attach(command_attachment, "Command execution") def parse_netmap_output(output: str) -> list[NodeNetmapInfo]: From 7a500330de02c6167485de9a48e9c833f812b8ce Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Fri, 26 Jul 2024 16:34:47 +0300 Subject: [PATCH 268/363] [#270] Updates related to testing platform Signed-off-by: a.berezin --- .devenv.hosting.yaml | 108 ++++++++++++++++++ pyproject.toml | 5 +- src/frostfs_testlib/__init__.py | 2 + src/frostfs_testlib/analytics/__init__.py | 2 +- .../analytics/test_exporter.py | 11 +- .../analytics/testrail_exporter.py | 34 ++---- src/frostfs_testlib/fixtures.py | 35 ++++++ src/frostfs_testlib/resources/common.py | 5 + 8 files changed, 168 insertions(+), 34 deletions(-) create mode 100644 .devenv.hosting.yaml create mode 100644 src/frostfs_testlib/fixtures.py 
diff --git a/.devenv.hosting.yaml b/.devenv.hosting.yaml new file mode 100644 index 0000000..d096625 --- /dev/null +++ b/.devenv.hosting.yaml @@ -0,0 +1,108 @@ +hosts: +- address: localhost + attributes: + sudo_shell: false + plugin_name: docker + healthcheck_plugin_name: basic + attributes: + skip_readiness_check: True + force_transactions: True + services: + - name: frostfs-storage_01 + attributes: + container_name: s01 + config_path: /etc/frostfs/storage/config.yml + wallet_path: ../frostfs-dev-env/services/storage/wallet01.json + local_wallet_config_path: ./TemporaryDir/empty-password.yml + local_wallet_path: ../frostfs-dev-env/services/storage/wallet01.json + wallet_password: "" + volume_name: storage_storage_s01 + endpoint_data0: s01.frostfs.devenv:8080 + control_endpoint: s01.frostfs.devenv:8081 + un_locode: "RU MOW" + - name: frostfs-storage_02 + attributes: + container_name: s02 + config_path: /etc/frostfs/storage/config.yml + wallet_path: ../frostfs-dev-env/services/storage/wallet02.json + local_wallet_config_path: ./TemporaryDir/empty-password.yml + local_wallet_path: ../frostfs-dev-env/services/storage/wallet02.json + wallet_password: "" + volume_name: storage_storage_s02 + endpoint_data0: s02.frostfs.devenv:8080 + control_endpoint: s02.frostfs.devenv:8081 + un_locode: "RU LED" + - name: frostfs-storage_03 + attributes: + container_name: s03 + config_path: /etc/frostfs/storage/config.yml + wallet_path: ../frostfs-dev-env/services/storage/wallet03.json + local_wallet_config_path: ./TemporaryDir/empty-password.yml + local_wallet_path: ../frostfs-dev-env/services/storage/wallet03.json + wallet_password: "" + volume_name: storage_storage_s03 + endpoint_data0: s03.frostfs.devenv:8080 + control_endpoint: s03.frostfs.devenv:8081 + un_locode: "SE STO" + - name: frostfs-storage_04 + attributes: + container_name: s04 + config_path: /etc/frostfs/storage/config.yml + wallet_path: ../frostfs-dev-env/services/storage/wallet04.json + local_wallet_config_path: ./TemporaryDir/empty-password.yml + local_wallet_path: ../frostfs-dev-env/services/storage/wallet04.json + wallet_password: "" + volume_name: storage_storage_s04 + endpoint_data0: s04.frostfs.devenv:8080 + control_endpoint: s04.frostfs.devenv:8081 + un_locode: "FI HEL" + - name: frostfs-s3_01 + attributes: + container_name: s3_gate + config_path: ../frostfs-dev-env/services/s3_gate/.s3.env + wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json + local_wallet_config_path: ./TemporaryDir/password-s3.yml + local_wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json + wallet_password: "s3" + endpoint_data0: https://s3.frostfs.devenv:8080 + - name: frostfs-http_01 + attributes: + container_name: http_gate + config_path: ../frostfs-dev-env/services/http_gate/.http.env + wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json + local_wallet_config_path: ./TemporaryDir/password-other.yml + local_wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json + wallet_password: "one" + endpoint_data0: http://http.frostfs.devenv + - name: frostfs-ir_01 + attributes: + container_name: ir01 + config_path: ../frostfs-dev-env/services/ir/.ir.env + wallet_path: ../frostfs-dev-env/services/ir/az.json + local_wallet_config_path: ./TemporaryDir/password-other.yml + local_wallet_path: ../frostfs-dev-env/services/ir/az.json + wallet_password: "one" + - name: neo-go_01 + attributes: + container_name: morph_chain + config_path: ../frostfs-dev-env/services/morph_chain/protocol.privnet.yml + wallet_path: 
../frostfs-dev-env/services/morph_chain/node-wallet.json + local_wallet_config_path: ./TemporaryDir/password-other.yml + local_wallet_path: ../frostfs-dev-env/services/morph_chain/node-wallet.json + wallet_password: "one" + endpoint_internal0: http://morph-chain.frostfs.devenv:30333 + - name: main-chain_01 + attributes: + container_name: main_chain + config_path: ../frostfs-dev-env/services/chain/protocol.privnet.yml + wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json + local_wallet_config_path: ./TemporaryDir/password-other.yml + local_wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json + wallet_password: "one" + endpoint_internal0: http://main-chain.frostfs.devenv:30333 + - name: coredns_01 + attributes: + container_name: coredns + clis: + - name: frostfs-cli + exec_path: frostfs-cli diff --git a/pyproject.toml b/pyproject.toml index 5a38dba..296ce65 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,4 +89,7 @@ push = false filterwarnings = [ "ignore:Blowfish has been deprecated:cryptography.utils.CryptographyDeprecationWarning", ] -testpaths = ["tests"] \ No newline at end of file +testpaths = ["tests"] + +[project.entry-points.pytest11] +testlib = "frostfs_testlib" \ No newline at end of file diff --git a/src/frostfs_testlib/__init__.py b/src/frostfs_testlib/__init__.py index 159d48b..2cdaf4e 100644 --- a/src/frostfs_testlib/__init__.py +++ b/src/frostfs_testlib/__init__.py @@ -1 +1,3 @@ __version__ = "2.0.1" + +from .fixtures import configure_testlib, hosting diff --git a/src/frostfs_testlib/analytics/__init__.py b/src/frostfs_testlib/analytics/__init__.py index 6995a08..b057418 100644 --- a/src/frostfs_testlib/analytics/__init__.py +++ b/src/frostfs_testlib/analytics/__init__.py @@ -1,5 +1,5 @@ from frostfs_testlib.analytics import test_case from frostfs_testlib.analytics.test_case import TestCasePriority from frostfs_testlib.analytics.test_collector import TestCase, TestCaseCollector -from frostfs_testlib.analytics.test_exporter import TestExporter +from frostfs_testlib.analytics.test_exporter import TСExporter from frostfs_testlib.analytics.testrail_exporter import TestrailExporter diff --git a/src/frostfs_testlib/analytics/test_exporter.py b/src/frostfs_testlib/analytics/test_exporter.py index 5a569c6..dd6a7fb 100644 --- a/src/frostfs_testlib/analytics/test_exporter.py +++ b/src/frostfs_testlib/analytics/test_exporter.py @@ -3,7 +3,8 @@ from abc import ABC, abstractmethod from frostfs_testlib.analytics.test_collector import TestCase -class TestExporter(ABC): +# TODO: REMOVE ME +class TСExporter(ABC): test_cases_cache = [] test_suites_cache = [] @@ -46,9 +47,7 @@ class TestExporter(ABC): """ @abstractmethod - def update_test_case( - self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section - ) -> None: + def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None: """ Update test case in TMS """ @@ -60,9 +59,7 @@ class TestExporter(ABC): for test_case in test_cases: test_suite = self.get_or_create_test_suite(test_case.suite_name) - test_section = self.get_or_create_suite_section( - test_suite, test_case.suite_section_name - ) + test_section = self.get_or_create_suite_section(test_suite, test_case.suite_section_name) test_case_in_tms = self.search_test_case_id(test_case.id) steps = [{"content": value, "expected": " "} for key, value in test_case.steps.items()] diff --git a/src/frostfs_testlib/analytics/testrail_exporter.py b/src/frostfs_testlib/analytics/testrail_exporter.py index 
610fee5..36c482c 100644 --- a/src/frostfs_testlib/analytics/testrail_exporter.py +++ b/src/frostfs_testlib/analytics/testrail_exporter.py @@ -1,10 +1,10 @@ from testrail_api import TestRailAPI from frostfs_testlib.analytics.test_collector import TestCase -from frostfs_testlib.analytics.test_exporter import TestExporter +from frostfs_testlib.analytics.test_exporter import TСExporter -class TestrailExporter(TestExporter): +class TestrailExporter(TСExporter): def __init__( self, tr_url: str, @@ -62,19 +62,13 @@ class TestrailExporter(TestExporter): It's help do not call TMS each time then we search test case """ for test_suite in self.test_suites_cache: - self.test_cases_cache.extend( - self.api.cases.get_cases(self.tr_project_id, suite_id=test_suite["id"]) - ) + self.test_cases_cache.extend(self.api.cases.get_cases(self.tr_project_id, suite_id=test_suite["id"])) def search_test_case_id(self, test_case_id: str) -> object: """ Find test cases in TestRail (cache) by ID """ - test_cases = [ - test_case - for test_case in self.test_cases_cache - if test_case["custom_autotest_name"] == test_case_id - ] + test_cases = [test_case for test_case in self.test_cases_cache if test_case["custom_autotest_name"] == test_case_id] if len(test_cases) > 1: raise RuntimeError(f"Too many results found in test rail for id {test_case_id}") @@ -87,9 +81,7 @@ class TestrailExporter(TestExporter): """ Get suite name with exact name from Testrail or create if not exist """ - test_rail_suites = [ - suite for suite in self.test_suites_cache if suite["name"] == test_suite_name - ] + test_rail_suites = [suite for suite in self.test_suites_cache if suite["name"] == test_suite_name] if not test_rail_suites: test_rail_suite = self.api.suites.add_suite( @@ -102,17 +94,13 @@ class TestrailExporter(TestExporter): elif len(test_rail_suites) == 1: return test_rail_suites.pop() else: - raise RuntimeError( - f"Too many results found in test rail for suite name {test_suite_name}" - ) + raise RuntimeError(f"Too many results found in test rail for suite name {test_suite_name}") def get_or_create_suite_section(self, test_rail_suite, section_name) -> object: """ Get suite section with exact name from Testrail or create new one if not exist """ - test_rail_sections = [ - section for section in test_rail_suite["sections"] if section["name"] == section_name - ] + test_rail_sections = [section for section in test_rail_suite["sections"] if section["name"] == section_name] if not test_rail_sections: test_rail_section = self.api.sections.add_section( @@ -128,9 +116,7 @@ class TestrailExporter(TestExporter): elif len(test_rail_sections) == 1: return test_rail_sections.pop() else: - raise RuntimeError( - f"Too many results found in test rail for section name {section_name}" - ) + raise RuntimeError(f"Too many results found in test rail for section name {section_name}") def prepare_request_body(self, test_case: TestCase, test_suite, test_suite_section) -> dict: """ @@ -164,9 +150,7 @@ class TestrailExporter(TestExporter): self.api.cases.add_case(**request_body) - def update_test_case( - self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section - ) -> None: + def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None: """ Update test case in Testrail """ diff --git a/src/frostfs_testlib/fixtures.py b/src/frostfs_testlib/fixtures.py new file mode 100644 index 0000000..8f6873f --- /dev/null +++ b/src/frostfs_testlib/fixtures.py @@ -0,0 +1,35 @@ +import logging +import os +from 
importlib.metadata import entry_points + +import pytest +import yaml + +from frostfs_testlib import reporter +from frostfs_testlib.hosting.hosting import Hosting +from frostfs_testlib.resources.common import HOSTING_CONFIG_FILE +from frostfs_testlib.storage import get_service_registry + + +@pytest.fixture(scope="session") +def configure_testlib(): + reporter.get_reporter().register_handler(reporter.AllureHandler()) + reporter.get_reporter().register_handler(reporter.StepsLogger()) + logging.getLogger("paramiko").setLevel(logging.INFO) + + # Register Services for cluster + registry = get_service_registry() + services = entry_points(group="frostfs.testlib.services") + for svc in services: + registry.register_service(svc.name, svc.load()) + + +@pytest.fixture(scope="session") +def hosting(configure_testlib) -> Hosting: + with open(HOSTING_CONFIG_FILE, "r") as file: + hosting_config = yaml.full_load(file) + + hosting_instance = Hosting() + hosting_instance.configure(hosting_config) + + return hosting_instance diff --git a/src/frostfs_testlib/resources/common.py b/src/frostfs_testlib/resources/common.py index 7f8d2c4..03fdce9 100644 --- a/src/frostfs_testlib/resources/common.py +++ b/src/frostfs_testlib/resources/common.py @@ -46,3 +46,8 @@ with open(DEFAULT_WALLET_CONFIG, "w") as file: MAX_REQUEST_ATTEMPTS = 5 RETRY_MODE = "standard" CREDENTIALS_CREATE_TIMEOUT = "1m" + + +HOSTING_CONFIG_FILE = os.getenv( + "HOSTING_CONFIG_FILE", os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "..", ".devenv.hosting.yaml")) +) From a983e0566e73284f70ad6862eceb7c9ef21e120c Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 26 Jul 2024 19:36:20 +0300 Subject: [PATCH 269/363] [#272] Add --generate-key flag to object operations Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/cli/frostfs_cli/container.py | 10 +++++++++- src/frostfs_testlib/cli/frostfs_cli/object.py | 16 ++++++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/cli/frostfs_cli/container.py b/src/frostfs_testlib/cli/frostfs_cli/container.py index 43c3ec6..1ff217f 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/container.py +++ b/src/frostfs_testlib/cli/frostfs_cli/container.py @@ -100,6 +100,7 @@ class FrostfsCliContainer(CliCommand): cid: str, wallet: Optional[str] = None, address: Optional[str] = None, + generate_key: Optional[bool] = None, await_mode: bool = False, to: Optional[str] = None, json_mode: bool = False, @@ -121,6 +122,7 @@ class FrostfsCliContainer(CliCommand): wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. timeout: Timeout for the operation (default 15s). + generate_key: Generate a new private key. Returns: Command's result. @@ -136,6 +138,7 @@ class FrostfsCliContainer(CliCommand): cid: str, wallet: Optional[str] = None, address: Optional[str] = None, + generate_key: Optional[bool] = None, await_mode: bool = False, to: Optional[str] = None, session: Optional[str] = None, @@ -157,6 +160,7 @@ class FrostfsCliContainer(CliCommand): wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. timeout: Timeout for the operation (default 15s). + generate_key: Generate a new private key. Returns: Command's result. 
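Stepping back to the packaging change in the previous patch: registering `frostfs_testlib` under the `pytest11` entry point makes the library an auto-loaded pytest plugin, so the session fixtures from the new `fixtures.py` become available to any suite that installs the package, with no `conftest.py` wiring. A hypothetical consumer test follows; the `hosts` attribute on `Hosting` is assumed from its use elsewhere in the library.

    from frostfs_testlib.hosting.hosting import Hosting

    # 'hosting' is injected by the entry-point plugin; set the HOSTING_CONFIG_FILE
    # environment variable to point at another YAML file instead of the bundled
    # .devenv.hosting.yaml.
    def test_hosting_is_configured(hosting: Hosting):
        assert hosting.hosts, "expected at least one host to be configured"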
@@ -172,6 +176,7 @@ class FrostfsCliContainer(CliCommand): rpc_endpoint: str, wallet: Optional[str] = None, address: Optional[str] = None, + generate_key: Optional[bool] = None, owner: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, @@ -189,6 +194,7 @@ class FrostfsCliContainer(CliCommand): wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. timeout: Timeout for the operation (default 15s). + generate_key: Generate a new private key. Returns: Command's result. @@ -204,6 +210,7 @@ class FrostfsCliContainer(CliCommand): cid: str, wallet: Optional[str] = None, address: Optional[str] = None, + generate_key: Optional[bool] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, timeout: Optional[str] = None, @@ -219,6 +226,7 @@ class FrostfsCliContainer(CliCommand): wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. timeout: Timeout for the operation (default 15s). + generate_key: Generate a new private key. Returns: Command's result. @@ -291,7 +299,7 @@ class FrostfsCliContainer(CliCommand): timeout: duration Timeout for the operation (default 15 s) short: shorten the output of node information. xhdr: Dict with request X-Headers. - generate_key: Generate a new private key + generate_key: Generate a new private key. Returns: diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py index 55c92be..070def0 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/object.py +++ b/src/frostfs_testlib/cli/frostfs_cli/object.py @@ -13,6 +13,7 @@ class FrostfsCliObject(CliCommand): wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, session: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, @@ -25,6 +26,7 @@ class FrostfsCliObject(CliCommand): address: Address of wallet account. bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. + generate_key: Generate new private key. oid: Object ID. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). session: Filepath to a JSON- or binary-encoded token of the object DELETE session. @@ -49,6 +51,7 @@ class FrostfsCliObject(CliCommand): wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, file: Optional[str] = None, header: Optional[str] = None, no_progress: bool = False, @@ -66,6 +69,7 @@ class FrostfsCliObject(CliCommand): bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. file: File to write object payload to. Default: stdout. + generate_key: Generate new private key. header: File to write header to. Default: stdout. no_progress: Do not show progress bar. oid: Object ID. @@ -93,6 +97,7 @@ class FrostfsCliObject(CliCommand): wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, range: Optional[str] = None, salt: Optional[str] = None, ttl: Optional[int] = None, @@ -108,6 +113,7 @@ class FrostfsCliObject(CliCommand): address: Address of wallet account. bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. + generate_key: Generate new private key. oid: Object ID. range: Range to take hash from in the form offset1:length1,... rpc_endpoint: Remote node address (as 'multiaddr' or ':'). 
@@ -135,6 +141,7 @@ class FrostfsCliObject(CliCommand): wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, file: Optional[str] = None, json_mode: bool = False, main_only: bool = False, @@ -153,6 +160,7 @@ class FrostfsCliObject(CliCommand): bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. file: File to write object payload to. Default: stdout. + generate_key: Generate new private key. json_mode: Marshal output in JSON. main_only: Return only main fields. oid: Object ID. @@ -183,6 +191,7 @@ class FrostfsCliObject(CliCommand): expire_at: Optional[int] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, session: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, @@ -195,6 +204,7 @@ class FrostfsCliObject(CliCommand): address: Address of wallet account. bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. + generate_key: Generate new private key. oid: Object ID. lifetime: Lock lifetime. expire_at: Lock expiration epoch. @@ -222,6 +232,7 @@ class FrostfsCliObject(CliCommand): address: Optional[str] = None, attributes: Optional[dict] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, copies_number: Optional[int] = None, disable_filename: bool = False, disable_timestamp: bool = False, @@ -246,6 +257,7 @@ class FrostfsCliObject(CliCommand): disable_timestamp: Do not set well-known timestamp attribute. expire_at: Last epoch in the life of the object. file: File with object payload. + generate_key: Generate new private key. no_progress: Do not show progress bar. notify: Object notification in the form of *epoch*:*topic*; '-' topic means using default. @@ -273,6 +285,7 @@ class FrostfsCliObject(CliCommand): wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, file: Optional[str] = None, json_mode: bool = False, raw: bool = False, @@ -289,6 +302,7 @@ class FrostfsCliObject(CliCommand): bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. file: File to write object payload to. Default: stdout. + generate_key: Generate new private key. json_mode: Marshal output in JSON. oid: Object ID. range: Range to take data from in the form offset:length. @@ -315,6 +329,7 @@ class FrostfsCliObject(CliCommand): wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, + generate_key: Optional[bool] = None, filters: Optional[list] = None, oid: Optional[str] = None, phy: bool = False, @@ -332,6 +347,7 @@ class FrostfsCliObject(CliCommand): bearer: File with signed JSON or binary encoded bearer token. cid: Container ID. filters: Repeated filter expressions or files with protobuf JSON. + generate_key: Generate new private key. oid: Object ID. phy: Search physically stored objects. root: Search for user objects. 
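The `generate_key` parameter added across the container and object wrappers above is forwarded by `_execute` as the CLI's `--generate-key` flag, letting a request be signed with a freshly generated ephemeral key rather than an existing wallet key. A minimal, hypothetical call, with placeholder endpoint and IDs:

    # Assumes an existing FrostfsCli instance named `cli`.
    result = cli.object.get(
        rpc_endpoint="s01.frostfs.devenv:8080",
        cid="<container-id>",
        oid="<object-id>",
        generate_key=True,  # emitted as --generate-key
    )
    print(result.stdout)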
From 6b036a09b757782bbcde8ae087513075ca03e094 Mon Sep 17 00:00:00 2001 From: "s.makhov" Date: Wed, 31 Jul 2024 19:53:28 +0300 Subject: [PATCH 270/363] [#275] Add 'retry' and 'PRESET_CONTAINER_CREATION_RETRY_COUNT' variables to define max num of container creation retries --- src/frostfs_testlib/load/load_config.py | 2 ++ src/frostfs_testlib/resources/load_params.py | 1 + 2 files changed, 3 insertions(+) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 767e9f2..15103e0 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -190,6 +190,8 @@ class Preset(MetaConfig): containers_count: Optional[int] = metadata_field(grpc_preset_scenarios, "containers", None, False) # Container placement policy for containers for gRPC container_placement_policy: Optional[list[str]] = metadata_field(grpc_preset_scenarios, "policy", None, False, formatter=force_list) + # Number of retries for creation of container + container_creation_retry: Optional[int] = metadata_field(grpc_preset_scenarios, "retry", None, False) # ------ S3 ------ # Amount of buckets which should be created diff --git a/src/frostfs_testlib/resources/load_params.py b/src/frostfs_testlib/resources/load_params.py index 97193cc..ad3ed1c 100644 --- a/src/frostfs_testlib/resources/load_params.py +++ b/src/frostfs_testlib/resources/load_params.py @@ -26,6 +26,7 @@ BACKGROUND_LOAD_CONTAINER_PLACEMENT_POLICY = os.getenv( ) BACKGROUND_LOAD_S3_LOCATION = os.getenv("BACKGROUND_LOAD_S3_LOCATION", "node-off") PRESET_CONTAINERS_COUNT = os.getenv("CONTAINERS_COUNT", "40") +PRESET_CONTAINER_CREATION_RETRY_COUNT = os.getenv("CONTAINER_CREATION_RETRY_COUNT", "20") # TODO: At lease one object is required due to bug in xk6 (buckets with no objects produce millions exceptions in read) PRESET_OBJECTS_COUNT = os.getenv("OBJ_COUNT", "1") K6_DIRECTORY = os.getenv("K6_DIRECTORY", "/etc/k6") From 8306a9f3ff4c231b145e6d2a2d264ce7cfe4973a Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Thu, 1 Aug 2024 16:32:41 +0300 Subject: [PATCH 271/363] [#276] Context manager for parralel func Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/testing/parallel.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/frostfs_testlib/testing/parallel.py b/src/frostfs_testlib/testing/parallel.py index 9c36118..0549e61 100644 --- a/src/frostfs_testlib/testing/parallel.py +++ b/src/frostfs_testlib/testing/parallel.py @@ -1,10 +1,22 @@ import itertools from concurrent.futures import Future, ThreadPoolExecutor +from contextlib import contextmanager from typing import Callable, Collection, Optional, Union MAX_WORKERS = 50 +@contextmanager +def parallel_workers_limit(workers_count: int): + global MAX_WORKERS + original_value = MAX_WORKERS + MAX_WORKERS = workers_count + try: + yield + finally: + MAX_WORKERS = original_value + + def parallel( fn: Union[Callable, list[Callable]], parallel_items: Optional[Collection] = None, From ea60c2104a0941a2f19549f6f412b97fb11e6002 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Mon, 5 Aug 2024 09:18:05 +0300 Subject: [PATCH 272/363] [#277] MInor change for shard Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/cli/frostfs_cli/shards.py | 98 +++++++++++++++++++ .../storage/controllers/shards_watcher.py | 30 +++--- 2 files changed, 113 insertions(+), 15 deletions(-) diff --git a/src/frostfs_testlib/cli/frostfs_cli/shards.py b/src/frostfs_testlib/cli/frostfs_cli/shards.py index 4399b13..e88707a 100644 --- 
a/src/frostfs_testlib/cli/frostfs_cli/shards.py +++ b/src/frostfs_testlib/cli/frostfs_cli/shards.py @@ -143,3 +143,101 @@ class FrostfsCliShards(CliCommand): **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]}, ) + def evacuation_start( + self, + endpoint: str, + id: Optional[str] = None, + scope: Optional[str] = None, + all: bool = False, + no_errors: bool = True, + await_mode: bool = False, + address: Optional[str] = None, + timeout: Optional[str] = None, + no_progress: bool = False, + ) -> CommandResult: + """ + Objects evacuation from shard to other shards. + + Args: + address: Address of wallet account + all: Process all shards + await: Block execution until evacuation is completed + endpoint: Remote node control address (as 'multiaddr' or ':') + id: List of shard IDs in base58 encoding + no_errors: Skip invalid/unreadable objects (default true) + no_progress: Print progress if await provided + scope: Evacuation scope; possible values: trees, objects, all (default "all") + timeout: Timeout for an operation (default 15s) + + Returns: + Command's result. + """ + return self._execute( + "control shards evacuation start", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def evacuation_reset( + self, + endpoint: str, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """ + Reset evacuate objects from shard to other shards status. + + Args: + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + timeout: Timeout for an operation (default 15s) + Returns: + Command's result. + """ + return self._execute( + "control shards evacuation reset", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def evacuation_stop( + self, + endpoint: str, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """ + Stop running evacuate process from shard to other shards. + + Args: + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + timeout: Timeout for an operation (default 15s) + + Returns: + Command's result. + """ + return self._execute( + "control shards evacuation stop", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def evacuation_status( + self, + endpoint: str, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """ + Get evacuate objects from shard to other shards status. + + Args: + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + timeout: Timeout for an operation (default 15s) + + Returns: + Command's result. 
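The four evacuation wrappers above map one-to-one onto `frostfs-cli control shards evacuation {start,reset,stop,status}`. A hedged sketch of a typical flow, where the shell object, executable path, control endpoint and shard ID are placeholders and the constructor usage matches how `ShardsWatcher` builds `FrostfsCliShards` later in this patch:

    shards = FrostfsCliShards(shell, "frostfs-cli")
    endpoint = "s01.frostfs.devenv:8081"  # control endpoint, not the data one

    # Move objects off one shard and block until done.
    shards.evacuation_start(endpoint, id="<shard-id-base58>", await_mode=True, no_progress=True)
    print(shards.evacuation_status(endpoint).stdout)

    # Abandon and clear a stuck evacuation.
    shards.evacuation_stop(endpoint)
    shards.evacuation_reset(endpoint)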
+ """ + return self._execute( + "control shards evacuation status", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) diff --git a/src/frostfs_testlib/storage/controllers/shards_watcher.py b/src/frostfs_testlib/storage/controllers/shards_watcher.py index 3d313f1..5017406 100644 --- a/src/frostfs_testlib/storage/controllers/shards_watcher.py +++ b/src/frostfs_testlib/storage/controllers/shards_watcher.py @@ -2,22 +2,22 @@ import json from typing import Any from frostfs_testlib.cli.frostfs_cli.shards import FrostfsCliShards +from frostfs_testlib.shell.interfaces import CommandResult from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.testing.test_control import wait_for_success class ShardsWatcher: - shards_snapshots: list[dict[str, Any]] = [] - def __init__(self, node_under_test: ClusterNode) -> None: + self.shards_snapshots: list[dict[str, Any]] = [] self.storage_node = node_under_test.storage_node self.take_shards_snapshot() - def take_shards_snapshot(self): + def take_shards_snapshot(self) -> None: snapshot = self.get_shards_snapshot() self.shards_snapshots.append(snapshot) - def get_shards_snapshot(self): + def get_shards_snapshot(self) -> dict[str, Any]: shards_snapshot: dict[str, Any] = {} shards = self.get_shards() @@ -26,17 +26,17 @@ class ShardsWatcher: return shards_snapshot - def _get_current_snapshot(self): + def _get_current_snapshot(self) -> dict[str, Any]: return self.shards_snapshots[-1] - def _get_previous_snapshot(self): + def _get_previous_snapshot(self) -> dict[str, Any]: return self.shards_snapshots[-2] - def _is_shard_present(self, shard_id): + def _is_shard_present(self, shard_id) -> bool: snapshot = self._get_current_snapshot() return shard_id in snapshot - def get_shards_with_new_errors(self): + def get_shards_with_new_errors(self) -> dict[str, Any]: current_snapshot = self._get_current_snapshot() previous_snapshot = self._get_previous_snapshot() shards_with_new_errors: dict[str, Any] = {} @@ -46,7 +46,7 @@ class ShardsWatcher: return shards_with_new_errors - def get_shards_with_errors(self): + def get_shards_with_errors(self) -> dict[str, Any]: snapshot = self.get_shards_snapshot() shards_with_errors: dict[str, Any] = {} for shard_id, shard in snapshot.items(): @@ -55,7 +55,7 @@ class ShardsWatcher: return shards_with_errors - def get_shard_status(self, shard_id: str): + def get_shard_status(self, shard_id: str): # -> Any: snapshot = self.get_shards_snapshot() assert shard_id in snapshot, f"Shard {shard_id} is missing: {snapshot}" @@ -63,18 +63,18 @@ class ShardsWatcher: return snapshot[shard_id]["mode"] @wait_for_success(60, 2) - def await_for_all_shards_status(self, status: str): + def await_for_all_shards_status(self, status: str) -> None: snapshot = self.get_shards_snapshot() for shard_id in snapshot: assert snapshot[shard_id]["mode"] == status, f"Shard {shard_id} have wrong shard status" @wait_for_success(60, 2) - def await_for_shard_status(self, shard_id: str, status: str): + def await_for_shard_status(self, shard_id: str, status: str) -> None: assert self.get_shard_status(shard_id) == status @wait_for_success(60, 2) - def await_for_shard_have_new_errors(self, shard_id: str): + def await_for_shard_have_new_errors(self, shard_id: str) -> None: self.take_shards_snapshot() assert self._is_shard_present(shard_id) shards_with_new_errors = self.get_shards_with_new_errors() @@ -82,7 +82,7 @@ class ShardsWatcher: assert shard_id in shards_with_new_errors, f"Expected shard {shard_id} to have new errors, but 
haven't {self.shards_snapshots[-1]}" @wait_for_success(300, 5) - def await_for_shards_have_no_new_errors(self): + def await_for_shards_have_no_new_errors(self) -> None: self.take_shards_snapshot() shards_with_new_errors = self.get_shards_with_new_errors() assert len(shards_with_new_errors) == 0 @@ -102,7 +102,7 @@ class ShardsWatcher: return json.loads(response.stdout.split(">", 1)[1]) - def set_shard_mode(self, shard_id: str, mode: str, clear_errors: bool = True): + def set_shard_mode(self, shard_id: str, mode: str, clear_errors: bool = True) -> CommandResult: shards_cli = FrostfsCliShards( self.storage_node.host.get_shell(), self.storage_node.host.get_cli_config("frostfs-cli").exec_path, From 54b42e2d8d1c3815d1379918f54d432381a685b2 Mon Sep 17 00:00:00 2001 From: Roman Chernykh Date: Wed, 31 Jul 2024 18:32:08 +0500 Subject: [PATCH 273/363] [#274] Fix iam_attach_group_policy function --- src/frostfs_testlib/s3/aws_cli_client.py | 2 +- src/frostfs_testlib/s3/interfaces.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index ae9254c..8169afe 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -975,7 +975,7 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) assert response.get("Policy"), f"Expected Policy in response:\n{response}" - assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}" + assert response["Policy"].get("Arn") == policy_arn, f"PolicyArn should be equal to {policy_arn}" return response diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index f3793e0..b1825d5 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -408,7 +408,7 @@ class S3ClientWrapper(HumanReadableABC): """Adds the specified user to the specified group""" @abstractmethod - def iam_attach_group_policy(self, group: str, policy_arn: str) -> dict: + def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: """Attaches the specified managed policy to the specified IAM group""" @abstractmethod From ae9e8d8c30217485d5442525d32c432aa3f183ce Mon Sep 17 00:00:00 2001 From: Roman Chernykh Date: Fri, 2 Aug 2024 17:38:56 +0500 Subject: [PATCH 274/363] [#274] Fix iam_get_policy function --- src/frostfs_testlib/s3/boto3_client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index 5686b78..a644a6f 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -776,7 +776,7 @@ class Boto3ClientWrapper(S3ClientWrapper): def iam_get_policy(self, policy_arn: str) -> dict: response = self.boto3_iam_client.get_policy(PolicyArn=policy_arn) assert response.get("Policy"), f"Expected Policy in response:\n{response}" - assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}" + assert response["Policy"].get("Arn") == policy_arn, f"PolicyArn should be equal to {policy_arn}" return response From 5bdacdf5ba30d4e60fef82a1c113a1f467c3c99a Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Thu, 25 Jul 2024 15:32:07 +0300 Subject: [PATCH 275/363] [#269] Fix get contracts method --- .../storage/controllers/cluster_state_controller.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index cec5ed3..5d87a60 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -538,4 +538,4 @@ class ClusterStateController: shell=cluster_node.host.get_shell(), frostfs_adm_exec_path=FROSTFS_ADM_EXEC, ) - return frostfs_adm.morph.dump_hashes(cluster_node.morph_chain.get_endpoint(), domain_name).stdout + return frostfs_adm.morph.dump_hashes(cluster_node.morph_chain.get_http_endpoint(), domain_name).stdout From 8a8b35846e9bca105233a9cad30d39d8e98dd312 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Wed, 7 Aug 2024 17:35:02 +0300 Subject: [PATCH 276/363] [#278] Small QoL updates Signed-off-by: a.berezin --- src/frostfs_testlib/resources/common.py | 2 ++ src/frostfs_testlib/shell/local_shell.py | 5 ++++- src/frostfs_testlib/steps/cli/object.py | 7 +------ src/frostfs_testlib/storage/dataclasses/ape.py | 5 +++++ 4 files changed, 12 insertions(+), 7 deletions(-) diff --git a/src/frostfs_testlib/resources/common.py b/src/frostfs_testlib/resources/common.py index 03fdce9..1c93b12 100644 --- a/src/frostfs_testlib/resources/common.py +++ b/src/frostfs_testlib/resources/common.py @@ -51,3 +51,5 @@ CREDENTIALS_CREATE_TIMEOUT = "1m" HOSTING_CONFIG_FILE = os.getenv( "HOSTING_CONFIG_FILE", os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "..", ".devenv.hosting.yaml")) ) + +MORE_LOG = os.getenv("MORE_LOG", "1") diff --git a/src/frostfs_testlib/shell/local_shell.py b/src/frostfs_testlib/shell/local_shell.py index 2fb6631..746070f 100644 --- a/src/frostfs_testlib/shell/local_shell.py +++ b/src/frostfs_testlib/shell/local_shell.py @@ -1,15 +1,18 @@ import logging import subprocess import tempfile +from contextlib import nullcontext from datetime import datetime from typing import IO, Optional import pexpect from frostfs_testlib import reporter +from frostfs_testlib.resources.common import MORE_LOG from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell logger = logging.getLogger("frostfs.testlib.shell") +step_context = reporter.step if MORE_LOG == "1" else nullcontext class LocalShell(Shell): @@ -28,7 +31,7 @@ class LocalShell(Shell): for inspector in [*self.command_inspectors, *extra_inspectors]: command = inspector.inspect(original_command, command) - with reporter.step(f"Executing command: {command}"): + with step_context(f"Executing command: {command}"): if options.interactive_inputs: return self._exec_interactive(command, options) return self._exec_non_interactive(command, options) diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index 7de7a71..72debc2 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ -15,7 +15,7 @@ from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing import wait_for_success from frostfs_testlib.utils import json_utils -from frostfs_testlib.utils.cli_utils import parse_cmd_table, parse_netmap_output +from frostfs_testlib.utils.cli_utils import parse_netmap_output from frostfs_testlib.utils.file_utils import TestFile logger = logging.getLogger("NeoLogger") @@ -623,25 +623,20 @@ def head_object( # If response is Complex Object header, it has `splitId` key if "splitId" in 
decoded.keys(): - logger.info("decoding split header") return json_utils.decode_split_header(decoded) # If response is Last or Linking Object header, # it has `header` dictionary and non-null `split` dictionary if "split" in decoded["header"].keys(): if decoded["header"]["split"]: - logger.info("decoding linking object") return json_utils.decode_linking_object(decoded) if decoded["header"]["objectType"] == "STORAGE_GROUP": - logger.info("decoding storage group") return json_utils.decode_storage_group(decoded) if decoded["header"]["objectType"] == "TOMBSTONE": - logger.info("decoding tombstone") return json_utils.decode_tombstone(decoded) - logger.info("decoding simple header") return json_utils.decode_simple_header(decoded) diff --git a/src/frostfs_testlib/storage/dataclasses/ape.py b/src/frostfs_testlib/storage/dataclasses/ape.py index 84b3033..de1648e 100644 --- a/src/frostfs_testlib/storage/dataclasses/ape.py +++ b/src/frostfs_testlib/storage/dataclasses/ape.py @@ -47,6 +47,7 @@ class ConditionType(HumanReadableEnum): class ConditionKey(HumanReadableEnum): ROLE = '"\\$Actor:role"' PUBLIC_KEY = '"\\$Actor:publicKey"' + OBJECT_TYPE = '"\\$Object:objectType"' class MatchType(HumanReadableEnum): @@ -75,6 +76,10 @@ class Condition: def by_key(*args, **kwargs) -> "Condition": return Condition(ConditionKey.PUBLIC_KEY, *args, **kwargs) + @staticmethod + def by_object_type(*args, **kwargs) -> "Condition": + return Condition(ConditionKey.OBJECT_TYPE, *args, **kwargs) + class Rule: def __init__( From 0ba4a73db336b47f8023036999aab4b24096458d Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Thu, 8 Aug 2024 18:34:46 +0300 Subject: [PATCH 277/363] [#279] Add objectID filter for APE Signed-off-by: a.berezin --- src/frostfs_testlib/storage/dataclasses/ape.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/frostfs_testlib/storage/dataclasses/ape.py b/src/frostfs_testlib/storage/dataclasses/ape.py index de1648e..b6563f4 100644 --- a/src/frostfs_testlib/storage/dataclasses/ape.py +++ b/src/frostfs_testlib/storage/dataclasses/ape.py @@ -48,6 +48,7 @@ class ConditionKey(HumanReadableEnum): ROLE = '"\\$Actor:role"' PUBLIC_KEY = '"\\$Actor:publicKey"' OBJECT_TYPE = '"\\$Object:objectType"' + OBJECT_ID = '"\\$Object:objectID"' class MatchType(HumanReadableEnum): @@ -80,6 +81,10 @@ class Condition: def by_object_type(*args, **kwargs) -> "Condition": return Condition(ConditionKey.OBJECT_TYPE, *args, **kwargs) + @staticmethod + def by_object_id(*args, **kwargs) -> "Condition": + return Condition(ConditionKey.OBJECT_ID, *args, **kwargs) + class Rule: def __init__( From 1c2ed2592912834073d3ab9c7d139abc2b04e346 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Tue, 13 Aug 2024 10:09:28 +0300 Subject: [PATCH 278/363] [#280] Fix neo-go query height in steps Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/steps/cli/object.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index 72debc2..f28de06 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ -690,11 +690,13 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict: latest_block = first_line.split(":") # taking second line from command's output contain wallet key second_line = output.split("\n")[1] - validated_state = second_line.split(":") - return { - latest_block[0].replace(":", ""): int(latest_block[1]), - validated_state[0].replace(":", ""): int(validated_state[1]), - } + if 
second_line != "": + validated_state = second_line.split(":") + return { + latest_block[0].replace(":", ""): int(latest_block[1]), + validated_state[0].replace(":", ""): int(validated_state[1]), + } + return {latest_block[0].replace(":", ""): int(latest_block[1])} @wait_for_success() From 6926c09dbe4574f5b54b140163b08a1fed24376b Mon Sep 17 00:00:00 2001 From: "m.malygina" Date: Wed, 7 Aug 2024 16:19:00 +0300 Subject: [PATCH 279/363] [#281] add hostname to HostConfig Signed-off-by: m.malygina --- .devenv.hosting.yaml | 1 + src/frostfs_testlib/hosting/config.py | 1 + 2 files changed, 2 insertions(+) diff --git a/.devenv.hosting.yaml b/.devenv.hosting.yaml index d096625..f3b8c51 100644 --- a/.devenv.hosting.yaml +++ b/.devenv.hosting.yaml @@ -1,5 +1,6 @@ hosts: - address: localhost + hostname: localhost attributes: sudo_shell: false plugin_name: docker diff --git a/src/frostfs_testlib/hosting/config.py b/src/frostfs_testlib/hosting/config.py index f52f8b7..6cdee39 100644 --- a/src/frostfs_testlib/hosting/config.py +++ b/src/frostfs_testlib/hosting/config.py @@ -60,6 +60,7 @@ class HostConfig: """ plugin_name: str + hostname: str healthcheck_plugin_name: str address: str s3_creds_plugin_name: str = field(default="authmate") From 8ae1b99db9777944d59507b36d2e76acd52942ba Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Fri, 16 Aug 2024 10:22:21 +0300 Subject: [PATCH 280/363] [#282] New grpc realization for object operations Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/storage/constants.py | 7 + .../dataclasses/storage_object_info.py | 13 + .../storage/grpc_operations/__init__.py | 0 .../grpc_operations/client_wrappers.py | 14 + .../implementations/__init__.py | 0 .../grpc_operations/implementations/chunks.py | 124 ++++ .../implementations/container.py | 112 ++++ .../grpc_operations/implementations/object.py | 616 ++++++++++++++++++ .../storage/grpc_operations/interfaces.py | 285 ++++++++ 9 files changed, 1171 insertions(+) create mode 100644 src/frostfs_testlib/storage/grpc_operations/__init__.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/client_wrappers.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/implementations/container.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/implementations/object.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/interfaces.py diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 66bf5cc..84f8d24 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -16,3 +16,10 @@ class ConfigAttributes: ENDPOINT_PROMETHEUS = "endpoint_prometheus" CONTROL_ENDPOINT = "control_endpoint" UN_LOCODE = "un_locode" + + +class PlacementRule: + DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X" + SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X" + REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X" + DEFAULT_EC_PLACEMENT_RULE = "EC 3.1" diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py index 1ecb300..d192de5 100644 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -77,3 +77,16 @@ class NodeNetInfo: 
maintenance_mode_allowed: str = None eigen_trust_alpha: str = None eigen_trust_iterations: str = None + + +@dataclass +class Chunk: + def __init__(self, object_id: str, required_nodes: list, confirmed_nodes: list, ec_parent_object_id: str, ec_index: int) -> None: + self.object_id = object_id + self.required_nodes = required_nodes + self.confirmed_nodes = confirmed_nodes + self.ec_parent_object_id = ec_parent_object_id + self.ec_index = ec_index + + def __str__(self) -> str: + return self.object_id diff --git a/src/frostfs_testlib/storage/grpc_operations/__init__.py b/src/frostfs_testlib/storage/grpc_operations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py b/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py new file mode 100644 index 0000000..8cef23b --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py @@ -0,0 +1,14 @@ +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.storage.grpc_operations import interfaces +from frostfs_testlib.storage.grpc_operations.implementations import container, object + + +class CliClientWrapper(interfaces.GrpcClientWrapper): + def __init__(self, cli: FrostfsCli) -> None: + self.cli = cli + self.object: interfaces.ObjectInterface = object.ObjectOperations(self.cli) + self.container: interfaces.ContainerInterface = container.ContainerOperations(self.cli) + + +class RpcClientWrapper(interfaces.GrpcClientWrapper): + pass # The next series diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py b/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py new file mode 100644 index 0000000..70d0823 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py @@ -0,0 +1,124 @@ +import json +from typing import Optional + +from frostfs_testlib import reporter +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.controllers.shards_watcher import ShardsWatcher +from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo +from frostfs_testlib.storage.grps_operations import interfaces +from frostfs_testlib.utils.cli_utils import parse_netmap_output + + +class ChunksOperations(interfaces.ChunksInterface): + def __init__(self, cli: FrostfsCli) -> None: + self.cli = cli + + @reporter.step("Search node without chunks") + def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]: + if not endpoint: + endpoint = cluster.default_rpc_endpoint + netmap = parse_netmap_output(self.cli.netmap.snapshot(endpoint, timeout=CLI_DEFAULT_TIMEOUT).stdout) + chunks_node_key = [] + for chunk in chunks: + chunks_node_key.extend(chunk.confirmed_nodes) + for node_info in netmap.copy(): + if node_info.node_id in chunks_node_key and node_info in netmap: + netmap.remove(node_info) + result = [] + for node_info in netmap: + for cluster_node in cluster.cluster_nodes: + if node_info.node == cluster_node.host_ip: + result.append(cluster_node) + return result + + @reporter.step("Search node with chunk {chunk}") + def 
get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]:
+        netmap = parse_netmap_output(self.cli.netmap.snapshot(cluster.default_rpc_endpoint, timeout=CLI_DEFAULT_TIMEOUT).stdout)
+        for node_info in netmap:
+            if node_info.node_id in chunk.confirmed_nodes:
+                for cluster_node in cluster.cluster_nodes:
+                    if cluster_node.host_ip == node_info.node:
+                        return (cluster_node, node_info)
+
+    @reporter.step("Search shard with chunk {chunk}")
+    def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str:
+        oid_path = f"{chunk.object_id[0]}/{chunk.object_id[1]}/{chunk.object_id[2]}/{chunk.object_id[3]}"
+        node_shell = node.storage_node.host.get_shell()
+        shards_watcher = ShardsWatcher(node)
+
+        with reporter.step("Search object file"):
+            for shard_id, shard_info in shards_watcher.shards_snapshots[-1].items():
+                check_dir = node_shell.exec(f" [ -d {shard_info['blobstor'][1]['path']}/{oid_path} ] && echo 1 || echo 0").stdout
+                if "1" in check_dir.strip():
+                    return shard_id
+
+    @reporter.step("Get all chunks")
+    def get_all(
+        self,
+        rpc_endpoint: str,
+        cid: str,
+        oid: str,
+        wallet: Optional[str] = None,
+        address: Optional[str] = None,
+        bearer: Optional[str] = None,
+        generate_key: Optional[bool] = None,
+        trace: bool = False,
+        root: bool = False,
+        verify_presence_all: bool = False,
+        json: bool = True,
+        ttl: Optional[int] = None,
+        xhdr: Optional[dict] = None,
+        timeout: Optional[str] = None,
+    ) -> list[Chunk]:
+        object_nodes = self.cli.object.nodes(**{param: value for param, value in locals().items() if param not in ["self"]})
+        return self._parse_object_nodes(object_nodes.stdout)
+
+    @reporter.step("Get last parity chunk")
+    def get_parity(
+        self,
+        rpc_endpoint: str,
+        cid: str,
+        wallet: Optional[str] = None,
+        address: Optional[str] = None,
+        bearer: Optional[str] = None,
+        generate_key: Optional[bool] = None,
+        oid: Optional[str] = None,
+        trace: bool = False,
+        root: bool = False,
+        verify_presence_all: bool = False,
+        json: bool = True,
+        ttl: Optional[int] = None,
+        xhdr: Optional[dict] = None,
+        timeout: Optional[str] = None,
+    ) -> Chunk:
+        object_nodes = self.cli.object.nodes(**{param: value for param, value in locals().items() if param not in ["self"]})
+        return self._parse_object_nodes(object_nodes.stdout)[-1]
+
+    @reporter.step("Get first data chunk")
+    def get_first_data(
+        self,
+        rpc_endpoint: str,
+        cid: str,
+        wallet: Optional[str] = None,
+        address: Optional[str] = None,
+        bearer: Optional[str] = None,
+        generate_key: Optional[bool] = None,
+        oid: Optional[str] = None,
+        trace: bool = False,
+        root: bool = False,
+        verify_presence_all: bool = False,
+        json: bool = True,
+        ttl: Optional[int] = None,
+        xhdr: Optional[dict] = None,
+        timeout: Optional[str] = None,
+    ) -> Chunk:
+        object_nodes = self.cli.object.nodes(**{param: value for param, value in locals().items() if param not in ["self"]})
+        return self._parse_object_nodes(object_nodes.stdout)[0]
+
+    def _parse_object_nodes(self, object_nodes: str) -> list[Chunk]:
+        parse_result = json.loads(object_nodes)
+        if parse_result.get("errors"):
+            raise RuntimeError(parse_result["errors"])
+        return [Chunk(**chunk) for chunk in parse_result["data_objects"]]
diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py
new file mode 100644
index 0000000..077bdfd
--- /dev/null
+++ b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py
@@ -0,0 +1,112 @@
+import logging
+from typing import 
Optional + +from frostfs_testlib import reporter +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT +from frostfs_testlib.storage.constants import PlacementRule +from frostfs_testlib.storage.grps_operations import interfaces + +logger = logging.getLogger("NeoLogger") + + +class ContainerOperations(interfaces.ContainerInterface): + def __init__(self, cli: FrostfsCli) -> None: + self.cli = cli + + @reporter.step("Create Container") + def create( + self, + endpoint: str, + rule: str = PlacementRule.DEFAULT_PLACEMENT_RULE, + basic_acl: str = "", + attributes: Optional[dict] = None, + session_token: str = "", + name: Optional[str] = None, + options: Optional[dict] = None, + await_mode: bool = True, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + """ + A wrapper for `frostfs-cli container create` call. + + Args: + wallet (WalletInfo): a wallet on whose behalf a container is created + rule (optional, str): placement rule for container + basic_acl (optional, str): an ACL for container, will be + appended to `--basic-acl` key + attributes (optional, dict): container attributes , will be + appended to `--attributes` key + session_token (optional, str): a path to session token file + session_wallet(optional, str): a path to the wallet which signed + the session token; this parameter makes sense + when paired with `session_token` + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + options (optional, dict): any other options to pass to the call + name (optional, str): container name attribute + await_mode (bool): block execution until container is persisted + wait_for_creation (): Wait for container shows in container list + timeout: Timeout for the operation. + + Returns: + (str): CID of the created container + """ + result = self.cli.container.create( + rpc_endpoint=endpoint, + policy=rule, + basic_acl=basic_acl, + attributes=attributes, + name=name, + session=session_token, + await_mode=await_mode, + timeout=timeout, + **options or {}, + ) + + cid = self._parse_cid(result.stdout) + + logger.info("Container created; waiting until it is persisted in the sidechain") + + return cid + + @reporter.step("List Containers") + def list(self, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT) -> list[str]: + """ + A wrapper for `frostfs-cli container list` call. It returns all the + available containers for the given wallet. + Args: + wallet (WalletInfo): a wallet on whose behalf we list the containers + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + timeout: Timeout for the operation. + Returns: + (list): list of containers + """ + result = self.cli.container.list(rpc_endpoint=endpoint, timeout=timeout) + return result.stdout.split() + + def _parse_cid(self, output: str) -> str: + """ + Parses container ID from a given CLI output. The input string we expect: + container ID: 2tz86kVTDpJxWHrhw3h6PbKMwkLtBEwoqhHQCKTre1FN + awaiting... + container has been persisted on sidechain + We want to take 'container ID' value from the string. 
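+        In the sample above, that value is '2tz86kVTDpJxWHrhw3h6PbKMwkLtBEwoqhHQCKTre1FN'.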
+ + Args: + output (str): CLI output to parse + + Returns: + (str): extracted CID + """ + try: + # taking first line from command's output + first_line = output.split("\n")[0] + except Exception: + first_line = "" + logger.error(f"Got empty output: {output}") + splitted = first_line.split(": ") + if len(splitted) != 2: + raise ValueError(f"no CID was parsed from command output: \t{first_line}") + return splitted[1] diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py new file mode 100644 index 0000000..a967853 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py @@ -0,0 +1,616 @@ +import json +import logging +import os +import re +import uuid +from typing import Any, Optional + +from frostfs_testlib import reporter, utils +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT +from frostfs_testlib.resources.common import ASSETS_DIR +from frostfs_testlib.shell.interfaces import CommandResult +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.grps_operations import interfaces +from frostfs_testlib.storage.grps_operations.implementations.chunks import ChunksOperations +from frostfs_testlib.testing.test_control import wait_for_success +from frostfs_testlib.utils import cli_utils, file_utils + +logger = logging.getLogger("NeoLogger") + + +class ObjectOperations(interfaces.ObjectInterface): + def __init__(self, cli: FrostfsCli) -> None: + self.cli = cli + self.chunks: interfaces.ChunksInterface = ChunksOperations(self.cli) + + @reporter.step("Delete object") + def delete( + self, + cid: str, + oid: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + """ + DELETE an Object. + + Args: + cid: ID of Container where we get the Object from + oid: ID of Object we are going to delete + bearer: path to Bearer Token file, appends to `--bearer` key + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + xhdr: Request X-Headers in form of Key=Value + session: path to a JSON-encoded container session token + timeout: Timeout for the operation. + Returns: + (str): Tombstone ID + """ + result = self.cli.object.delete( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + bearer=bearer, + xhdr=xhdr, + session=session, + timeout=timeout, + ) + + id_str = result.stdout.split("\n")[1] + tombstone = id_str.split(":")[1] + return tombstone.strip() + + @reporter.step("Get object") + def get( + self, + cid: str, + oid: str, + endpoint: str, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> file_utils.TestFile: + """ + GET from FrostFS. + + Args: + cid (str): ID of Container where we get the Object from + oid (str): Object ID + bearer: path to Bearer Token file, appends to `--bearer` key + write_object: path to downloaded file, appends to `--file` key + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + no_progress(optional, bool): do not show progress bar + xhdr (optional, dict): Request X-Headers in form of Key=Value + session (optional, dict): path to a JSON-encoded container session token + timeout: Timeout for the operation. 
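+
+        Example (illustrative sketch; `object_ops` stands for an ObjectOperations
+        instance built around a configured FrostfsCli, endpoint is a placeholder):
+            test_file = object_ops.get(cid, oid, endpoint="s01.frostfs.devenv:8080")
+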
+ Returns: + (str): path to downloaded file + """ + if not write_object: + write_object = str(uuid.uuid4()) + test_file = file_utils.TestFile(os.path.join(ASSETS_DIR, write_object)) + + self.cli.object.get( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + file=test_file, + bearer=bearer, + no_progress=no_progress, + xhdr=xhdr, + session=session, + timeout=timeout, + ) + + return test_file + + @reporter.step("Get object from random node") + def get_from_random_node( + self, + cid: str, + oid: str, + cluster: Cluster, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + """ + GET from FrostFS random storage node + + Args: + cid: ID of Container where we get the Object from + oid: Object ID + cluster: cluster object + bearer (optional, str): path to Bearer Token file, appends to `--bearer` key + write_object (optional, str): path to downloaded file, appends to `--file` key + no_progress(optional, bool): do not show progress bar + xhdr (optional, dict): Request X-Headers in form of Key=Value + session (optional, dict): path to a JSON-encoded container session token + timeout: Timeout for the operation. + Returns: + (str): path to downloaded file + """ + endpoint = cluster.get_random_storage_rpc_endpoint() + return self.get( + cid, + oid, + endpoint, + bearer, + write_object, + xhdr, + no_progress, + session, + timeout, + ) + + @reporter.step("Get hash object") + def hash( + self, + rpc_endpoint: str, + cid: str, + oid: str, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + range: Optional[str] = None, + salt: Optional[str] = None, + ttl: Optional[int] = None, + session: Optional[str] = None, + hash_type: Optional[str] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + """ + Get object hash. + + Args: + address: Address of wallet account. + bearer: File with signed JSON or binary encoded bearer token. + cid: Container ID. + generate_key: Generate new private key. + oid: Object ID. + range: Range to take hash from in the form offset1:length1,... + rpc_endpoint: Remote node address (as 'multiaddr' or ':'). + salt: Salt in hex format. + ttl: TTL value in request meta header (default 2). + session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session. + hash_type: Hash type. Either 'sha256' or 'tz' (default "sha256"). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Dict with request X-Headers. + timeout: Timeout for the operation (default 15s). + + Returns: + Command's result. + """ + result = self.cli.object.hash( + rpc_endpoint=rpc_endpoint, + cid=cid, + oid=oid, + address=address, + bearer=bearer, + generate_key=generate_key, + range=range, + salt=salt, + ttl=ttl, + xhdr=xhdr, + session=session, + hash_type=hash_type, + timeout=timeout, + ) + return result.stdout + + @reporter.step("Head object") + def head( + self, + cid: str, + oid: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + json_output: bool = True, + is_raw: bool = False, + is_direct: bool = False, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> CommandResult | Any: + """ + HEAD an Object. 
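+        With `json_output=True` the response is parsed into a dict; otherwise the raw
+        CommandResult is returned (see Returns below).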
+ + Args: + cid (str): ID of Container where we get the Object from + oid (str): ObjectID to HEAD + bearer (optional, str): path to Bearer Token file, appends to `--bearer` key + endpoint(optional, str): FrostFS endpoint to send request to + json_output(optional, bool): return response in JSON format or not; this flag + turns into `--json` key + is_raw(optional, bool): send "raw" request or not; this flag + turns into `--raw` key + is_direct(optional, bool): send request directly to the node or not; this flag + turns into `--ttl 1` key + xhdr (optional, dict): Request X-Headers in form of Key=Value + session (optional, dict): path to a JSON-encoded container session token + timeout: Timeout for the operation. + Returns: + depending on the `json_output` parameter value, the function returns + (dict): HEAD response in JSON format + or + (str): HEAD response as a plain text + """ + result = self.cli.object.head( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + bearer=bearer, + json_mode=json_output, + raw=is_raw, + ttl=1 if is_direct else None, + xhdr=xhdr, + session=session, + timeout=timeout, + ) + + if not json_output: + return result + + try: + decoded = json.loads(result.stdout) + except Exception as exc: + # If we failed to parse output as JSON, the cause might be + # the plain text string in the beginning of the output. + # Here we cut off first string and try to parse again. + logger.info(f"failed to parse output: {exc}") + logger.info("parsing output in another way") + fst_line_idx = result.stdout.find("\n") + decoded = json.loads(result.stdout[fst_line_idx:]) + + # if response + if "chunks" in decoded.keys(): + logger.info("decoding ec chunks") + return decoded["chunks"] + + # If response is Complex Object header, it has `splitId` key + if "splitId" in decoded.keys(): + logger.info("decoding split header") + return utils.json_utils.decode_split_header(decoded) + + # If response is Last or Linking Object header, + # it has `header` dictionary and non-null `split` dictionary + if "split" in decoded["header"].keys(): + if decoded["header"]["split"]: + logger.info("decoding linking object") + return utils.json_utils.decode_linking_object(decoded) + + if decoded["header"]["objectType"] == "STORAGE_GROUP": + logger.info("decoding storage group") + return utils.json_utils.decode_storage_group(decoded) + + if decoded["header"]["objectType"] == "TOMBSTONE": + logger.info("decoding tombstone") + return utils.json_utils.decode_tombstone(decoded) + + logger.info("decoding simple header") + return utils.json_utils.decode_simple_header(decoded) + + @reporter.step("Lock Object") + def lock( + self, + cid: str, + oid: str, + endpoint: str, + lifetime: Optional[int] = None, + expire_at: Optional[int] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + """ + Locks object in container. + + Args: + address: Address of wallet account. + bearer: File with signed JSON or binary encoded bearer token. + cid: Container ID. + oid: Object ID. + lifetime: Lock lifetime. + expire_at: Lock expiration epoch. + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + session: Path to a JSON-encoded container session token. + ttl: TTL value in request meta header (default 2). + wallet: WIF (NEP-2) string or path to the wallet or binary key. + xhdr: Dict with request X-Headers. 
+            timeout: Timeout for the operation.
+
+        Returns:
+            Lock object ID
+        """
+        result = self.cli.object.lock(
+            rpc_endpoint=endpoint,
+            lifetime=lifetime,
+            expire_at=expire_at,
+            address=address,
+            cid=cid,
+            oid=oid,
+            bearer=bearer,
+            xhdr=xhdr,
+            session=session,
+            ttl=ttl,
+            timeout=timeout,
+        )
+
+        # Splitting CLI output to separate lines and taking the first line
+        id_str = result.stdout.strip().split("\n")[0]
+        oid = id_str.split(":")[1]
+        return oid.strip()
+
+    @reporter.step("Put object")
+    def put(
+        self,
+        path: str,
+        cid: str,
+        endpoint: str,
+        bearer: Optional[str] = None,
+        copies_number: Optional[int] = None,
+        attributes: Optional[dict] = None,
+        xhdr: Optional[dict] = None,
+        expire_at: Optional[int] = None,
+        no_progress: bool = True,
+        session: Optional[str] = None,
+        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+    ) -> str:
+        """
+        PUT of given file.
+
+        Args:
+            path: path to file to be PUT
+            cid: ID of Container where we get the Object from
+            bearer: path to Bearer Token file, appends to `--bearer` key
+            copies_number: Number of copies of the object to store within the RPC call
+            attributes: User attributes in form of Key1=Value1,Key2=Value2
+            endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
+            no_progress: do not show progress bar
+            expire_at: Last epoch in the life of the object
+            xhdr: Request X-Headers in form of Key=Value
+            session: path to a JSON-encoded container session token
+            timeout: Timeout for the operation.
+        Returns:
+            (str): ID of uploaded Object
+        """
+        result = self.cli.object.put(
+            rpc_endpoint=endpoint,
+            file=path,
+            cid=cid,
+            attributes=attributes,
+            bearer=bearer,
+            copies_number=copies_number,
+            expire_at=expire_at,
+            no_progress=no_progress,
+            xhdr=xhdr,
+            session=session,
+            timeout=timeout,
+        )
+
+        # Splitting CLI output to separate lines and taking the penultimate line
+        id_str = result.stdout.strip().split("\n")[-2]
+        oid = id_str.split(":")[1]
+        return oid.strip()
+
+    @reporter.step("Put object to random node")
+    def put_to_random_node(
+        self,
+        path: str,
+        cid: str,
+        cluster: Cluster,
+        bearer: Optional[str] = None,
+        copies_number: Optional[int] = None,
+        attributes: Optional[dict] = None,
+        xhdr: Optional[dict] = None,
+        expire_at: Optional[int] = None,
+        no_progress: bool = True,
+        session: Optional[str] = None,
+        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
+    ) -> str:
+        """
+        PUT of given file to a random storage node.
+
+        Args:
+            path: path to file to be PUT
+            cid: ID of Container where we get the Object from
+            cluster: cluster under test
+            bearer: path to Bearer Token file, appends to `--bearer` key
+            copies_number: Number of copies of the object to store within the RPC call
+            attributes: User attributes in form of Key1=Value1,Key2=Value2
+            no_progress: do not show progress bar
+            expire_at: Last epoch in the life of the object
+            xhdr: Request X-Headers in form of Key=Value
+            session: path to a JSON-encoded container session token
+            timeout: Timeout for the operation.
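+
+        Example (illustrative sketch; `object_ops` stands for an ObjectOperations
+        instance, the file path and cluster fixture are placeholders):
+            oid = object_ops.put_to_random_node("/tmp/test.bin", cid, cluster)
+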
+ Returns: + ID of uploaded Object + """ + endpoint = cluster.get_random_storage_rpc_endpoint() + return self.put( + path, + cid, + endpoint, + bearer, + copies_number, + attributes, + xhdr, + expire_at, + no_progress, + session, + timeout=timeout, + ) + + @reporter.step("Get Range") + def range( + self, + cid: str, + oid: str, + range_cut: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> tuple[file_utils.TestFile, bytes]: + """ + GETRANGE an Object. + + Args: + wallet: wallet on whose behalf GETRANGE is done + cid: ID of Container where we get the Object from + oid: ID of Object we are going to request + range_cut: range to take data from in the form offset:length + shell: executor for cli command + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + bearer: path to Bearer Token file, appends to `--bearer` key + xhdr: Request X-Headers in form of Key=Value + session: path to a JSON-encoded container session token + timeout: Timeout for the operation. + Returns: + (str, bytes) - path to the file with range content and content of this file as bytes + """ + test_file = file_utils.TestFile(os.path.join(ASSETS_DIR, str(uuid.uuid4()))) + + self.cli.object.range( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + range=range_cut, + file=test_file, + bearer=bearer, + xhdr=xhdr, + session=session, + timeout=timeout, + ) + + with open(test_file, "rb") as file: + content = file.read() + return test_file, content + + @reporter.step("Search object") + def search( + self, + cid: str, + endpoint: str, + bearer: str = "", + filters: Optional[dict] = None, + expected_objects_list: Optional[list] = None, + xhdr: Optional[dict] = None, + session: Optional[str] = None, + phy: bool = False, + root: bool = False, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> list: + """ + SEARCH an Object. + + Args: + wallet: wallet on whose behalf SEARCH is done + cid: ID of Container where we get the Object from + shell: executor for cli command + bearer: path to Bearer Token file, appends to `--bearer` key + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + filters: key=value pairs to filter Objects + expected_objects_list: a list of ObjectIDs to compare found Objects with + xhdr: Request X-Headers in form of Key=Value + session: path to a JSON-encoded container session token + phy: Search physically stored objects. + root: Search for user objects. + timeout: Timeout for the operation. 
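+
+        Example (illustrative sketch; filters are rendered into `key EQ value`
+        expressions by the call below, `object_ops` is a placeholder instance):
+            oids = object_ops.search(cid, endpoint, filters={"FileName": "cat.jpg"})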
+ + Returns: + list of found ObjectIDs + """ + result = self.cli.object.search( + rpc_endpoint=endpoint, + cid=cid, + bearer=bearer, + xhdr=xhdr, + filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] if filters else None, + session=session, + phy=phy, + root=root, + timeout=timeout, + ) + + found_objects = re.findall(r"(\w{43,44})", result.stdout) + + if expected_objects_list: + if sorted(found_objects) == sorted(expected_objects_list): + logger.info(f"Found objects list '{found_objects}' " f"is equal for expected list '{expected_objects_list}'") + else: + logger.warning(f"Found object list {found_objects} " f"is not equal to expected list '{expected_objects_list}'") + + return found_objects + + @wait_for_success() + @reporter.step("Search object nodes") + def nodes( + self, + cluster: Cluster, + cid: str, + oid: str, + alive_node: ClusterNode, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> list[ClusterNode]: + endpoint = alive_node.storage_node.get_rpc_endpoint() + + response = self.cli.object.nodes( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + bearer=bearer, + ttl=1 if is_direct else None, + json=True, + xhdr=xhdr, + timeout=timeout, + verify_presence_all=verify_presence_all, + ) + + response_json = json.loads(response.stdout) + # Currently, the command will show expected and confirmed nodes. + # And we (currently) count only nodes which are both expected and confirmed + object_nodes_id = { + required_node + for data_object in response_json["data_objects"] + for required_node in data_object["required_nodes"] + if required_node in data_object["confirmed_nodes"] + } + + netmap_nodes_list = cli_utils.parse_netmap_output( + self.cli.netmap.snapshot( + rpc_endpoint=endpoint, + ).stdout + ) + netmap_nodes = [ + netmap_node for object_node in object_nodes_id for netmap_node in netmap_nodes_list if object_node == netmap_node.node_id + ] + + object_nodes = [ + cluster_node + for netmap_node in netmap_nodes + for cluster_node in cluster.cluster_nodes + if netmap_node.node == cluster_node.host_ip + ] + + return object_nodes diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces.py b/src/frostfs_testlib/storage/grpc_operations/interfaces.py new file mode 100644 index 0000000..c39accc --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces.py @@ -0,0 +1,285 @@ +from abc import ABC, abstractmethod +from typing import Any, Optional + +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT +from frostfs_testlib.shell.interfaces import CommandResult +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.constants import PlacementRule +from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo +from frostfs_testlib.utils import file_utils + + +class ChunksInterface(ABC): + @abstractmethod + def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]: + pass + + @abstractmethod + def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]: + pass + + @abstractmethod + def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str: + pass + + @abstractmethod + def get_all( + self, + rpc_endpoint: str, + cid: str, + oid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: 
Optional[bool] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> list[Chunk]: + pass + + @abstractmethod + def get_parity( + self, + rpc_endpoint: str, + cid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + oid: Optional[str] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> Chunk: + pass + + @abstractmethod + def get_first_data( + self, + rpc_endpoint: str, + cid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + oid: Optional[str] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> Chunk: + pass + + +class ObjectInterface(ABC): + def __init__(self) -> None: + self.chunks: ChunksInterface + + @abstractmethod + def delete( + self, + cid: str, + oid: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + pass + + @abstractmethod + def get( + self, + cid: str, + oid: str, + endpoint: str, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> file_utils.TestFile: + pass + + @abstractmethod + def get_from_random_node( + self, + cid: str, + oid: str, + cluster: Cluster, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + pass + + @abstractmethod + def hash( + self, + rpc_endpoint: str, + cid: str, + oid: str, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + range: Optional[str] = None, + salt: Optional[str] = None, + ttl: Optional[int] = None, + session: Optional[str] = None, + hash_type: Optional[str] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + pass + + @abstractmethod + def head( + self, + cid: str, + oid: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + json_output: bool = True, + is_raw: bool = False, + is_direct: bool = False, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> CommandResult | Any: + pass + + @abstractmethod + def lock( + self, + cid: str, + oid: str, + endpoint: str, + lifetime: Optional[int] = None, + expire_at: Optional[int] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + pass + + @abstractmethod + def put( + self, + path: str, + cid: str, + endpoint: str, + bearer: Optional[str] = None, + copies_number: Optional[int] = None, + attributes: Optional[dict] = None, + xhdr: Optional[dict] = None, + expire_at: Optional[int] = 
None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + pass + + @abstractmethod + def put_to_random_node( + self, + path: str, + cid: str, + cluster: Cluster, + bearer: Optional[str] = None, + copies_number: Optional[int] = None, + attributes: Optional[dict] = None, + xhdr: Optional[dict] = None, + expire_at: Optional[int] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + pass + + @abstractmethod + def range( + self, + cid: str, + oid: str, + range_cut: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> tuple[file_utils.TestFile, bytes]: + pass + + @abstractmethod + def search( + self, + cid: str, + endpoint: str, + bearer: str = "", + filters: Optional[dict] = None, + expected_objects_list: Optional[list] = None, + xhdr: Optional[dict] = None, + session: Optional[str] = None, + phy: bool = False, + root: bool = False, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> list: + pass + + @abstractmethod + def nodes( + self, + cluster: Cluster, + cid: str, + oid: str, + alive_node: ClusterNode, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> list[ClusterNode]: + pass + + +class ContainerInterface(ABC): + @abstractmethod + def create( + self, + endpoint: str, + rule: str = PlacementRule.DEFAULT_PLACEMENT_RULE, + basic_acl: str = "", + attributes: Optional[dict] = None, + session_token: str = "", + name: Optional[str] = None, + options: Optional[dict] = None, + await_mode: bool = True, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> str: + pass + + +class GrpcClientWrapper(ABC): + def __init__(self) -> None: + self.object: ObjectInterface + self.container: ContainerInterface From 0caca54e36751d3b9f9b4feeaae07c396ff656a9 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Fri, 16 Aug 2024 18:12:25 +0300 Subject: [PATCH 281/363] [#283] Fix mistakes Signed-off-by: a.berezin --- .../storage/grpc_operations/implementations/chunks.py | 2 +- .../storage/grpc_operations/implementations/container.py | 2 +- .../storage/grpc_operations/implementations/object.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py index 70d0823..b0f196e 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py @@ -7,7 +7,7 @@ from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.controllers.shards_watcher import ShardsWatcher from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo -from frostfs_testlib.storage.grps_operations import interfaces +from frostfs_testlib.storage.grpc_operations import interfaces from frostfs_testlib.utils.cli_utils import parse_netmap_output diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py index 077bdfd..cac2df4 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py +++ 
b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py @@ -5,7 +5,7 @@ from frostfs_testlib import reporter from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT from frostfs_testlib.storage.constants import PlacementRule -from frostfs_testlib.storage.grps_operations import interfaces +from frostfs_testlib.storage.grpc_operations import interfaces logger = logging.getLogger("NeoLogger") diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py index a967853..63a2922 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py @@ -11,8 +11,8 @@ from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.shell.interfaces import CommandResult from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.storage.grps_operations import interfaces -from frostfs_testlib.storage.grps_operations.implementations.chunks import ChunksOperations +from frostfs_testlib.storage.grpc_operations import interfaces +from frostfs_testlib.storage.grpc_operations.implementations.chunks import ChunksOperations from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.utils import cli_utils, file_utils From 85c2707ec807a4220504a40bcf1655e2aefe4869 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Wed, 28 Aug 2024 12:12:05 +0300 Subject: [PATCH 282/363] [#284] Add container operational in CliWrapper Signed-off-by: Dmitriy Zayakin --- .../cli/frostfs_cli/container.py | 20 ++ .../implementations/container.py | 247 ++++++++++++++++-- .../storage/grpc_operations/interfaces.py | 148 +++++++++-- 3 files changed, 377 insertions(+), 38 deletions(-) diff --git a/src/frostfs_testlib/cli/frostfs_cli/container.py b/src/frostfs_testlib/cli/frostfs_cli/container.py index 1ff217f..8bcbe9e 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/container.py +++ b/src/frostfs_testlib/cli/frostfs_cli/container.py @@ -16,6 +16,8 @@ class FrostfsCliContainer(CliCommand): basic_acl: Optional[str] = None, await_mode: bool = False, disable_timestamp: bool = False, + force: bool = False, + trace: bool = False, name: Optional[str] = None, nonce: Optional[str] = None, policy: Optional[str] = None, @@ -37,6 +39,8 @@ class FrostfsCliContainer(CliCommand): basic_acl: Hex encoded basic ACL value or keywords like 'public-read-write', 'private', 'eacl-public-read' (default "private"). disable_timestamp: Disable timestamp container attribute. + force: Skip placement validity check. + trace: Generate trace ID and print it. name: Container name attribute. nonce: UUIDv4 nonce value for container. policy: QL-encoded or JSON-encoded placement policy or path to file with it. @@ -69,6 +73,7 @@ class FrostfsCliContainer(CliCommand): ttl: Optional[int] = None, xhdr: Optional[dict] = None, force: bool = False, + trace: bool = False, ) -> CommandResult: """ Delete an existing container. @@ -78,6 +83,7 @@ class FrostfsCliContainer(CliCommand): address: Address of wallet account. await_mode: Block execution until container is removed. cid: Container ID. + trace: Generate trace ID and print it. force: Do not check whether container contains locks and remove immediately. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). 
session: Path to a JSON-encoded container session token. @@ -104,6 +110,7 @@ class FrostfsCliContainer(CliCommand): await_mode: bool = False, to: Optional[str] = None, json_mode: bool = False, + trace: bool = False, ttl: Optional[int] = None, xhdr: Optional[dict] = None, timeout: Optional[str] = None, @@ -116,6 +123,7 @@ class FrostfsCliContainer(CliCommand): await_mode: Block execution until container is removed. cid: Container ID. json_mode: Print or dump container in JSON format. + trace: Generate trace ID and print it. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). to: Path to dump encoded container. ttl: TTL value in request meta header (default 2). @@ -155,6 +163,8 @@ class FrostfsCliContainer(CliCommand): cid: Container ID. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). to: Path to dump encoded container. + json_mode: Print or dump container in JSON format. + trace: Generate trace ID and print it. session: Path to a JSON-encoded container session token. ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. @@ -174,6 +184,7 @@ class FrostfsCliContainer(CliCommand): def list( self, rpc_endpoint: str, + name: Optional[str] = None, wallet: Optional[str] = None, address: Optional[str] = None, generate_key: Optional[bool] = None, @@ -188,11 +199,13 @@ class FrostfsCliContainer(CliCommand): Args: address: Address of wallet account. + name: List containers by the attribute name. owner: Owner of containers (omit to use owner from private key). rpc_endpoint: Remote node address (as 'multiaddr' or ':'). ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + trace: Generate trace ID and print it. timeout: Timeout for the operation (default 15s). generate_key: Generate a new private key. @@ -208,9 +221,11 @@ class FrostfsCliContainer(CliCommand): self, rpc_endpoint: str, cid: str, + bearer: Optional[str] = None, wallet: Optional[str] = None, address: Optional[str] = None, generate_key: Optional[bool] = None, + trace: bool = False, ttl: Optional[int] = None, xhdr: Optional[dict] = None, timeout: Optional[str] = None, @@ -221,10 +236,12 @@ class FrostfsCliContainer(CliCommand): Args: address: Address of wallet account. cid: Container ID. + bearer: File with signed JSON or binary encoded bearer token. rpc_endpoint: Remote node address (as 'multiaddr' or ':'). ttl: TTL value in request meta header (default 2). wallet: WIF (NEP-2) string or path to the wallet or binary key. xhdr: Dict with request X-Headers. + trace: Generate trace ID and print it. timeout: Timeout for the operation (default 15s). generate_key: Generate a new private key. @@ -236,6 +253,7 @@ class FrostfsCliContainer(CliCommand): **{param: value for param, value in locals().items() if param not in ["self"]}, ) + # TODO Deprecated method with 0.42 def set_eacl( self, rpc_endpoint: str, @@ -281,6 +299,7 @@ class FrostfsCliContainer(CliCommand): address: Optional[str] = None, ttl: Optional[int] = None, from_file: Optional[str] = None, + trace: bool = False, short: Optional[bool] = True, xhdr: Optional[dict] = None, generate_key: Optional[bool] = None, @@ -298,6 +317,7 @@ class FrostfsCliContainer(CliCommand): from_file: string File path with encoded container timeout: duration Timeout for the operation (default 15 s) short: shorten the output of node information. + trace: Generate trace ID and print it. xhdr: Dict with request X-Headers. 
generate_key: Generate a new private key. diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py index cac2df4..c8360ea 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py @@ -1,11 +1,16 @@ +import json import logging -from typing import Optional +import re +from typing import List, Optional, Union from frostfs_testlib import reporter from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.plugins import load_plugin from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT -from frostfs_testlib.storage.constants import PlacementRule +from frostfs_testlib.s3.interfaces import BucketContainerResolver +from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.grpc_operations import interfaces +from frostfs_testlib.utils import json_utils logger = logging.getLogger("NeoLogger") @@ -18,13 +23,22 @@ class ContainerOperations(interfaces.ContainerInterface): def create( self, endpoint: str, - rule: str = PlacementRule.DEFAULT_PLACEMENT_RULE, - basic_acl: str = "", + nns_zone: Optional[str] = None, + nns_name: Optional[str] = None, + address: Optional[str] = None, attributes: Optional[dict] = None, - session_token: str = "", + basic_acl: Optional[str] = None, + await_mode: bool = False, + disable_timestamp: bool = False, + force: bool = False, + trace: bool = False, name: Optional[str] = None, - options: Optional[dict] = None, - await_mode: bool = True, + nonce: Optional[str] = None, + policy: Optional[str] = None, + session: Optional[str] = None, + subnet: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, ) -> str: """ @@ -54,14 +68,23 @@ class ContainerOperations(interfaces.ContainerInterface): """ result = self.cli.container.create( rpc_endpoint=endpoint, - policy=rule, - basic_acl=basic_acl, + policy=policy, + nns_zone=nns_zone, + nns_name=nns_name, + address=address, attributes=attributes, - name=name, - session=session_token, + basic_acl=basic_acl, await_mode=await_mode, + disable_timestamp=disable_timestamp, + force=force, + trace=trace, + name=name, + nonce=nonce, + session=session, + subnet=subnet, + ttl=ttl, + xhdr=xhdr, timeout=timeout, - **options or {}, ) cid = self._parse_cid(result.stdout) @@ -71,21 +94,215 @@ class ContainerOperations(interfaces.ContainerInterface): return cid @reporter.step("List Containers") - def list(self, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT) -> list[str]: + def list( + self, + endpoint: str, + name: Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + owner: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + **params, + ) -> List[str]: """ A wrapper for `frostfs-cli container list` call. It returns all the available containers for the given wallet. Args: - wallet (WalletInfo): a wallet on whose behalf we list the containers shell: executor for cli command endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key timeout: Timeout for the operation. 
Returns: (list): list of containers """ - result = self.cli.container.list(rpc_endpoint=endpoint, timeout=timeout) + result = self.cli.container.list( + rpc_endpoint=endpoint, + name=name, + address=address, + generate_key=generate_key, + owner=owner, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + **params, + ) return result.stdout.split() + @reporter.step("List Objects in container") + def list_objects( + self, + endpoint: str, + cid: str, + bearer: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> List[str]: + """ + A wrapper for `frostfs-cli container list-objects` call. It returns all the + available objects in container. + Args: + cid: Container ID. + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + timeout: Timeout for the operation. + Returns: + (list): list of objects in the container + """ + result = self.cli.container.list_objects( + rpc_endpoint=endpoint, + cid=cid, + bearer=bearer, + wallet=wallet, + address=address, + generate_key=generate_key, + trace=trace, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + ) + logger.info(f"Container objects: \n{result}") + return result.stdout.split() + + @reporter.step("Delete container") + def delete( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + await_mode: bool = False, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + force: bool = False, + trace: bool = False, + ): + try: + return self.cli.container.delete( + rpc_endpoint=endpoint, + cid=cid, + address=address, + await_mode=await_mode, + session=session, + ttl=ttl, + xhdr=xhdr, + force=force, + trace=trace, + ).stdout + except RuntimeError as e: + logger.error(f"Error request:\n{e}") + + @reporter.step("Get container") + def get( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + await_mode: bool = False, + to: Optional[str] = None, + json_mode: bool = True, + trace: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> Union[dict, str]: + result = self.cli.container.get( + rpc_endpoint=endpoint, + cid=cid, + address=address, + generate_key=generate_key, + await_mode=await_mode, + to=to, + json_mode=json_mode, + trace=trace, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + ) + container_info = json.loads(result.stdout) + attributes = dict() + for attr in container_info["attributes"]: + attributes[attr["key"]] = attr["value"] + container_info["attributes"] = attributes + container_info["ownerID"] = json_utils.json_reencode(container_info["ownerID"]["value"]) + return container_info + + @reporter.step("Get eacl container") + def get_eacl( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + await_mode: bool = False, + json_mode: bool = True, + trace: bool = False, + to: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ): + return self.cli.container.get_eacl( + rpc_endpoint=endpoint, + cid=cid, + address=address, + generate_key=generate_key, + await_mode=await_mode, + to=to, + session=session, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + ).stdout
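# Sketch of the shape transformation performed by get() above; the input
# values are illustrative, not taken from a real node.
raw = {"attributes": [{"key": "Name", "value": "test"}, {"key": "Timestamp", "value": "0"}]}
flat = {attr["key"]: attr["value"] for attr in raw["attributes"]}
assert flat == {"Name": "test", "Timestamp": "0"}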
+ @reporter.step("Get nodes container") + def nodes( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + ttl: Optional[int] = None, + from_file: Optional[str] = None, + trace: bool = False, + short: Optional[bool] = True, + xhdr: Optional[dict] = None, + generate_key: Optional[bool] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> List[ClusterNode]: + result = self.cli.container.search_node( + rpc_endpoint=endpoint, + cid=cid, + address=address, + ttl=ttl, + from_file=from_file, + trace=trace, + short=short, + xhdr=xhdr, + generate_key=generate_key, + timeout=timeout, + ).stdout + + pattern = r"[0-9]+(?:\.[0-9]+){3}" + nodes_ip = list(set(re.findall(pattern, result))) + + with reporter.step(f"nodes ips = {nodes_ip}"): + nodes_list = cluster.get_nodes_by_ip(nodes_ip) + + with reporter.step(f"Return nodes - {nodes_list}"): + return nodes_list + + @reporter.step("Resolve container by name") + def resolve_container_by_name(self, name: str, node: ClusterNode): + resolver_cls = load_plugin("frostfs.testlib.bucket_cid_resolver", node.host.config.product) + resolver: BucketContainerResolver = resolver_cls() + return resolver.resolve(node, name) + def _parse_cid(self, output: str) -> str: """ Parses container ID from a given CLI output. The input string we expect:
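# Sketch of the IP-extraction step used by nodes() above: the regex pulls
# IPv4 addresses out of the raw CLI output and set() deduplicates them.
# The sample output text is illustrative only.
import re

output = "node 1: 172.26.160.11 ONLINE node 2: 172.26.160.12 ONLINE"
ips = list(set(re.findall(r"[0-9]+(?:\.[0-9]+){3}", output)))
# e.g. ['172.26.160.11', '172.26.160.12'] (order not guaranteed after set())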
diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces.py b/src/frostfs_testlib/storage/grpc_operations/interfaces.py index c39accc..1947435 100644 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces.py +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces.py @@ -1,7 +1,6 @@ from abc import ABC, abstractmethod -from typing import Any, Optional +from typing import Any, List, Optional -from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT from frostfs_testlib.shell.interfaces import CommandResult from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.constants import PlacementRule @@ -96,7 +95,7 @@ class ObjectInterface(ABC): bearer: str = "", xhdr: Optional[dict] = None, session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + timeout: Optional[str] = None, ) -> str: pass @@ -111,7 +110,7 @@ class ObjectInterface(ABC): xhdr: Optional[dict] = None, no_progress: bool = True, session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + timeout: Optional[str] = None, ) -> file_utils.TestFile: pass @@ -126,14 +125,14 @@ class ObjectInterface(ABC): xhdr: Optional[dict] = None, no_progress: bool = True, session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + timeout: Optional[str] = None, ) -> str: pass @abstractmethod def hash( self, - rpc_endpoint: str, + endpoint: str, cid: str, oid: str, address: Optional[str] = None, @@ -145,7 +144,7 @@ class ObjectInterface(ABC): session: Optional[str] = None, hash_type: Optional[str] = None, xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + timeout: Optional[str] = None, ) -> str: pass @@ -161,7 +160,7 @@ class ObjectInterface(ABC): is_raw: bool = False, is_direct: bool = False, session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + timeout: Optional[str] = None, ) -> CommandResult | Any: pass @@ -178,7 +177,7 @@ class ObjectInterface(ABC): session: Optional[str] = None, ttl: Optional[int] = None, xhdr: Optional[dict] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + timeout: Optional[str] = None, ) -> str: pass @@ -195,7 +194,7 @@ class ObjectInterface(ABC): expire_at: Optional[int] = None, no_progress: bool = True, session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + timeout: Optional[str] = None, ) -> str: pass @@ -212,7 +211,7 @@ class ObjectInterface(ABC): expire_at: Optional[int] = None, no_progress: bool = True, session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + timeout: Optional[str] = None, ) -> str: pass @@ -226,7 +225,7 @@ class ObjectInterface(ABC): bearer: str = "", xhdr: Optional[dict] = None, session: Optional[str] = None, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + timeout: Optional[str] = None, ) -> tuple[file_utils.TestFile, bytes]: pass @@ -242,8 +241,8 @@ class ObjectInterface(ABC): session: Optional[str] = None, phy: bool = False, root: bool = False, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> list: + timeout: Optional[str] = None, + ) -> List: pass @abstractmethod @@ -257,8 +256,8 @@ class ObjectInterface(ABC): xhdr: Optional[dict] = None, is_direct: bool = False, verify_presence_all: bool = False, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> list[ClusterNode]: + timeout: Optional[str] = None, + ) -> List[ClusterNode]: pass @@ -267,16 +266,119 @@ class ContainerInterface(ABC): def create( self, endpoint: str, - rule: str = PlacementRule.DEFAULT_PLACEMENT_RULE, - basic_acl: str = "", + nns_zone: Optional[str] = None, + nns_name: Optional[str] = None, + address: Optional[str] = None, attributes: Optional[dict] = None, - session_token: str = "", + basic_acl: Optional[str] = None, + await_mode: bool = False, + disable_timestamp: bool = False, + force: bool = False, + trace: bool = False, name: Optional[str] = None, - options: Optional[dict] = None, - await_mode: bool = True, - timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + nonce: Optional[str] = None, + policy: Optional[str] = None, + session: Optional[str] = None, + subnet: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, ) -> str: - pass + """ + Create a new container and register it in the FrostFS. + It will be stored in the sidechain when the Inner Ring accepts it. + """ + raise NotImplementedError("No implemented method create")
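# A hypothetical sketch (not part of the patch) of satisfying this interface:
# every method is declared @abstractmethod, so a concrete class must override
# all of them before it can be instantiated; the NotImplementedError defaults
# only guard direct base-class calls.
class DummyContainerOps(ContainerInterface):
    def create(self, endpoint, **kwargs) -> str: return "FAKE_CID"  # placeholder
    def delete(self, endpoint, cid, **kwargs): return []
    def get(self, endpoint, cid, **kwargs): return []
    def get_eacl(self, endpoint, cid, **kwargs): return []
    def list(self, endpoint, **kwargs): return []
    def nodes(self, endpoint, cid, **kwargs): return []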
+ """ + raise NotImplementedError("No implemethed method delete") + + @abstractmethod + def get( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + await_mode: bool = False, + to: Optional[str] = None, + json_mode: bool = True, + trace: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[str]: + """Get container field info.""" + raise NotImplementedError("No implemethed method get") + + @abstractmethod + def get_eacl( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + await_mode: bool = False, + json_mode: bool = True, + trace: bool = False, + to: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[str]: + """Get extended ACL table of container.""" + raise NotImplementedError("No implemethed method get-eacl") + + @abstractmethod + def list( + self, + endpoint: str, + name: Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = False, + owner: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + **params, + ) -> List[str]: + """List all created containers.""" + raise NotImplementedError("No implemethed method list") + + @abstractmethod + def nodes( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + ttl: Optional[int] = None, + from_file: Optional[str] = None, + trace: bool = False, + short: Optional[bool] = True, + xhdr: Optional[dict] = None, + generate_key: Optional[bool] = None, + timeout: Optional[str] = None, + ) -> List[str]: + """Show the nodes participating in the container in the current epoch.""" + raise NotImplementedError("No implemethed method nodes") class GrpcClientWrapper(ABC): From eba782e7d26945d75bb1e233b16058e3b1b52f7d Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Mon, 2 Sep 2024 13:30:01 +0300 Subject: [PATCH 283/363] [#285] Change func search bucket nodes and remove old resolver bucket cnr Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/steps/cli/container.py | 7 ------- src/frostfs_testlib/steps/s3/s3_helper.py | 6 ++++-- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py index 641b321..809b39a 100644 --- a/src/frostfs_testlib/steps/cli/container.py +++ b/src/frostfs_testlib/steps/cli/container.py @@ -327,13 +327,6 @@ def _parse_cid(output: str) -> str: return splitted[1] -@reporter.step("Search container by name") -def search_container_by_name(name: str, node: ClusterNode): - resolver_cls = load_plugin("frostfs.testlib.bucket_cid_resolver", node.host.config.product) - resolver: BucketContainerResolver = resolver_cls() - return resolver.resolve(node, name) - - @reporter.step("Search for nodes with a container") def search_nodes_with_container( wallet: WalletInfo, diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py index 9b85766..dbf48d3 100644 --- a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3/s3_helper.py @@ -7,8 +7,9 @@ from dateutil.parser import parse from frostfs_testlib import reporter from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus +from frostfs_testlib.s3.interfaces import BucketContainerResolver from frostfs_testlib.shell 
import Shell -from frostfs_testlib.steps.cli.container import search_container_by_name, search_nodes_with_container +from frostfs_testlib.steps.cli.container import search_nodes_with_container from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.dataclasses.wallet import WalletInfo @@ -175,10 +176,11 @@ def search_nodes_with_bucket( wallet: WalletInfo, shell: Shell, endpoint: str, + bucket_container_resolver: BucketContainerResolver, ) -> list[ClusterNode]: cid = None for cluster_node in cluster.cluster_nodes: - cid = search_container_by_name(name=bucket_name, node=cluster_node) + cid = bucket_container_resolver.resolve(cluster_node, bucket_name) if cid: break nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster) From d2f8323fb95c547ae35b984744b1ef63ce502dba Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Tue, 3 Sep 2024 15:11:43 +0300 Subject: [PATCH 284/363] [#286] Change args id in shards.set-mode command Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/cli/frostfs_cli/shards.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/cli/frostfs_cli/shards.py b/src/frostfs_testlib/cli/frostfs_cli/shards.py index e88707a..82ea87b 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/shards.py +++ b/src/frostfs_testlib/cli/frostfs_cli/shards.py @@ -40,7 +40,7 @@ class FrostfsCliShards(CliCommand): self, endpoint: str, mode: str, - id: Optional[list[str]], + id: Optional[list[str]] = None, wallet: Optional[str] = None, wallet_password: Optional[str] = None, address: Optional[str] = None, From 84e83487f9896cc1e95c64680bf7664724a4c59c Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Tue, 10 Sep 2024 13:54:51 +0300 Subject: [PATCH 285/363] [#288] Update object and chunks Clients Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/cli/frostfs_cli/object.py | 2 +- .../grpc_operations/implementations/chunks.py | 63 +++++++++++++++---- 2 files changed, 52 insertions(+), 13 deletions(-) diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py index 070def0..1857987 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/object.py +++ b/src/frostfs_testlib/cli/frostfs_cli/object.py @@ -370,11 +370,11 @@ class FrostfsCliObject(CliCommand): self, rpc_endpoint: str, cid: str, + oid: Optional[str] = None, wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, generate_key: Optional[bool] = None, - oid: Optional[str] = None, trace: bool = False, root: bool = False, verify_presence_all: bool = False, diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py index b0f196e..d1bba9f 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py @@ -60,7 +60,6 @@ class ChunksOperations(interfaces.ChunksInterface): rpc_endpoint: str, cid: str, oid: str, - wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, generate_key: Optional[bool] = None, @@ -72,15 +71,28 @@ class ChunksOperations(interfaces.ChunksInterface): xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> list[Chunk]: - object_nodes = self.cli.object.nodes(**{param: value for param, value in locals().items() if param not in ["self"]}) - return self._parse_object_nodes(object_nodes.stdout) + 
object_nodes = self.cli.object.nodes( + rpc_endpoint=rpc_endpoint, + cid=cid, + address=address, + bearer=bearer, + generate_key=generate_key, + oid=oid, + trace=trace, + root=root, + verify_presence_all=verify_presence_all, + json=json, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + ) + return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[0] @reporter.step("Get last parity chunk") def get_parity( self, rpc_endpoint: str, cid: str, - wallet: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, generate_key: Optional[bool] = None, @@ -93,29 +105,56 @@ class ChunksOperations(interfaces.ChunksInterface): xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> Chunk: - object_nodes = self.cli.object.nodes(**{param: value for param, value in locals().items() if param not in ["self"]}) - return self._parse_object_nodes(object_nodes.stdout)[-1] + object_nodes = self.cli.object.nodes( + rpc_endpoint=rpc_endpoint, + cid=cid, + address=address, + bearer=bearer, + generate_key=generate_key, + oid=oid, + trace=trace, + root=root, + verify_presence_all=verify_presence_all, + json=json, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + ) + return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[0] @reporter.step("Get first data chunk") def get_first_data( self, rpc_endpoint: str, cid: str, - wallet: Optional[str] = None, + oid: Optional[str] = None, address: Optional[str] = None, bearer: Optional[str] = None, generate_key: Optional[bool] = None, - oid: Optional[str] = None, - trace: bool = False, + trace: bool = True, root: bool = False, verify_presence_all: bool = False, json: bool = True, ttl: Optional[int] = None, xhdr: Optional[dict] = None, - timeout: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, ) -> Chunk: - object_nodes = self.cli.object.nodes(**{param: value for param, value in locals().items() if param not in ["self"]}) - return self._parse_object_nodes(object_nodes.stdout)[0] + object_nodes = self.cli.object.nodes( + rpc_endpoint=rpc_endpoint, + cid=cid, + address=address, + bearer=bearer, + generate_key=generate_key, + oid=oid, + trace=trace, + root=root, + verify_presence_all=verify_presence_all, + json=json, + ttl=ttl, + xhdr=xhdr, + timeout=timeout, + ) + return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[0] def _parse_object_nodes(self, object_nodes: str) -> list[Chunk]: parse_result = json.loads(object_nodes)
From 565fd4c72b6ab562f3024d471ff0aad5f2f42514 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Tue, 10 Sep 2024 15:14:32 +0300 Subject: [PATCH 286/363] [#289] Move temp dir fixture to testlib Signed-off-by: a.berezin --- src/frostfs_testlib/__init__.py | 2 +- src/frostfs_testlib/fixtures.py | 12 +++++++++++- 2 files changed, 12 insertions(+), 2 deletions(-)
diff --git a/src/frostfs_testlib/__init__.py b/src/frostfs_testlib/__init__.py index 2cdaf4e..f3143e6 100644 --- a/src/frostfs_testlib/__init__.py +++ b/src/frostfs_testlib/__init__.py @@ -1,3 +1,3 @@ __version__ = "2.0.1" -from .fixtures import configure_testlib, hosting +from .fixtures import configure_testlib, hosting, temp_directory
diff --git a/src/frostfs_testlib/fixtures.py b/src/frostfs_testlib/fixtures.py index 8f6873f..d0f92f2 100644 --- a/src/frostfs_testlib/fixtures.py +++ b/src/frostfs_testlib/fixtures.py @@ -7,7 +7,7 @@ import yaml from frostfs_testlib import reporter from frostfs_testlib.hosting.hosting import Hosting -from frostfs_testlib.resources.common import HOSTING_CONFIG_FILE +from frostfs_testlib.resources.common import ASSETS_DIR, HOSTING_CONFIG_FILE from frostfs_testlib.storage import get_service_registry @@ -24,6 +24,16 @@ def configure_testlib(): registry.register_service(svc.name, svc.load()) +@pytest.fixture(scope="session") +def temp_directory(configure_testlib): + with reporter.step("Prepare tmp directory"): + full_path = ASSETS_DIR + if not os.path.exists(full_path): + os.mkdir(full_path) + + return full_path
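# Illustrative usage of the new session-scoped fixture; the test body and file
# name are hypothetical. The fixture returns ASSETS_DIR, creating it on first use.
import os

def test_generates_artifacts(temp_directory):
    payload_path = os.path.join(temp_directory, "payload.bin")
    with open(payload_path, "wb") as f:
        f.write(b"\x00" * 1024)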
+ + @pytest.fixture(scope="session") def hosting(configure_testlib) -> Hosting: with open(HOSTING_CONFIG_FILE, "r") as file:
From 36bfe385d59f9ddb69593d1095e8d15c0d1c4e0d Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Mon, 9 Sep 2024 20:44:31 +0300 Subject: [PATCH 287/363] Added method get s3 endpoint for namespace --- src/frostfs_testlib/storage/constants.py | 1 + src/frostfs_testlib/storage/dataclasses/frostfs_services.py | 3 +++ 2 files changed, 4 insertions(+)
diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 84f8d24..2cffd3a 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -12,6 +12,7 @@ class ConfigAttributes: REMOTE_WALLET_CONFIG = "remote_wallet_config_path" ENDPOINT_DATA_0 = "endpoint_data0" ENDPOINT_DATA_1 = "endpoint_data1" + ENDPOINT_DATA_0_NS = "endpoint_data0_namespace" ENDPOINT_INTERNAL = "endpoint_internal0" ENDPOINT_PROMETHEUS = "endpoint_prometheus" CONTROL_ENDPOINT = "control_endpoint"
diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index 16efd72..1420356 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -45,6 +45,9 @@ class S3Gate(NodeBase): self._get_attribute(ConfigAttributes.ENDPOINT_DATA_1), ] + def get_ns_endpoint(self, ns_name: str) -> str: + return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0_NS).format(namespace=ns_name) + def service_healthcheck(self) -> bool: health_metric = "frostfs_s3_gw_state_health" output = self.host.get_shell().exec(f"curl -s localhost:8086 | grep {health_metric} | sed 1,2d").stdout
From 4a2ac8a9b6ed8fe37c25bff91422f2d4232d2ab3 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Wed, 11 Sep 2024 10:42:51 +0300 Subject: [PATCH 288/363] [#290] Update restore traffic method Signed-off-by: Dmitriy Zayakin --- .../storage/controllers/cluster_state_controller.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 5d87a60..7f93e40 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -325,6 +325,8 @@ class ClusterStateController: node: ClusterNode, ) -> None: IpHelper.restore_input_traffic_to_node(node=node) + index = self.dropped_traffic.index(node) + self.dropped_traffic.pop(index) @reporter.step("Restore blocked nodes") def restore_all_traffic(self): @@ -531,7 +533,7 @@ class ClusterStateController: except Exception as err: logger.warning(f"Host ping fails with error {err}") return HostStatus.ONLINE - + @reporter.step("Get contract by domain - {domain_name}") def get_domain_contracts(self, cluster_node: ClusterNode, domain_name: str): frostfs_adm = FrostfsAdm(
From 1bee69042b1982f5167bfbef9e7b01a768452688 Mon Sep 17 00:00:00 2001 From: "m.malygina"
Date: Tue, 10 Sep 2024 10:45:22 +0300 Subject: [PATCH 289/363] [#294] add wipe data using wipefs method Signed-off-by: m.malygina --- src/frostfs_testlib/hosting/docker_host.py | 8 +++++++- src/frostfs_testlib/hosting/interfaces.py | 19 +++++++++++++++++-- 2 files changed, 24 insertions(+), 3 deletions(-)
diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 0fb5af0..5110e63 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -185,6 +185,12 @@ class DockerHost(Host): def is_file_exist(self, file_path: str) -> None: raise NotImplementedError("Not implemented for docker") + def wipefs_storage_node_data(self, service_name: str) -> None: + raise NotImplementedError("Not implemented for docker") + + def finish_wipefs(self, service_name: str) -> None: + raise NotImplementedError("Not implemented for docker") + def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None: volume_path = self.get_data_directory(service_name) @@ -240,7 +246,7 @@ class DockerHost(Host): until: Optional[datetime] = None, unit: Optional[str] = None, exclude_filter: Optional[str] = None, - priority: Optional[str] = None + priority: Optional[str] = None, ) -> str: client = self._get_docker_client() filtered_logs = ""
diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index 36c2804..b84326a 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -178,6 +178,21 @@ class Host(ABC): cache_only: To delete cache only. """ + @abstractmethod + def wipefs_storage_node_data(self, service_name: str) -> None: + """Erases all data of the storage node with specified name. + + Args: + service_name: Name of storage node service. + """ + + def finish_wipefs(self, service_name: str) -> None: + """Finalizes the wipefs operation for the storage node with specified name. + + Args: + service_name: Name of storage node service. + """ + @abstractmethod def delete_fstree(self, service_name: str) -> None: """ @@ -297,7 +312,7 @@ class Host(ABC): until: Optional[datetime] = None, unit: Optional[str] = None, exclude_filter: Optional[str] = None, - priority: Optional[str] = None + priority: Optional[str] = None, ) -> str: """Get logs from host filtered by regex. Args: filter_regex: regex filter for logs. since: If set, limits the time from which logs should be collected. Must be in UTC. until: If set, limits the time until which logs should be collected. Must be in UTC. unit: required unit. - priority: logs level, 0 - emergency, 7 - debug. All messages with that code and higher. + priority: log level, 0 - emergency, 7 - debug. All messages with that code and higher. For example, if we specify the -p 2 option, journalctl will show all messages with levels 2, 1 and 0.
Returns: From 0d750ed114653c05f810d35b0ab05d1104af40c2 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Tue, 17 Sep 2024 07:52:32 +0300 Subject: [PATCH 290/363] [#293] Add in CSC methods change blockchain netmap and update CliWrapper Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/steps/node_management.py | 40 +++++---------- .../controllers/cluster_state_controller.py | 49 ++++++++++--------- .../dataclasses/storage_object_info.py | 3 ++ .../grpc_operations/implementations/chunks.py | 10 ++-- .../implementations/container.py | 3 +- .../grpc_operations/implementations/object.py | 8 +++ .../storage/grpc_operations/interfaces.py | 7 ++- 7 files changed, 63 insertions(+), 57 deletions(-) diff --git a/src/frostfs_testlib/steps/node_management.py b/src/frostfs_testlib/steps/node_management.py index ece674b..42b1fc5 100644 --- a/src/frostfs_testlib/steps/node_management.py +++ b/src/frostfs_testlib/steps/node_management.py @@ -13,6 +13,7 @@ from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.shell import Shell from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align from frostfs_testlib.storage.cluster import Cluster, StorageNode +from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.utils import datetime_utils logger = logging.getLogger("NeoLogger") @@ -111,10 +112,7 @@ def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str: storage_wallet_path = node.get_wallet_path() cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, config_file=storage_wallet_config) - return cli.netmap.snapshot( - rpc_endpoint=node.get_rpc_endpoint(), - wallet=storage_wallet_path, - ).stdout + return cli.netmap.snapshot(rpc_endpoint=node.get_rpc_endpoint(), wallet=storage_wallet_path).stdout @reporter.step("Get shard list for {node}") @@ -202,12 +200,7 @@ def delete_node_data(node: StorageNode) -> None: @reporter.step("Exclude node {node_to_exclude} from network map") -def exclude_node_from_network_map( - node_to_exclude: StorageNode, - alive_node: StorageNode, - shell: Shell, - cluster: Cluster, -) -> None: +def exclude_node_from_network_map(node_to_exclude: StorageNode, alive_node: StorageNode, shell: Shell, cluster: Cluster) -> None: node_netmap_key = node_to_exclude.get_wallet_public_key() storage_node_set_status(node_to_exclude, status="offline") @@ -221,12 +214,7 @@ def exclude_node_from_network_map( @reporter.step("Include node {node_to_include} into network map") -def include_node_to_network_map( - node_to_include: StorageNode, - alive_node: StorageNode, - shell: Shell, - cluster: Cluster, -) -> None: +def include_node_to_network_map(node_to_include: StorageNode, alive_node: StorageNode, shell: Shell, cluster: Cluster) -> None: storage_node_set_status(node_to_include, status="online") # Per suggestion of @fyrchik we need to wait for 2 blocks after we set status and after tick epoch. 
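# Sketch of the wait performed above: MORPH_BLOCK_TIME is a duration string,
# so waiting two blocks means parse_time(...) * 2 seconds. The "8s" value is
# illustrative, not the deployment default.
from frostfs_testlib.utils import datetime_utils

wait_seconds = datetime_utils.parse_time("8s") * 2  # -> 16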
@@ -236,7 +224,7 @@ def include_node_to_network_map( tick_epoch(shell, cluster) time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2) - check_node_in_map(node_to_include, shell, alive_node) + await_node_in_map(node_to_include, shell, alive_node) @reporter.step("Check node {node} in network map") @@ -250,6 +238,11 @@ def check_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[Stor assert node_netmap_key in snapshot, f"Expected node with key {node_netmap_key} to be in network map" +@wait_for_success(300, 15, title="Await node {node} in network map") +def await_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None: + check_node_in_map(node, shell, alive_node) + + @reporter.step("Check node {node} NOT in network map") def check_node_not_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None: alive_node = alive_node or node @@ -276,12 +269,7 @@ def wait_for_node_to_be_ready(node: StorageNode) -> None: @reporter.step("Remove nodes from network map through cli-adm morph command") -def remove_nodes_from_map_morph( - shell: Shell, - cluster: Cluster, - remove_nodes: list[StorageNode], - alive_node: Optional[StorageNode] = None, -): +def remove_nodes_from_map_morph(shell: Shell, cluster: Cluster, remove_nodes: list[StorageNode], alive_node: Optional[StorageNode] = None): """ Move node to the Offline state in the candidates list and tick an epoch to update the netmap using frostfs-adm @@ -300,9 +288,5 @@ def remove_nodes_from_map_morph( if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH: # If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests) - frostfsadm = FrostfsAdm( - shell=remote_shell, - frostfs_adm_exec_path=FROSTFS_ADM_EXEC, - config_file=FROSTFS_ADM_CONFIG_PATH, - ) + frostfsadm = FrostfsAdm(shell=remote_shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH) frostfsadm.morph.remove_nodes(node_netmap_keys)
diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 7f93e40..53098b1 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -14,6 +14,7 @@ from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_E from frostfs_testlib.resources.common import MORPH_BLOCK_TIME from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider from frostfs_testlib.steps.network import IpHelper +from frostfs_testlib.steps.node_management import include_node_to_network_map, remove_nodes_from_map_morph from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode from frostfs_testlib.storage.controllers.disk_controller import DiskController from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass @@ -39,6 +40,7 @@ class ClusterStateController: self.stopped_nodes: list[ClusterNode] = [] self.detached_disks: dict[str, DiskController] = {} self.dropped_traffic: list[ClusterNode] = [] + self.excluded_from_netmap: list[StorageNode] = [] self.stopped_services: set[NodeBase] = set() self.cluster = cluster self.healthcheck = healthcheck @@ -307,23 +309,14 @@ class ClusterStateController: self.suspended_services = {} @reporter.step("Drop traffic to {node}, nodes - {block_nodes}") - def drop_traffic( - self, - node: ClusterNode, - wakeup_timeout: int, - name_interface: str,
block_nodes: list[ClusterNode] = None, - ) -> None: + def drop_traffic(self, node: ClusterNode, wakeup_timeout: int, name_interface: str, block_nodes: list[ClusterNode] = None) -> None: list_ip = self._parse_interfaces(block_nodes, name_interface) IpHelper.drop_input_traffic_to_node(node, list_ip) time.sleep(wakeup_timeout) self.dropped_traffic.append(node) @reporter.step("Start traffic to {node}") - def restore_traffic( - self, - node: ClusterNode, - ) -> None: + def restore_traffic(self, node: ClusterNode) -> None: IpHelper.restore_input_traffic_to_node(node=node) index = self.dropped_traffic.index(node) self.dropped_traffic.pop(index) @@ -410,9 +403,7 @@ class ClusterStateController: @reporter.step("Set MaintenanceModeAllowed - {status}") def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None: frostfs_adm = FrostfsAdm( - shell=cluster_node.host.get_shell(), - frostfs_adm_exec_path=FROSTFS_ADM_EXEC, - config_file=FROSTFS_ADM_CONFIG_PATH, + shell=cluster_node.host.get_shell(), frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH ) frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}") @@ -453,6 +444,25 @@ class ClusterStateController: else: assert netmap[0].node_status == status, f"Node status should be '{status}', but was '{netmap[0].node_status}'" + def remove_node_from_netmap(self, removes_nodes: list[StorageNode]) -> None: + alive_storage = list(set(self.cluster.storage_nodes) - set(removes_nodes))[0] + remove_nodes_from_map_morph(self.shell, self.cluster, removes_nodes, alive_storage) + self.excluded_from_netmap.extend(removes_nodes) + + def include_node_to_netmap(self, include_node: StorageNode, alive_node: StorageNode): + include_node_to_network_map(include_node, alive_node, self.shell, self.cluster) + self.excluded_from_netmap.pop(self.excluded_from_netmap.index(include_node)) + + def include_all_excluded_nodes(self): + if not self.excluded_from_netmap: + return + alive_node = list(set(self.cluster.storage_nodes) - set(self.excluded_from_netmap))[0] + if not alive_node: + return + + for exclude_node in self.excluded_from_netmap.copy(): + self.include_node_to_netmap(exclude_node, alive_node) + def _get_cli( self, local_shell: Shell, local_wallet: WalletInfo, cluster_node: ClusterNode ) -> tuple[FrostfsAdm, FrostfsCli, FrostfsCli]: @@ -469,11 +479,7 @@ class ClusterStateController: frostfs_adm = FrostfsAdm(shell=shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH) frostfs_cli = FrostfsCli(local_shell, FROSTFS_CLI_EXEC, local_wallet.config_path) - frostfs_cli_remote = FrostfsCli( - shell=shell, - frostfs_cli_exec_path=FROSTFS_CLI_EXEC, - config_file=wallet_config_path, - ) + frostfs_cli_remote = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config_path) return frostfs_adm, frostfs_cli, frostfs_cli_remote def _enable_date_synchronizer(self, cluster_node: ClusterNode): @@ -536,8 +542,5 @@ class ClusterStateController: @reporter.step("Get contract by domain - {domain_name}") def get_domain_contracts(self, cluster_node: ClusterNode, domain_name: str): - frostfs_adm = FrostfsAdm( - shell=cluster_node.host.get_shell(), - frostfs_adm_exec_path=FROSTFS_ADM_EXEC, - ) + frostfs_adm = FrostfsAdm(shell=cluster_node.host.get_shell(), frostfs_adm_exec_path=FROSTFS_ADM_EXEC) return frostfs_adm.morph.dump_hashes(cluster_node.morph_chain.get_http_endpoint(), domain_name).stdout diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py 
b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py index d192de5..55a8388 100644 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -90,3 +90,6 @@ class Chunk: def __str__(self) -> str: return self.object_id + + def __repr__(self) -> str: + return self.object_id diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py index d1bba9f..7f3161c 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py @@ -8,6 +8,7 @@ from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.controllers.shards_watcher import ShardsWatcher from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo from frostfs_testlib.storage.grpc_operations import interfaces +from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.utils.cli_utils import parse_netmap_output @@ -42,6 +43,7 @@ class ChunksOperations(interfaces.ChunksInterface): if cluster_node.host_ip == node_info.node: return (cluster_node, node_info) + @wait_for_success(300, 5, fail_testcase=None) @reporter.step("Search shard with chunk {chunk}") def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str: oid_path = f"{chunk.object_id[0]}/{chunk.object_id[1]}/{chunk.object_id[2]}/{chunk.object_id[3]}" @@ -63,7 +65,7 @@ class ChunksOperations(interfaces.ChunksInterface): address: Optional[str] = None, bearer: Optional[str] = None, generate_key: Optional[bool] = None, - trace: bool = False, + trace: bool = True, root: bool = False, verify_presence_all: bool = False, json: bool = True, @@ -86,7 +88,7 @@ class ChunksOperations(interfaces.ChunksInterface): xhdr=xhdr, timeout=timeout, ) - return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[0] + return self._parse_object_nodes(object_nodes.stdout.split("\n")[0]) @reporter.step("Get last parity chunk") def get_parity( @@ -97,7 +99,7 @@ class ChunksOperations(interfaces.ChunksInterface): bearer: Optional[str] = None, generate_key: Optional[bool] = None, oid: Optional[str] = None, - trace: bool = False, + trace: bool = True, root: bool = False, verify_presence_all: bool = False, json: bool = True, @@ -120,7 +122,7 @@ class ChunksOperations(interfaces.ChunksInterface): xhdr=xhdr, timeout=timeout, ) - return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[0] + return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[-1] @reporter.step("Get first data chunk") def get_first_data( diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py index c8360ea..7a637d7 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py @@ -8,7 +8,7 @@ from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli from frostfs_testlib.plugins import load_plugin from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT from frostfs_testlib.s3.interfaces import BucketContainerResolver -from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.grpc_operations import interfaces from frostfs_testlib.utils import 
json_utils @@ -266,6 +266,7 @@ class ContainerOperations(interfaces.ContainerInterface): self, endpoint: str, cid: str, + cluster: Cluster, address: Optional[str] = None, ttl: Optional[int] = None, from_file: Optional[str] = None, diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py index 63a2922..0e14aec 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py @@ -509,6 +509,7 @@ class ObjectOperations(interfaces.ObjectInterface): cid: str, endpoint: str, bearer: str = "", + oid: Optional[str] = None, filters: Optional[dict] = None, expected_objects_list: Optional[list] = None, xhdr: Optional[dict] = None, @@ -516,6 +517,9 @@ class ObjectOperations(interfaces.ObjectInterface): phy: bool = False, root: bool = False, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + ttl: Optional[int] = None, ) -> list: """ SEARCH an Object. @@ -541,11 +545,15 @@ class ObjectOperations(interfaces.ObjectInterface): rpc_endpoint=endpoint, cid=cid, bearer=bearer, + oid=oid, xhdr=xhdr, filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] if filters else None, session=session, phy=phy, root=root, + address=address, + generate_key=generate_key, + ttl=ttl, timeout=timeout, ) diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces.py b/src/frostfs_testlib/storage/grpc_operations/interfaces.py index 1947435..c293c2d 100644 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces.py +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces.py @@ -235,6 +235,7 @@ class ObjectInterface(ABC): cid: str, endpoint: str, bearer: str = "", + oid: Optional[str] = None, filters: Optional[dict] = None, expected_objects_list: Optional[list] = None, xhdr: Optional[dict] = None, @@ -242,6 +243,9 @@ class ObjectInterface(ABC): phy: bool = False, root: bool = False, timeout: Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + ttl: Optional[int] = None, ) -> List: pass @@ -368,6 +372,7 @@ class ContainerInterface(ABC): self, endpoint: str, cid: str, + cluster: Cluster, address: Optional[str] = None, ttl: Optional[int] = None, from_file: Optional[str] = None, @@ -376,7 +381,7 @@ class ContainerInterface(ABC): xhdr: Optional[dict] = None, generate_key: Optional[bool] = None, timeout: Optional[str] = None, - ) -> List[str]: + ) -> List[ClusterNode]: """Show the nodes participating in the container in the current epoch.""" raise NotImplementedError("No implemethed method nodes") From cef64e315ee5e872f1f1ebc9eaefcd4b5bfefc9c Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Wed, 11 Sep 2024 19:39:25 +0300 Subject: [PATCH 291/363] [#267] add no rule found object and morph chain --- src/frostfs_testlib/cli/frostfs_adm/morph.py | 126 ++++++++++++++++++ .../resources/error_patterns.py | 1 + 2 files changed, 127 insertions(+) diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index d8fd61c..5b808ca 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -350,3 +350,129 @@ class FrostfsAdmMorph(CliCommand): if param not in ["self", "node_netmap_keys"] }, ) + + def add_rule( + self, + endpoint: str, + chain_id: str, + target_name: str, + target_type: str, + rule: 
Optional[list[str]] = None, + path: Optional[str] = None, + chain_id_hex: Optional[bool] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Add an APE rule chain for the target. + + Args: + address: Address of wallet account + chain-id: Assign ID to the parsed chain + chain-id-hex: Flag to parse chain ID as hex + endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>') + path: Path to encoded chain in JSON or binary format + rule: Rule statement + target-name: Resource name in APE resource name format + target-type: Resource type (container/namespace) + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command's result. + """ + return self._execute( + "control add-rule", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def get_rule( + self, + endpoint: str, + chain_id: str, + target_name: str, + target_type: str, + chain_id_hex: Optional[bool] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Get an APE rule chain for the target. + + Args: + address: Address of wallet account + chain-id: Chain id + chain-id-hex: Flag to parse chain ID as hex + endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>') + target-name: Resource name in APE resource name format + target-type: Resource type (container/namespace) + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command's result. + """ + return self._execute( + "control get-rule", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def list_rules( + self, + target_type: str, + target_name: Optional[str] = None, + rpc_endpoint: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """List APE rule chains for the target. + + Args: + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>') + target-name: Resource name in APE resource name format + target-type: Resource type (container/namespace) + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command's result. + """ + return self._execute( + "morph ape list-rule-chains", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + + def remove_rule( + self, + endpoint: str, + chain_id: str, + target_name: str, + target_type: str, + all: Optional[bool] = None, + chain_id_hex: Optional[bool] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + """Remove an APE rule chain from the target. + + Args: + address: Address of wallet account + all: Remove all chains + chain-id: Assign ID to the parsed chain + chain-id-hex: Flag to parse chain ID as hex + endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>') + target-name: Resource name in APE resource name format + target-type: Resource type (container/namespace) + timeout: Timeout for an operation (default 15s) + wallet: Path to the wallet or binary key + + Returns: + Command's result. + """ + return self._execute( + "control remove-rule", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) \ No newline at end of file
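# Illustrative sketch of driving the new APE rule-chain wrappers; it assumes
# `frostfs_adm` is an existing FrostfsAdmMorph instance, and the endpoint,
# chain ID, and rule statement are placeholders rather than values from this patch.
frostfs_adm.add_rule(
    endpoint="s01.frostfs.devenv:8091",
    chain_id="chain-test-1",
    target_name="mycontainer",
    target_type="container",
    rule=["allow Object.Get *"],  # hypothetical rule statement
)
rules = frostfs_adm.list_rules(target_type="container", target_name="mycontainer")
frostfs_adm.remove_rule(
    endpoint="s01.frostfs.devenv:8091",
    chain_id="chain-test-1",
    target_name="mycontainer",
    target_type="container",
)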
+ """ + return self._execute( + "control remove-rule", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) \ No newline at end of file diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py index 3b9231e..3ba5f13 100644 --- a/src/frostfs_testlib/resources/error_patterns.py +++ b/src/frostfs_testlib/resources/error_patterns.py @@ -29,3 +29,4 @@ S3_MALFORMED_XML_REQUEST = "The XML you provided was not well-formed or did not RULE_ACCESS_DENIED_CONTAINER = "access to container operation {operation} is denied by access policy engine: Access denied" RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied" NO_RULE_FOUND_CONTAINER = "access to container operation {operation} is denied by access policy engine: NoRuleFound" +NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request: method {operation}: NoRuleFound" From 24b8ca73d74fbf7a52c733e72dc1e4127f55ceac Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Wed, 11 Sep 2024 22:00:21 +0300 Subject: [PATCH 292/363] [#291] get namespace endpoint --- src/frostfs_testlib/storage/dataclasses/frostfs_services.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py index 1420356..4f5c348 100644 --- a/src/frostfs_testlib/storage/dataclasses/frostfs_services.py +++ b/src/frostfs_testlib/storage/dataclasses/frostfs_services.py @@ -39,6 +39,9 @@ class S3Gate(NodeBase): def get_endpoint(self) -> str: return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0) + def get_ns_endpoint(self, ns_name: str) -> str: + return self._get_attribute(f"{ConfigAttributes.ENDPOINT_DATA_0}_namespace").format(namespace=ns_name) + def get_all_endpoints(self) -> list[str]: return [ self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0), From 2976e30b75d25ad00d62529e0a68beda490ce795 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Mon, 7 Oct 2024 15:59:00 +0300 Subject: [PATCH 293/363] [#299] Add fuse to prevent similar names generation Signed-off-by: a.berezin --- src/frostfs_testlib/utils/string_utils.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/utils/string_utils.py b/src/frostfs_testlib/utils/string_utils.py index 80efa65..726c792 100644 --- a/src/frostfs_testlib/utils/string_utils.py +++ b/src/frostfs_testlib/utils/string_utils.py @@ -1,3 +1,4 @@ +import itertools import random import re import string @@ -7,6 +8,8 @@ ONLY_ASCII_LETTERS = string.ascii_letters DIGITS_AND_ASCII_LETTERS = string.ascii_letters + string.digits NON_DIGITS_AND_LETTERS = string.punctuation +FUSE = itertools.cycle(range(5)) + def unique_name(prefix: str = "", postfix: str = ""): """ @@ -18,7 +21,7 @@ def unique_name(prefix: str = "", postfix: str = ""): Returns: unique name string """ - return f"{prefix}{hex(int(datetime.now().timestamp() * 1000000))}{postfix}" + return f"{prefix}{hex(int(datetime.now().timestamp() * 1000000))}{next(FUSE)}{postfix}" def random_string(length: int = 5, source: str = ONLY_ASCII_LETTERS): From a04eba8aecdbbc9285141c82328291eb0bf0e9b9 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Fri, 11 Oct 2024 12:23:32 +0300 Subject: [PATCH 294/363] [#302] Autoadd marks for frostfs Signed-off-by: a.berezin --- src/frostfs_testlib/__init__.py | 1 + src/frostfs_testlib/hooks.py | 12 ++++++++++++ src/frostfs_testlib/utils/string_utils.py | 1 + 
From a04eba8aecdbbc9285141c82328291eb0bf0e9b9 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Fri, 11 Oct 2024 12:23:32 +0300 Subject: [PATCH 294/363] [#302] Autoadd marks for frostfs Signed-off-by: a.berezin --- src/frostfs_testlib/__init__.py | 1 + src/frostfs_testlib/hooks.py | 12 ++++++++++++ src/frostfs_testlib/utils/string_utils.py | 1 + 3 files changed, 14 insertions(+) create mode 100644 src/frostfs_testlib/hooks.py
diff --git a/src/frostfs_testlib/__init__.py b/src/frostfs_testlib/__init__.py index f3143e6..1ceb972 100644 --- a/src/frostfs_testlib/__init__.py +++ b/src/frostfs_testlib/__init__.py @@ -1,3 +1,4 @@ __version__ = "2.0.1" from .fixtures import configure_testlib, hosting, temp_directory +from .hooks import pytest_collection_modifyitems
diff --git a/src/frostfs_testlib/hooks.py b/src/frostfs_testlib/hooks.py new file mode 100644 index 0000000..df89bff --- /dev/null +++ b/src/frostfs_testlib/hooks.py @@ -0,0 +1,12 @@ +import pytest + + +@pytest.hookimpl +def pytest_collection_modifyitems(items: list[pytest.Item]): + # All tests whose nodeid contains "frostfs" are granted the frostfs marker, excluding + # nodeid = full path of the test + # 1. plugins + # 2. testlib itself + for item in items: + if "frostfs" in item.nodeid and "plugin" not in item.nodeid and "testlib" not in item.nodeid: + item.add_marker("frostfs")
diff --git a/src/frostfs_testlib/utils/string_utils.py b/src/frostfs_testlib/utils/string_utils.py index 726c792..acbca92 100644 --- a/src/frostfs_testlib/utils/string_utils.py +++ b/src/frostfs_testlib/utils/string_utils.py @@ -8,6 +8,7 @@ ONLY_ASCII_LETTERS = string.ascii_letters DIGITS_AND_ASCII_LETTERS = string.ascii_letters + string.digits NON_DIGITS_AND_LETTERS = string.punctuation +# if unique_name is called multiple times within the same microsecond, append 0-4 to the name so it is surely unique FUSE = itertools.cycle(range(5))
From 2a41f2b0f64316efd83889b88b19ad7d966cb948 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Mon, 23 Sep 2024 17:54:40 +0300 Subject: [PATCH 295/363] [#301] Added interfaces for put/get lifecycle configuration to s3 clients --- pyproject.toml | 4 +-- requirements.txt | 4 +-- src/frostfs_testlib/cli/frostfs_adm/morph.py | 12 ++------ src/frostfs_testlib/s3/aws_cli_client.py | 30 +++++++++++++++++++ src/frostfs_testlib/s3/boto3_client.py | 21 +++++++++++++ src/frostfs_testlib/s3/interfaces.py | 12 ++++++++ src/frostfs_testlib/steps/epoch.py | 11 +++++-- .../testing/cluster_test_base.py | 8 ++--- 8 files changed, 80 insertions(+), 22 deletions(-)
diff --git a/pyproject.toml b/pyproject.toml index 296ce65..3faa637 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -27,8 +27,8 @@ dependencies = [ "testrail-api>=1.12.0", "pytest==7.1.2", "tenacity==8.0.1", - "boto3==1.16.33", - "boto3-stubs[essential]==1.16.33", + "boto3==1.35.30", + "boto3-stubs[essential]==1.35.30", ] requires-python = ">=3.10"
diff --git a/requirements.txt b/requirements.txt index 32e604f..e012366 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,8 +8,8 @@ docstring_parser==0.15 testrail-api==1.12.0 tenacity==8.0.1 pytest==7.1.2 -boto3==1.16.33 -boto3-stubs[essential]==1.16.33 +boto3==1.35.30 +boto3-stubs[essential]==1.35.30 # Dev dependencies black==22.8.0
diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index 5b808ca..eea0985 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -69,9 +69,7 @@ class FrostfsAdmMorph(CliCommand): **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) - def set_config( - self, set_key_value: str, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None - ) -> CommandResult: + def set_config(self, set_key_value: str, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] =
None) -> CommandResult: """Add/update global config value in the FrostFS network. Args: @@ -125,7 +123,7 @@ class FrostfsAdmMorph(CliCommand): ) def force_new_epoch( - self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None + self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None, delta: Optional[int] = None ) -> CommandResult: """Create new FrostFS epoch event in the side chain. @@ -344,11 +342,7 @@ class FrostfsAdmMorph(CliCommand): return self._execute( f"morph remove-nodes {' '.join(node_netmap_keys)}", - **{ - param: param_value - for param, param_value in locals().items() - if param not in ["self", "node_netmap_keys"] - }, + **{param: param_value for param, param_value in locals().items() if param not in ["self", "node_netmap_keys"]}, ) def add_rule( diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 8169afe..2482376 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -754,6 +754,36 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response.get("ObjectLockConfiguration") + @reporter.step("Put bucket lifecycle configuration") + def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict: + cmd = ( + f"aws {self.common_flags} s3api put-bucket-lifecycle-configuration --bucket {bucket} " + f"--endpoint-url {self.s3gate_endpoint} --lifecycle-configuration file://{dumped_configuration} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response + + @reporter.step("Get bucket lifecycle configuration") + def get_bucket_lifecycle_configuration(self, bucket: str) -> dict: + cmd = ( + f"aws {self.common_flags} s3api get-bucket-lifecycle-configuration --bucket {bucket} " + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response + + @reporter.step("Delete bucket lifecycle configuration") + def delete_bucket_lifecycle(self, bucket: str) -> dict: + cmd = ( + f"aws {self.common_flags} s3api delete-bucket-lifecycle --bucket {bucket} " + f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" + ) + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + return response + @staticmethod def _to_json(output: str) -> dict: json_output = {} diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index a644a6f..b638939 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -296,6 +296,27 @@ class Boto3ClientWrapper(S3ClientWrapper): response = self.boto3_client.delete_bucket_cors(Bucket=bucket) log_command_execution(self.s3gate_endpoint, "S3 delete_bucket_cors result", response, {"Bucket": bucket}) + @reporter.step("Put bucket lifecycle configuration") + @report_error + def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict: + response = self.boto3_client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle_configuration) + log_command_execution(self.s3gate_endpoint, "S3 put_bucket_lifecycle_configuration result", response, {"Bucket": bucket}) + return response + + @reporter.step("Get bucket lifecycle configuration") + @report_error + def 
get_bucket_lifecycle_configuration(self, bucket: str) -> dict: + response = self.boto3_client.get_bucket_lifecycle_configuration(Bucket=bucket) + log_command_execution(self.s3gate_endpoint, "S3 get_bucket_lifecycle_configuration result", response, {"Bucket": bucket}) + return {"Rules": response.get("Rules")} + + @reporter.step("Delete bucket lifecycle configuration") + @report_error + def delete_bucket_lifecycle(self, bucket: str) -> dict: + response = self.boto3_client.delete_bucket_lifecycle(Bucket=bucket) + log_command_execution(self.s3gate_endpoint, "S3 delete_bucket_lifecycle result", response, {"Bucket": bucket}) + return response + # END OF BUCKET METHODS # # OBJECT METHODS # diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index b1825d5..da4fc6b 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -366,6 +366,18 @@ class S3ClientWrapper(HumanReadableABC): def delete_object_tagging(self, bucket: str, key: str) -> None: """Removes the entire tag set from the specified object.""" + @abstractmethod + def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict: + """Adds or updates bucket lifecycle configuration""" + + @abstractmethod + def get_bucket_lifecycle_configuration(self, bucket: str) -> dict: + """Gets bucket lifecycle configuration""" + + @abstractmethod + def delete_bucket_lifecycle(self, bucket: str) -> dict: + """Deletes bucket lifecycle""" + @abstractmethod def get_object_attributes( self, diff --git a/src/frostfs_testlib/steps/epoch.py b/src/frostfs_testlib/steps/epoch.py index ce7ed12..6ec5483 100644 --- a/src/frostfs_testlib/steps/epoch.py +++ b/src/frostfs_testlib/steps/epoch.py @@ -69,7 +69,7 @@ def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] @reporter.step("Tick Epoch") -def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None): +def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None, delta: Optional[int] = None): """ Tick epoch using frostfs-adm or NeoGo if frostfs-adm is not available (DevEnv) Args: @@ -88,12 +88,17 @@ def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH, ) - frostfs_adm.morph.force_new_epoch() + frostfs_adm.morph.force_new_epoch(delta=delta) return # Otherwise we tick epoch using transaction cur_epoch = get_epoch(shell, cluster) + if delta: + next_epoch = cur_epoch + delta + else: + next_epoch = cur_epoch + 1 + # Use first node by default ir_node = cluster.services(InnerRing)[0] # In case if no local_wallet_path is provided, we use wallet_path @@ -110,7 +115,7 @@ def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] wallet_password=ir_wallet_pass, scripthash=get_contract_hash(morph_chain, "netmap.frostfs", shell=shell), method="newEpoch", - arguments=f"int:{cur_epoch + 1}", + arguments=f"int:{next_epoch}", multisig_hash=f"{ir_address}:Global", address=ir_address, rpc_endpoint=morph_endpoint, diff --git a/src/frostfs_testlib/testing/cluster_test_base.py b/src/frostfs_testlib/testing/cluster_test_base.py index f2e10ad..50c8eb6 100644 --- a/src/frostfs_testlib/testing/cluster_test_base.py +++ b/src/frostfs_testlib/testing/cluster_test_base.py @@ -25,12 +25,8 @@ class ClusterTestBase: for _ in range(epochs_to_tick): self.tick_epoch(alive_node, wait_block) - def tick_epoch( - self, - 
alive_node: Optional[StorageNode] = None, - wait_block: int = None, - ): - epoch.tick_epoch(self.shell, self.cluster, alive_node=alive_node) + def tick_epoch(self, alive_node: Optional[StorageNode] = None, wait_block: int = None, delta: Optional[int] = None): + epoch.tick_epoch(self.shell, self.cluster, alive_node=alive_node, delta=delta) if wait_block: self.wait_for_blocks(wait_block) From cf48f474ebb8aea4798e007c931ca157eb8fd7ea Mon Sep 17 00:00:00 2001 From: "m.malygina" Date: Thu, 10 Oct 2024 10:39:54 +0300 Subject: [PATCH 296/363] [#303] add check if registry is on hdd Signed-off-by: m.malygina --- src/frostfs_testlib/load/interfaces/scenario_runner.py | 5 +++++ src/frostfs_testlib/load/runners.py | 6 ++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/load/interfaces/scenario_runner.py b/src/frostfs_testlib/load/interfaces/scenario_runner.py index 45c1317..c0062a9 100644 --- a/src/frostfs_testlib/load/interfaces/scenario_runner.py +++ b/src/frostfs_testlib/load/interfaces/scenario_runner.py @@ -1,5 +1,6 @@ from abc import ABC, abstractmethod +from frostfs_testlib.load.interfaces.loader import Loader from frostfs_testlib.load.k6 import K6 from frostfs_testlib.load.load_config import LoadParams from frostfs_testlib.storage.cluster import ClusterNode @@ -48,3 +49,7 @@ class ScenarioRunner(ABC): @abstractmethod def get_results(self) -> dict: """Get results from K6 run""" + + @abstractmethod + def get_loaders(self) -> list[Loader]: + """Return loaders""" diff --git a/src/frostfs_testlib/load/runners.py b/src/frostfs_testlib/load/runners.py index a34786f..1ceac09 100644 --- a/src/frostfs_testlib/load/runners.py +++ b/src/frostfs_testlib/load/runners.py @@ -30,6 +30,7 @@ from frostfs_testlib.utils.file_keeper import FileKeeper class RunnerBase(ScenarioRunner): k6_instances: list[K6] + loaders: list[Loader] @reporter.step("Run preset on loaders") def preset(self): @@ -49,9 +50,11 @@ class RunnerBase(ScenarioRunner): def get_k6_instances(self): return self.k6_instances + def get_loaders(self) -> list[Loader]: + return self.loaders + class DefaultRunner(RunnerBase): - loaders: list[Loader] user: User def __init__( @@ -228,7 +231,6 @@ class DefaultRunner(RunnerBase): class LocalRunner(RunnerBase): - loaders: list[Loader] cluster_state_controller: ClusterStateController file_keeper: FileKeeper user: User From 738cfacbb7416d792c95e034bb8355acd7b1c7dd Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Mon, 7 Oct 2024 17:33:45 +0300 Subject: [PATCH 297/363] [#300] Refactor tests: use `unique_name` instead `hex + timestamp` Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/credentials/authmate_s3_provider.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/credentials/authmate_s3_provider.py b/src/frostfs_testlib/credentials/authmate_s3_provider.py index 66c5015..ed6454b 100644 --- a/src/frostfs_testlib/credentials/authmate_s3_provider.py +++ b/src/frostfs_testlib/credentials/authmate_s3_provider.py @@ -1,5 +1,4 @@ import re -from datetime import datetime from typing import Optional from frostfs_testlib import reporter @@ -10,6 +9,7 @@ from frostfs_testlib.shell import LocalShell from frostfs_testlib.steps.cli.container import list_containers from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate +from frostfs_testlib.utils import string_utils class AuthmateS3CredentialsProvider(S3CredentialsProvider): @@ -22,7 +22,7 @@ class 
AuthmateS3CredentialsProvider(S3CredentialsProvider): gate_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes] # unique short bucket name - bucket = f"bucket-{hex(int(datetime.now().timestamp()*1000000))}" + bucket = string_utils.unique_name("bucket-") frostfs_authmate: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC) issue_secret_output = frostfs_authmate.secret.issue( From 5fa58a55c05f006b81954bc571e7a9e1cca1ffed Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 18 Oct 2024 13:25:12 +0300 Subject: [PATCH 298/363] [#304] Improve logging Boto3 IAM methods Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/s3/boto3_client.py | 174 +++++++++++++++++++------ 1 file changed, 135 insertions(+), 39 deletions(-) diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index b638939..a99b866 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -68,6 +68,7 @@ class Boto3ClientWrapper(S3ClientWrapper): self.access_key_id: str = access_key_id self.secret_access_key: str = secret_access_key self.s3gate_endpoint: str = "" + self.iam_endpoint: str = "" self.boto3_iam_client: S3Client = None self.set_endpoint(s3gate_endpoint) @@ -90,11 +91,16 @@ class Boto3ClientWrapper(S3ClientWrapper): @reporter.step("Set endpoint IAM to {iam_endpoint}") def set_iam_endpoint(self, iam_endpoint: str): + if self.iam_endpoint == iam_endpoint: + return + + self.iam_endpoint = iam_endpoint + self.boto3_iam_client = self.session.client( service_name="iam", aws_access_key_id=self.access_key_id, aws_secret_access_key=self.secret_access_key, - endpoint_url=iam_endpoint, + endpoint_url=self.iam_endpoint, verify=False, ) @@ -687,25 +693,36 @@ class Boto3ClientWrapper(S3ClientWrapper): # Some methods don't have checks because boto3 is silent in some cases (delete, attach, etc.) 
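    # A minimal sketch of the logging pattern each IAM wrapper below adopts in this patch
    # ("SomeMethod"/"some_method" stand in for the concrete boto3 call; they are not real names):
    #
    #     @reporter.step("...")
    #     @report_error
    #     def iam_some_method(self, user_name: str) -> dict:
    #         params = self._convert_to_s3_params(locals().items())
    #         response = self.boto3_iam_client.some_method(**params)
    #         log_command_execution(self.iam_endpoint, "IAM Some Method", response, params)
    #         return response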
@reporter.step("Adds the specified user to the specified group") + @report_error def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: - response = self.boto3_iam_client.add_user_to_group(UserName=user_name, GroupName=group_name) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.add_user_to_group(**params) + log_command_execution(self.iam_endpoint, "IAM Add User to Group", response, params) return response @reporter.step("Attaches the specified managed policy to the specified IAM group") + @report_error def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: - response = self.boto3_iam_client.attach_group_policy(GroupName=group_name, PolicyArn=policy_arn) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.attach_group_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Attach Group Policy", response, params) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Attaches the specified managed policy to the specified user") + @report_error def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: - response = self.boto3_iam_client.attach_user_policy(UserName=user_name, PolicyArn=policy_arn) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.attach_user_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Attach User Policy", response, params) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Creates a new AWS secret access key and access key ID for the specified user") + @report_error def iam_create_access_key(self, user_name: str) -> dict: response = self.boto3_iam_client.create_access_key(UserName=user_name) + log_command_execution(self.iam_endpoint, "IAM Create Access Key", response, {"UserName": user_name}) access_key_id = response["AccessKey"].get("AccessKeyId") secret_access_key = response["AccessKey"].get("SecretAccessKey") @@ -715,138 +732,190 @@ class Boto3ClientWrapper(S3ClientWrapper): return access_key_id, secret_access_key @reporter.step("Creates a new group") + @report_error def iam_create_group(self, group_name: str) -> dict: response = self.boto3_iam_client.create_group(GroupName=group_name) + log_command_execution(self.iam_endpoint, "IAM Create Group", response, {"GroupName": group_name}) + assert response.get("Group"), f"Expected Group in response:\n{response}" assert response["Group"].get("GroupName") == group_name, f"GroupName should be equal to {group_name}" return response @reporter.step("Creates a new managed policy for your AWS account") + @report_error def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: - response = self.boto3_iam_client.create_policy(PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)) + params = self._convert_to_s3_params(locals().items()) + params["PolicyDocument"] = json.dumps(policy_document) + response = self.boto3_iam_client.create_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Create Policy", response, params) + assert response.get("Policy"), f"Expected Policy in response:\n{response}" assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}" return response @reporter.step("Creates a new IAM user for your AWS account") + @report_error def iam_create_user(self, user_name: str) -> dict: response = self.boto3_iam_client.create_user(UserName=user_name) + log_command_execution(self.iam_endpoint, "IAM Create User", 
response, {"UserName": user_name}) + assert response.get("User"), f"Expected User in response:\n{response}" assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" return response @reporter.step("Deletes the access key pair associated with the specified IAM user") + @report_error def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: - response = self.boto3_iam_client.delete_access_key(AccessKeyId=access_key_id, UserName=user_name) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.delete_access_key(**params) + log_command_execution(self.iam_endpoint, "IAM Delete Access Key", response, params) return response @reporter.step("Deletes the specified IAM group") + @report_error def iam_delete_group(self, group_name: str) -> dict: response = self.boto3_iam_client.delete_group(GroupName=group_name) + log_command_execution(self.iam_endpoint, "IAM Delete Group", response, {"GroupName": group_name}) return response @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group") + @report_error def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: - response = self.boto3_iam_client.delete_group_policy(GroupName=group_name, PolicyName=policy_name) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.delete_group_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Delete Group Policy", response, params) return response @reporter.step("Deletes the specified managed policy") + @report_error def iam_delete_policy(self, policy_arn: str) -> dict: response = self.boto3_iam_client.delete_policy(PolicyArn=policy_arn) + log_command_execution(self.iam_endpoint, "IAM Delete Policy", response, {"PolicyArn": policy_arn}) return response @reporter.step("Deletes the specified IAM user") + @report_error def iam_delete_user(self, user_name: str) -> dict: response = self.boto3_iam_client.delete_user(UserName=user_name) + log_command_execution(self.iam_endpoint, "IAM Delete User", response, {"UserName": user_name}) return response @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user") + @report_error def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: - response = self.boto3_iam_client.delete_user_policy(UserName=user_name, PolicyName=policy_name) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.delete_user_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Delete User Policy", response, params) return response @reporter.step("Removes the specified managed policy from the specified IAM group") + @report_error def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict: - response = self.boto3_iam_client.detach_group_policy(GroupName=group_name, PolicyArn=policy_arn) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.detach_group_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Detach Group Policy", response, params) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Removes the specified managed policy from the specified user") + @report_error def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: - response = self.boto3_iam_client.detach_user_policy(UserName=user_name, PolicyArn=policy_arn) + params = self._convert_to_s3_params(locals().items()) + response = 
self.boto3_iam_client.detach_user_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Detach User Policy", response, params) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Returns a list of IAM users that are in the specified IAM group") + @report_error def iam_get_group(self, group_name: str) -> dict: response = self.boto3_iam_client.get_group(GroupName=group_name) + log_command_execution(self.iam_endpoint, "IAM Get Group", response, {"GroupName": group_name}) assert response.get("Group").get("GroupName") == group_name, f"GroupName should be equal to {group_name}" - return response @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group") + @report_error def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: - response = self.boto3_iam_client.get_group_policy(GroupName=group_name, PolicyName=policy_name) - + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.get_group_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Get Group Policy", response, params) return response @reporter.step("Retrieves information about the specified managed policy") + @report_error def iam_get_policy(self, policy_arn: str) -> dict: response = self.boto3_iam_client.get_policy(PolicyArn=policy_arn) + log_command_execution(self.iam_endpoint, "IAM Get Policy", response, {"PolicyArn": policy_arn}) + assert response.get("Policy"), f"Expected Policy in response:\n{response}" assert response["Policy"].get("Arn") == policy_arn, f"PolicyArn should be equal to {policy_arn}" return response @reporter.step("Retrieves information about the specified version of the specified managed policy") + @report_error def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: - response = self.boto3_iam_client.get_policy_version(PolicyArn=policy_arn, VersionId=version_id) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.get_policy_version(**params) + log_command_execution(self.iam_endpoint, "IAM Get Policy Version", response, params) + assert response.get("PolicyVersion"), f"Expected PolicyVersion in response:\n{response}" assert response["PolicyVersion"].get("VersionId") == version_id, f"VersionId should be equal to {version_id}" return response @reporter.step("Retrieves information about the specified IAM user") + @report_error def iam_get_user(self, user_name: str) -> dict: response = self.boto3_iam_client.get_user(UserName=user_name) + log_command_execution(self.iam_endpoint, "IAM Get User", response, {"UserName": user_name}) + assert response.get("User"), f"Expected User in response:\n{response}" assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" return response @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user") + @report_error def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: - response = self.boto3_iam_client.get_user_policy(UserName=user_name, PolicyName=policy_name) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.get_user_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Get User Policy", response, params) assert response.get("UserName"), f"Expected UserName in response:\n{response}" - return response @reporter.step("Returns information about the access key IDs associated with the specified IAM user") + @report_error def 
iam_list_access_keys(self, user_name: str) -> dict: response = self.boto3_iam_client.list_access_keys(UserName=user_name) - + log_command_execution(self.iam_endpoint, "IAM List Access Keys", response, {"UserName": user_name}) return response @reporter.step("Lists all managed policies that are attached to the specified IAM group") + @report_error def iam_list_attached_group_policies(self, group_name: str) -> dict: response = self.boto3_iam_client.list_attached_group_policies(GroupName=group_name) + log_command_execution(self.iam_endpoint, "IAM List Attached Group Policies", response, {"GroupName": group_name}) assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" - return response @reporter.step("Lists all managed policies that are attached to the specified IAM user") + @report_error def iam_list_attached_user_policies(self, user_name: str) -> dict: response = self.boto3_iam_client.list_attached_user_policies(UserName=user_name) + log_command_execution(self.iam_endpoint, "IAM List Attached User Policies", response, {"UserName": user_name}) assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" - return response @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to") + @report_error def iam_list_entities_for_policy(self, policy_arn: str) -> dict: response = self.boto3_iam_client.list_entities_for_policy(PolicyArn=policy_arn) + log_command_execution(self.iam_endpoint, "IAM List Entities For Policy", response, {"PolicyArn": policy_arn}) assert response.get("PolicyGroups"), f"Expected PolicyGroups in response:\n{response}" assert response.get("PolicyUsers"), f"Expected PolicyUsers in response:\n{response}" @@ -854,98 +923,125 @@ class Boto3ClientWrapper(S3ClientWrapper): return response @reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group") + @report_error def iam_list_group_policies(self, group_name: str) -> dict: response = self.boto3_iam_client.list_group_policies(GroupName=group_name) + log_command_execution(self.iam_endpoint, "IAM List Group Policies", response, {"GroupName": group_name}) assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" - return response @reporter.step("Lists the IAM groups") + @report_error def iam_list_groups(self) -> dict: response = self.boto3_iam_client.list_groups() + log_command_execution(self.iam_endpoint, "IAM List Groups", response) assert response.get("Groups"), f"Expected Groups in response:\n{response}" - return response @reporter.step("Lists the IAM groups that the specified IAM user belongs to") + @report_error def iam_list_groups_for_user(self, user_name: str) -> dict: response = self.boto3_iam_client.list_groups_for_user(UserName=user_name) + log_command_execution(self.iam_endpoint, "IAM List Groups For User", response, {"UserName": user_name}) assert response.get("Groups"), f"Expected Groups in response:\n{response}" - return response @reporter.step("Lists all the managed policies that are available in your AWS account") + @report_error def iam_list_policies(self) -> dict: response = self.boto3_iam_client.list_policies() + log_command_execution(self.iam_endpoint, "IAM List Policies", response) assert response.get("Policies"), f"Expected Policies in response:\n{response}" - return response @reporter.step("Lists information about the versions of the specified managed policy") + @report_error def iam_list_policy_versions(self, policy_arn: str) -> 
dict: response = self.boto3_iam_client.list_policy_versions(PolicyArn=policy_arn) + log_command_execution(self.iam_endpoint, "IAM List Policy Versions", response, {"PolicyArn": policy_arn}) assert response.get("Versions"), f"Expected Versions in response:\n{response}" - return response @reporter.step("Lists the names of the inline policies embedded in the specified IAM user") + @report_error def iam_list_user_policies(self, user_name: str) -> dict: response = self.boto3_iam_client.list_user_policies(UserName=user_name) + log_command_execution(self.iam_endpoint, "IAM List User Policies", response, {"UserName": user_name}) assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" - return response @reporter.step("Lists the IAM users") + @report_error def iam_list_users(self) -> dict: response = self.boto3_iam_client.list_users() + log_command_execution(self.iam_endpoint, "IAM List Users", response) assert response.get("Users"), f"Expected Users in response:\n{response}" - return response @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group") + @report_error def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: - response = self.boto3_iam_client.put_group_policy( - GroupName=group_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document) - ) + params = self._convert_to_s3_params(locals().items()) + params["PolicyDocument"] = json.dumps(policy_document) + response = self.boto3_iam_client.put_group_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Put Group Policy", response, params) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") + @report_error def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: - response = self.boto3_iam_client.put_user_policy( - UserName=user_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document) - ) + params = self._convert_to_s3_params(locals().items()) + params["PolicyDocument"] = json.dumps(policy_document) + response = self.boto3_iam_client.put_user_policy(**params) + log_command_execution(self.iam_endpoint, "IAM Put User Policy", response, params) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Removes the specified user from the specified group") + @report_error def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: - response = self.boto3_iam_client.remove_user_from_group(GroupName=group_name, UserName=user_name) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.remove_user_from_group(**params) + log_command_execution(self.iam_endpoint, "IAM Remove User From Group", response, params) return response @reporter.step("Updates the name and/or the path of the specified IAM group") + @report_error def iam_update_group(self, group_name: str, new_name: str, new_path: Optional[str] = None) -> dict: - response = self.boto3_iam_client.update_group(GroupName=group_name, NewGroupName=new_name, NewPath="/") - + params = {"GroupName": group_name, "NewGroupName": new_name, "NewPath": "/"} + response = self.boto3_iam_client.update_group(**params) + log_command_execution(self.iam_endpoint, "IAM Update Group", response, params) return response @reporter.step("Updates the name and/or the path of the specified IAM user") + @report_error def iam_update_user(self, user_name: str, new_name: str, 
new_path: Optional[str] = None) -> dict: - response = self.boto3_iam_client.update_user(UserName=user_name, NewUserName=new_name, NewPath="/") + params = {"UserName": user_name, "NewUserName": new_name, "NewPath": "/"} + response = self.boto3_iam_client.update_user(**params) + log_command_execution(self.iam_endpoint, "IAM Update User", response, params) return response @reporter.step("Adds one or more tags to an IAM user") + @report_error def iam_tag_user(self, user_name: str, tags: list) -> dict: - tags_json = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] - response = self.boto3_iam_client.tag_user(UserName=user_name, Tags=tags_json) + params = self._convert_to_s3_params(locals().items()) + params["Tags"] = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] + response = self.boto3_iam_client.tag_user(**params) + log_command_execution(self.iam_endpoint, "IAM Tag User", response, params) return response @reporter.step("List tags of IAM user") + @report_error def iam_list_user_tags(self, user_name: str) -> dict: response = self.boto3_iam_client.list_user_tags(UserName=user_name) + log_command_execution(self.iam_endpoint, "IAM List User Tags", response, {"UserName": user_name}) return response @reporter.step("Removes the specified tags from the user") + @report_error def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: - response = self.boto3_iam_client.untag_user(UserName=user_name, TagKeys=tag_keys) + params = self._convert_to_s3_params(locals().items()) + response = self.boto3_iam_client.untag_user(**params) + log_command_execution(self.iam_endpoint, "IAM Untag User", response, params) return response From 3f3be83d90cb3226268f00746e67f433b63c90be Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Mon, 21 Oct 2024 09:01:37 +0300 Subject: [PATCH 299/363] [#305] Added IAM abstract method --- src/frostfs_testlib/s3/interfaces.py | 4 ++++ src/frostfs_testlib/steps/metrics.py | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index da4fc6b..c084484 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -58,6 +58,10 @@ class S3ClientWrapper(HumanReadableABC): def set_endpoint(self, s3gate_endpoint: str): """Set endpoint""" + @abstractmethod + def set_iam_endpoint(self, iam_endpoint: str): + """Set iam endpoint""" + @abstractmethod def create_bucket( self, diff --git a/src/frostfs_testlib/steps/metrics.py b/src/frostfs_testlib/steps/metrics.py index 29e49d4..a9e545a 100644 --- a/src/frostfs_testlib/steps/metrics.py +++ b/src/frostfs_testlib/steps/metrics.py @@ -1,8 +1,8 @@ import re from frostfs_testlib import reporter -from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.storage.cluster import ClusterNode +from frostfs_testlib.testing.test_control import wait_for_success @reporter.step("Check metrics result") @@ -19,7 +19,7 @@ def check_metrics_counter( counter_act += get_metrics_value(cluster_node, parse_from_command, **metrics_greps) assert eval( f"{counter_act} {operator} {counter_exp}" - ), f"Expected: {counter_exp} {operator} Actual: {counter_act} in node: {cluster_node}" + ), f"Expected: {counter_exp} {operator} Actual: {counter_act} in nodes: {cluster_nodes}" @reporter.step("Get metrics value from node: {node}") From b2bf6677f184fdb2d92045d753722fd651091e46 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Fri, 25 Oct 2024 18:52:43 +0300 Subject: [PATCH 300/363] 
[#310] Update test marking Signed-off-by: a.berezin --- src/frostfs_testlib/hooks.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/hooks.py b/src/frostfs_testlib/hooks.py index df89bff..6830e78 100644 --- a/src/frostfs_testlib/hooks.py +++ b/src/frostfs_testlib/hooks.py @@ -8,5 +8,6 @@ def pytest_collection_modifyitems(items: list[pytest.Item]): # 1. plugins # 2. testlib itself for item in items: - if "frostfs" in item.nodeid and "plugin" not in item.nodeid and "testlib" not in item.nodeid: + location = item.location[0] + if "frostfs" in location and "plugin" not in location and "testlib" not in location: item.add_marker("frostfs") From e6faddedeb008950583174659eb52374bd475e5d Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Mon, 21 Oct 2024 23:47:47 +0300 Subject: [PATCH 301/363] [#297] add morph rule chain --- src/frostfs_testlib/cli/frostfs_adm/morph.py | 28 ++++++------------- .../storage/dataclasses/ape.py | 15 ++++++++++ 2 files changed, 23 insertions(+), 20 deletions(-) diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index eea0985..7228692 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -122,9 +122,7 @@ class FrostfsAdmMorph(CliCommand): **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) - def force_new_epoch( - self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None, delta: Optional[int] = None - ) -> CommandResult: + def force_new_epoch(self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None) -> CommandResult: """Create new FrostFS epoch event in the side chain. Args: @@ -343,11 +341,11 @@ class FrostfsAdmMorph(CliCommand): return self._execute( f"morph remove-nodes {' '.join(node_netmap_keys)}", **{param: param_value for param, param_value in locals().items() if param not in ["self", "node_netmap_keys"]}, + **{param: param_value for param, param_value in locals().items() if param not in ["self", "node_netmap_keys"]}, ) - + def add_rule( self, - endpoint: str, chain_id: str, target_name: str, target_type: str, @@ -361,10 +359,8 @@ class FrostfsAdmMorph(CliCommand): """Drop objects from the node's local storage Args: - address: Address of wallet account chain-id: Assign ID to the parsed chain chain-id-hex: Flag to parse chain ID as hex - endpoint: Remote node control address (as 'multiaddr' or ':') path: Path to encoded chain in JSON or binary format rule: Rule statement target-name: Resource name in APE resource name format @@ -376,13 +372,12 @@ class FrostfsAdmMorph(CliCommand): Command`s result. """ return self._execute( - "control add-rule", + "morph ape add-rule-chain", **{param: value for param, value in locals().items() if param not in ["self"]}, ) def get_rule( self, - endpoint: str, chain_id: str, target_name: str, target_type: str, @@ -394,10 +389,8 @@ class FrostfsAdmMorph(CliCommand): """Drop objects from the node's local storage Args: - address string Address of wallet account chain-id string Chain id chain-id-hex Flag to parse chain ID as hex - endpoint string Remote node control address (as 'multiaddr' or ':') target-name string Resource name in APE resource name format target-type string Resource type(container/namespace) timeout duration Timeout for an operation (default 15s) @@ -407,7 +400,7 @@ class FrostfsAdmMorph(CliCommand): Command`s result. 
""" return self._execute( - "control get-rule", + "morph ape get-rule-chain", **{param: value for param, value in locals().items() if param not in ["self"]}, ) @@ -423,8 +416,6 @@ class FrostfsAdmMorph(CliCommand): """Drop objects from the node's local storage Args: - address: Address of wallet account - endpoint: Remote node control address (as 'multiaddr' or ':') target-name: Resource name in APE resource name format target-type: Resource type(container/namespace) timeout: Timeout for an operation (default 15s) @@ -437,10 +428,9 @@ class FrostfsAdmMorph(CliCommand): "morph ape list-rule-chains", **{param: value for param, value in locals().items() if param not in ["self"]}, ) - + def remove_rule( self, - endpoint: str, chain_id: str, target_name: str, target_type: str, @@ -453,11 +443,9 @@ class FrostfsAdmMorph(CliCommand): """Drop objects from the node's local storage Args: - address: Address of wallet account all: Remove all chains chain-id: Assign ID to the parsed chain chain-id-hex: Flag to parse chain ID as hex - endpoint: Remote node control address (as 'multiaddr' or ':') target-name: Resource name in APE resource name format target-type: Resource type(container/namespace) timeout: Timeout for an operation (default 15s) @@ -467,6 +455,6 @@ class FrostfsAdmMorph(CliCommand): Command`s result. """ return self._execute( - "control remove-rule", + "morph ape rm-rule-chain", **{param: value for param, value in locals().items() if param not in ["self"]}, - ) \ No newline at end of file + ) diff --git a/src/frostfs_testlib/storage/dataclasses/ape.py b/src/frostfs_testlib/storage/dataclasses/ape.py index b6563f4..f0f1758 100644 --- a/src/frostfs_testlib/storage/dataclasses/ape.py +++ b/src/frostfs_testlib/storage/dataclasses/ape.py @@ -26,6 +26,21 @@ class ObjectOperations(HumanReadableEnum): return [op for op in ObjectOperations if op != ObjectOperations.WILDCARD_ALL] +@dataclass +class Operations: + GET_CONTAINER = "GetContainer" + PUT_CONTAINER = "PutContainer" + DELETE_CONTAINER = "DeleteContainer" + LIST_CONTAINER = "ListContainers" + GET_OBJECT = "GetObject" + DELETE_OBJECT = "DeleteObject" + HASH_OBJECT = "HashObject" + RANGE_OBJECT = "RangeObject" + SEARCH_OBJECT = "SearchObject" + HEAD_OBJECT = "HeadObject" + PUT_OBJECT = "PutObject" + + class Verb(HumanReadableEnum): ALLOW = "allow" DENY = "deny" From 3d6a356e20b5ce13350b1507d7d45e74749b37d7 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 18 Oct 2024 15:57:40 +0300 Subject: [PATCH 302/363] [#306] Fix handling of bucket names in AWS CLI - Add quotes around container names if they contain spaces or `-`. 
Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/s3/aws_cli_client.py | 154 +++++++++++++++++++++++ 1 file changed, 154 insertions(+) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 2482376..ff4e329 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -70,6 +70,9 @@ class AwsCliClient(S3ClientWrapper): if bucket is None: bucket = string_utils.unique_name("bucket-") + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + if object_lock_enabled_for_bucket is None: object_lock = "" elif object_lock_enabled_for_bucket: @@ -103,16 +106,25 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete bucket S3") def delete_bucket(self, bucket: str) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" self.local_shell.exec(cmd, command_options) @reporter.step("Head bucket S3") def head_bucket(self, bucket: str) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = f"aws {self.common_flags} s3api head-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}" self.local_shell.exec(cmd) @reporter.step("Put bucket versioning status") def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api put-bucket-versioning --bucket {bucket} " f"--versioning-configuration Status={status.value} " @@ -122,6 +134,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get bucket versioning status") def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api get-bucket-versioning --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -132,6 +147,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Put bucket tagging") def put_bucket_tagging(self, bucket: str, tags: list) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + tags_json = {"TagSet": [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]} cmd = ( f"aws {self.common_flags} s3api put-bucket-tagging --bucket {bucket} " @@ -141,6 +159,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get bucket tagging") def get_bucket_tagging(self, bucket: str) -> list: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -151,6 +172,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get bucket acl") def get_bucket_acl(self, bucket: str) -> list: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) @@ -160,6 +184,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get bucket location") def get_bucket_location(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile 
{self.profile}" @@ -170,6 +197,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("List objects S3") def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) @@ -181,6 +211,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("List objects S3 v2") def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -195,6 +228,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("List objects versions S3") def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -205,6 +241,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("List objects delete markers S3") def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -228,8 +267,13 @@ class AwsCliClient(S3ClientWrapper): ) -> str: if bucket is None: bucket = source_bucket + + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + if key is None: key = string_utils.unique_name("copy-object-") + copy_source = f"{source_bucket}/{source_key}" cmd = ( @@ -266,6 +310,9 @@ class AwsCliClient(S3ClientWrapper): grant_full_control: Optional[str] = None, grant_read: Optional[str] = None, ) -> str: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + if key is None: key = os.path.basename(filepath) @@ -297,6 +344,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Head object S3") def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api head-object --bucket {bucket} --key {key} " @@ -315,6 +365,9 @@ class AwsCliClient(S3ClientWrapper): object_range: Optional[tuple[int, int]] = None, full_output: bool = False, ) -> dict | TestFile: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, string_utils.unique_name("dl-object-"))) version = f" --version-id {version_id}" if version_id else "" cmd = ( @@ -329,6 +382,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get object ACL") def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api get-object-acl --bucket {bucket} --key {key} " @@ -347,6 +403,9 @@ class AwsCliClient(S3ClientWrapper): grant_write: Optional[str] = None, grant_read: 
Optional[str] = None, ) -> list: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api put-object-acl --bucket {bucket} --key {key} " f" --endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -369,6 +428,9 @@ class AwsCliClient(S3ClientWrapper): grant_write: Optional[str] = None, grant_read: Optional[str] = None, ) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} " f" --endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -383,6 +445,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete objects S3") def delete_objects(self, bucket: str, keys: list[str]) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + file_path = os.path.join(os.getcwd(), ASSETS_DIR, "delete.json") delete_structure = json.dumps(_make_objs_dict(keys)) with open(file_path, "w") as out_file: @@ -399,6 +464,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete object S3") def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api delete-object --bucket {bucket} " @@ -409,6 +477,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete object versions S3") def delete_object_versions(self, bucket: str, object_versions: list) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + # Build deletion list in S3 format delete_list = { "Objects": [ @@ -435,6 +506,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete object versions S3 without delete markers") def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + # Delete objects without creating delete markers for object_version in object_versions: self.delete_object(bucket=bucket, key=object_version["Key"], version_id=object_version["VersionId"]) @@ -450,6 +524,8 @@ class AwsCliClient(S3ClientWrapper): part_number: int = 0, full_output: bool = True, ) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' attrs = ",".join(attributes) version = f" --version-id {version_id}" if version_id else "" @@ -473,6 +549,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get bucket policy") def get_bucket_policy(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -483,6 +562,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete bucket policy") def delete_bucket_policy(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api delete-bucket-policy --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -493,6 +575,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Put bucket policy") def put_bucket_policy(self, bucket: str, policy: dict) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + # Leaving it as is was in test repo. 
Double dumps to escape resulting string # Example: # policy = {"a": 1} @@ -508,6 +593,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get bucket cors") def get_bucket_cors(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -518,6 +606,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Put bucket cors") def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api put-bucket-cors --bucket {bucket} " f"--cors-configuration '{json.dumps(cors_configuration)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -526,6 +617,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete bucket cors") def delete_bucket_cors(self, bucket: str) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -534,6 +628,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete bucket tagging") def delete_bucket_tagging(self, bucket: str) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" @@ -549,6 +646,9 @@ class AwsCliClient(S3ClientWrapper): version_id: Optional[str] = None, bypass_governance_retention: Optional[bool] = None, ) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api put-object-retention --bucket {bucket} --key {key} " @@ -566,6 +666,9 @@ class AwsCliClient(S3ClientWrapper): legal_hold_status: Literal["ON", "OFF"], version_id: Optional[str] = None, ) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + version = f" --version-id {version_id}" if version_id else "" legal_hold = json.dumps({"Status": legal_hold_status}) cmd = ( @@ -576,6 +679,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Put object tagging") def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tagging = {"TagSet": tags} version = f" --version-id {version_id}" if version_id else "" @@ -587,6 +693,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get object tagging") def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api get-object-tagging --bucket {bucket} --key {key} " @@ -598,6 +707,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete object tagging") def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + version = f" --version-id {version_id}" if version_id else "" cmd = ( f"aws {self.common_flags} s3api delete-object-tagging 
--bucket {bucket} " @@ -613,6 +725,9 @@ class AwsCliClient(S3ClientWrapper): acl: Optional[str] = None, metadata: Optional[dict] = None, ) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" ) @@ -633,6 +748,9 @@ class AwsCliClient(S3ClientWrapper): acl: Optional[str] = None, metadata: Optional[dict] = None, ) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3 cp {dir_path} s3://{bucket} " f"--endpoint-url {self.s3gate_endpoint} --recursive --profile {self.profile}" @@ -648,6 +766,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Create multipart upload S3") def create_multipart_upload(self, bucket: str, key: str) -> str: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api create-multipart-upload --bucket {bucket} " f"--key {key} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" @@ -661,6 +782,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("List multipart uploads S3") def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api list-multipart-uploads --bucket {bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" @@ -671,6 +795,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Abort multipart upload S3") def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api abort-multipart-upload --bucket {bucket} " f"--key {key} --upload-id {upload_id} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" @@ -679,6 +806,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Upload part S3") def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api upload-part --bucket {bucket} --key {key} " f"--upload-id {upload_id} --part-number {part_num} --body {filepath} " @@ -691,6 +821,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Upload copy part S3") def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api upload-part-copy --bucket {bucket} --key {key} " f"--upload-id {upload_id} --part-number {part_num} --copy-source {copy_source} " @@ -704,6 +837,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("List parts S3") def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api list-parts --bucket {bucket} --key {key} " f"--upload-id {upload_id} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" @@ -717,6 +853,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Complete multipart upload S3") def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + file_path = os.path.join(os.getcwd(), ASSETS_DIR, "parts.json") 
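        # For reference, the parts_dict built below serializes into parts.json as, e.g.
        # (ETag values here are illustrative):
        #     {"Parts": [{"ETag": "etag-1", "PartNumber": 1}, {"ETag": "etag-2", "PartNumber": 2}]}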
parts_dict = {"Parts": [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]} @@ -737,6 +876,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Put object lock configuration") def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api put-object-lock-configuration --bucket {bucket} " f"--object-lock-configuration '{json.dumps(configuration)}' --endpoint-url {self.s3gate_endpoint} --profile {self.profile}" @@ -746,6 +888,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get object lock configuration") def get_object_lock_configuration(self, bucket: str): + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api get-object-lock-configuration --bucket {bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" @@ -756,6 +901,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Put bucket lifecycle configuration") def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api put-bucket-lifecycle-configuration --bucket {bucket} " f"--endpoint-url {self.s3gate_endpoint} --lifecycle-configuration file://{dumped_configuration} --profile {self.profile}" @@ -766,6 +914,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Get bucket lifecycle configuration") def get_bucket_lifecycle_configuration(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api get-bucket-lifecycle-configuration --bucket {bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" @@ -776,6 +927,9 @@ class AwsCliClient(S3ClientWrapper): @reporter.step("Delete bucket lifecycle configuration") def delete_bucket_lifecycle(self, bucket: str) -> dict: + if bucket.startswith("-") or " " in bucket: + bucket = f'"{bucket}"' + cmd = ( f"aws {self.common_flags} s3api delete-bucket-lifecycle --bucket {bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}" From 26139767f4118f1655c067ffa316e6ae9ebf6064 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Wed, 23 Oct 2024 14:08:54 +0300 Subject: [PATCH 303/363] [#311] Add AWS CLI command to report from Boto3 request Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/s3/boto3_client.py | 1000 +++++++++++++++--------- src/frostfs_testlib/utils/cli_utils.py | 72 +- 2 files changed, 672 insertions(+), 400 deletions(-) diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index a99b866..91d8c5a 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -1,8 +1,8 @@ import json import logging import os +from collections.abc import Callable from datetime import datetime -from functools import wraps from time import sleep from typing import Literal, Optional, Union @@ -28,48 +28,32 @@ logger = logging.getLogger("NeoLogger") urllib3.disable_warnings() -def report_error(func): - @wraps(func) - def deco(*a, **kw): - try: - return func(*a, **kw) - except ClientError as err: - url = None - params = {"args": a, "kwargs": kw} - - if isinstance(a[0], Boto3ClientWrapper): - client: Boto3ClientWrapper = a[0] - url = client.s3gate_endpoint - params = {"args": a[1:], "kwargs": kw} - - 
log_command_execution(url, f"Failed {err.operation_name}", err.response, params) - raise - - return deco - - class Boto3ClientWrapper(S3ClientWrapper): __repr_name__: str = "Boto3 client" @reporter.step("Configure S3 client (boto3)") - @report_error def __init__( self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1" ) -> None: self.boto3_client: S3Client = None - self.session = boto3.Session() + self.s3gate_endpoint: str = "" + + self.boto3_iam_client: S3Client = None + self.iam_endpoint: str = "" + + self.access_key_id: str = access_key_id + self.secret_access_key: str = secret_access_key + self.profile = profile self.region = region + + self.session = boto3.Session() self.config = Config( retries={ "max_attempts": MAX_REQUEST_ATTEMPTS, "mode": RETRY_MODE, } ) - self.access_key_id: str = access_key_id - self.secret_access_key: str = secret_access_key - self.s3gate_endpoint: str = "" - self.iam_endpoint: str = "" - self.boto3_iam_client: S3Client = None + self.set_endpoint(s3gate_endpoint) @reporter.step("Set endpoint S3 to {s3gate_endpoint}") @@ -116,13 +100,24 @@ class Boto3ClientWrapper(S3ClientWrapper): return result def _convert_to_s3_params(self, scope: dict, exclude: Optional[list[str]] = None) -> dict: - if not exclude: - exclude = ["self"] - return {self._to_s3_param(param): value for param, value in scope if param not in exclude and value is not None} + exclude = ["self", "cls"] if not exclude else exclude + ["self", "cls"] + return {self._to_s3_param(param): value for param, value in scope.items() if param not in exclude and value is not None} + + def _exec_request(self, method: Callable, params: Optional[dict] = None, **kwargs): + if not params: + params = {} + + try: + result = method(**params) + except ClientError as err: + log_command_execution(method.__name__, err.response, params, **kwargs) + raise + + log_command_execution(method.__name__, result, params, **kwargs) + return result # BUCKET METHODS # @reporter.step("Create bucket S3") - @report_error def create_bucket( self, bucket: Optional[str] = None, @@ -151,81 +146,98 @@ class Boto3ClientWrapper(S3ClientWrapper): if location_constraint: params.update({"CreateBucketConfiguration": {"LocationConstraint": location_constraint}}) - s3_bucket = self.boto3_client.create_bucket(**params) - log_command_execution(self.s3gate_endpoint, f"Created S3 bucket {bucket}", s3_bucket, params) + self._exec_request(self.boto3_client.create_bucket, params, endpoint=self.s3gate_endpoint, profile=self.profile) return bucket @reporter.step("List buckets S3") - @report_error def list_buckets(self) -> list[str]: - found_buckets = [] - - response = self.boto3_client.list_buckets() - log_command_execution(self.s3gate_endpoint, "S3 List buckets result", response) - - for bucket in response["Buckets"]: - found_buckets.append(bucket["Name"]) - - return found_buckets + response = self._exec_request( + self.boto3_client.list_buckets, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return [bucket["Name"] for bucket in response["Buckets"]] @reporter.step("Delete bucket S3") - @report_error def delete_bucket(self, bucket: str) -> None: - response = self.boto3_client.delete_bucket(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 Delete bucket result", response, {"Bucket": bucket}) + self._exec_request( + self.boto3_client.delete_bucket, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Head bucket 
S3") - @report_error def head_bucket(self, bucket: str) -> None: - response = self.boto3_client.head_bucket(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 Head bucket result", response, {"Bucket": bucket}) + self._exec_request( + self.boto3_client.head_bucket, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Put bucket versioning status") - @report_error def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None: params = {"Bucket": bucket, "VersioningConfiguration": {"Status": status.value}} - response = self.boto3_client.put_bucket_versioning(**params) - log_command_execution(self.s3gate_endpoint, "S3 Set bucket versioning to", response, params) + self._exec_request( + self.boto3_client.put_bucket_versioning, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Get bucket versioning status") - @report_error def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]: - response = self.boto3_client.get_bucket_versioning(Bucket=bucket) - status = response.get("Status") - log_command_execution(self.s3gate_endpoint, "S3 Got bucket versioning status", response, {"Bucket": bucket}) - return status + response = self._exec_request( + self.boto3_client.get_bucket_versioning, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + return response.get("Status") @reporter.step("Put bucket tagging") - @report_error def put_bucket_tagging(self, bucket: str, tags: list) -> None: tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tagging = {"TagSet": tags} - params = self._convert_to_s3_params(locals().items(), exclude=["self", "tags"]) - response = self.boto3_client.put_bucket_tagging(**params) - log_command_execution(self.s3gate_endpoint, "S3 Put bucket tagging", response, params) + params = self._convert_to_s3_params(locals(), exclude=["tags"]) + self._exec_request( + self.boto3_client.put_bucket_tagging, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Get bucket tagging") - @report_error def get_bucket_tagging(self, bucket: str) -> list: - response = self.boto3_client.get_bucket_tagging(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 Get bucket tagging", response, {"Bucket": bucket}) + response = self._exec_request( + self.boto3_client.get_bucket_tagging, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("TagSet") @reporter.step("Get bucket acl") - @report_error def get_bucket_acl(self, bucket: str) -> list: - response = self.boto3_client.get_bucket_acl(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 Get bucket acl", response, {"Bucket": bucket}) + response = self._exec_request( + self.boto3_client.get_bucket_acl, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("Grants") @reporter.step("Delete bucket tagging") - @report_error def delete_bucket_tagging(self, bucket: str) -> None: - response = self.boto3_client.delete_bucket_tagging(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 Delete bucket tagging", response, {"Bucket": bucket}) + self._exec_request( + self.boto3_client.delete_bucket_tagging, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Put bucket ACL") - @report_error def put_bucket_acl( self, bucket: str, @@ 
-233,141 +245,181 @@ class Boto3ClientWrapper(S3ClientWrapper): grant_write: Optional[str] = None, grant_read: Optional[str] = None, ) -> None: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.put_bucket_acl(**params) - log_command_execution(self.s3gate_endpoint, "S3 ACL bucket result", response, params) + params = self._convert_to_s3_params(locals()) + self._exec_request( + self.boto3_client.put_bucket_acl, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Put object lock configuration") - @report_error def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict: params = {"Bucket": bucket, "ObjectLockConfiguration": configuration} - response = self.boto3_client.put_object_lock_configuration(**params) - log_command_execution(self.s3gate_endpoint, "S3 put_object_lock_configuration result", response, params) - return response + return self._exec_request( + self.boto3_client.put_object_lock_configuration, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Get object lock configuration") - @report_error def get_object_lock_configuration(self, bucket: str) -> dict: - response = self.boto3_client.get_object_lock_configuration(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 get_object_lock_configuration result", response, {"Bucket": bucket}) + response = self._exec_request( + self.boto3_client.get_object_lock_configuration, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("ObjectLockConfiguration") @reporter.step("Get bucket policy") - @report_error def get_bucket_policy(self, bucket: str) -> str: - response = self.boto3_client.get_bucket_policy(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 get_bucket_policy result", response, {"Bucket": bucket}) + response = self._exec_request( + self.boto3_client.get_bucket_policy, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("Policy") @reporter.step("Delete bucket policy") - @report_error def delete_bucket_policy(self, bucket: str) -> str: - response = self.boto3_client.delete_bucket_policy(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 delete_bucket_policy result", response, {"Bucket": bucket}) - return response + return self._exec_request( + self.boto3_client.delete_bucket_policy, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Put bucket policy") - @report_error def put_bucket_policy(self, bucket: str, policy: dict) -> None: params = {"Bucket": bucket, "Policy": json.dumps(policy)} - response = self.boto3_client.put_bucket_policy(**params) - log_command_execution(self.s3gate_endpoint, "S3 put_bucket_policy result", response, params) - return response + return self._exec_request( + self.boto3_client.put_bucket_policy, + params, + # Overriding option for AWS CLI + policy=policy, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Get bucket cors") - @report_error def get_bucket_cors(self, bucket: str) -> dict: - response = self.boto3_client.get_bucket_cors(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 get_bucket_cors result", response, {"Bucket": bucket}) + response = self._exec_request( + self.boto3_client.get_bucket_cors, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("CORSRules") 
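A note on the conversion helper used throughout these methods: `_convert_to_s3_params` now takes the whole `locals()` mapping rather than `locals().items()`, always drops `self`/`cls` on top of any explicit excludes, and skips arguments left at `None`. A minimal sketch of that behavior, assuming `_to_s3_param` simply CamelCases snake_case names (the helper itself is outside this hunk):

    from typing import Optional

    def _to_s3_param(param: str) -> str:
        # Assumed behavior: snake_case -> CamelCase, e.g. "version_id" -> "VersionId".
        return "".join(part.capitalize() for part in param.split("_"))

    def _convert_to_s3_params(scope: dict, exclude: Optional[list[str]] = None) -> dict:
        # "self"/"cls" are always excluded; None means "argument was not passed".
        exclude = ["self", "cls"] if not exclude else exclude + ["self", "cls"]
        return {_to_s3_param(k): v for k, v in scope.items() if k not in exclude and v is not None}

    # E.g. inside get_object_tagging(self, bucket, key, version_id=None):
    scope = {"self": object(), "bucket": "my-bucket", "key": "obj-1", "version_id": None}
    assert _convert_to_s3_params(scope) == {"Bucket": "my-bucket", "Key": "obj-1"}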
@reporter.step("Get bucket location") - @report_error def get_bucket_location(self, bucket: str) -> str: - response = self.boto3_client.get_bucket_location(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 get_bucket_location result", response, {"Bucket": bucket}) + response = self._exec_request( + self.boto3_client.get_bucket_location, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("LocationConstraint") @reporter.step("Put bucket cors") - @report_error def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.put_bucket_cors(**params) - log_command_execution(self.s3gate_endpoint, "S3 put_bucket_cors result", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_client.put_bucket_cors, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Delete bucket cors") - @report_error def delete_bucket_cors(self, bucket: str) -> None: - response = self.boto3_client.delete_bucket_cors(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 delete_bucket_cors result", response, {"Bucket": bucket}) + self._exec_request( + self.boto3_client.delete_bucket_cors, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Put bucket lifecycle configuration") - @report_error def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict: - response = self.boto3_client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle_configuration) - log_command_execution(self.s3gate_endpoint, "S3 put_bucket_lifecycle_configuration result", response, {"Bucket": bucket}) - return response + params = self._convert_to_s3_params(locals(), exclude=["dumped_configuration"]) + return self._exec_request( + self.boto3_client.put_bucket_lifecycle_configuration, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Get bucket lifecycle configuration") - @report_error def get_bucket_lifecycle_configuration(self, bucket: str) -> dict: - response = self.boto3_client.get_bucket_lifecycle_configuration(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 get_bucket_lifecycle_configuration result", response, {"Bucket": bucket}) + response = self._exec_request( + self.boto3_client.get_bucket_lifecycle_configuration, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return {"Rules": response.get("Rules")} @reporter.step("Delete bucket lifecycle configuration") - @report_error def delete_bucket_lifecycle(self, bucket: str) -> dict: - response = self.boto3_client.delete_bucket_lifecycle(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 delete_bucket_lifecycle result", response, {"Bucket": bucket}) - return response + return self._exec_request( + self.boto3_client.delete_bucket_lifecycle, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) # END OF BUCKET METHODS # # OBJECT METHODS # @reporter.step("List objects S3 v2") - @report_error def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.list_objects_v2(Bucket=bucket) - 
log_command_execution(self.s3gate_endpoint, "S3 v2 List objects result", response, params) - + response = self._exec_request( + self.boto3_client.list_objects_v2, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) obj_list = [obj["Key"] for obj in response.get("Contents", [])] logger.info(f"Found s3 objects: {obj_list}") - return response if full_output else obj_list @reporter.step("List objects S3") - @report_error def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.list_objects(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 List objects result", response, params) - + response = self._exec_request( + self.boto3_client.list_objects, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) obj_list = [obj["Key"] for obj in response.get("Contents", [])] logger.info(f"Found s3 objects: {obj_list}") - return response if full_output else obj_list @reporter.step("List objects versions S3") - @report_error def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.list_object_versions(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 List objects versions result", response, params) + response = self._exec_request( + self.boto3_client.list_object_versions, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response if full_output else response.get("Versions", []) @reporter.step("List objects delete markers S3") - @report_error def list_delete_markers(self, bucket: str, full_output: bool = False) -> list: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.list_object_versions(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 List objects delete markers result", response, params) + response = self._exec_request( + self.boto3_client.list_object_versions, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response if full_output else response.get("DeleteMarkers", []) @reporter.step("Put object S3") - @report_error def put_object( self, bucket: str, @@ -388,40 +440,53 @@ class Boto3ClientWrapper(S3ClientWrapper): with open(filepath, "rb") as put_file: body = put_file.read() - params = self._convert_to_s3_params(locals().items(), exclude=["self", "filepath", "put_file", "body"]) - response = self.boto3_client.put_object(Body=body, **params) - log_command_execution(self.s3gate_endpoint, "S3 Put object result", response, params) + params = self._convert_to_s3_params(locals(), exclude=["filepath", "put_file"]) + response = self._exec_request( + self.boto3_client.put_object, + params, + body=filepath, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("VersionId") @reporter.step("Head object S3") - @report_error def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.head_object(**params) - log_command_execution(self.s3gate_endpoint, "S3 Head object result", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_client.head_object, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) 
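The `body=filepath` keyword passed in `put_object` above is the reporting override in action: extra kwargs given to `_exec_request` are forwarded to `log_command_execution`, where a kwarg whose kebab-case name matches a converted request parameter replaces it in the reconstructed AWS CLI command, keeping multi-megabyte payloads out of the Allure attachment. An illustrative sketch under assumed values (endpoint and path are made up):

    # Request parameters as boto3 receives them (Body carries the raw payload):
    params = {"Bucket": "my-bucket", "Key": "obj-1", "Body": b"\x00" * 5_000_000}
    # Reporting overrides: "body" shadows "Body", so the attachment shows a path.
    kwargs = {"body": "/tmp/obj-1.bin", "endpoint": "http://s3.local:8080", "profile": "default"}

    # _convert_request_to_aws_cli_command("put_object", params, **kwargs)
    # should then yield roughly:
    #   aws --no-verify-ssl --no-paginate s3api put-object --bucket my-bucket \
    #       --key obj-1 --body /tmp/obj-1.bin --endpoint http://s3.local:8080 --profile default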
@reporter.step("Delete object S3") - @report_error def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.delete_object(**params) - log_command_execution(self.s3gate_endpoint, "S3 Delete object result", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_client.delete_object, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Delete objects S3") - @report_error def delete_objects(self, bucket: str, keys: list[str]) -> dict: params = {"Bucket": bucket, "Delete": _make_objs_dict(keys)} - response = self.boto3_client.delete_objects(**params) - log_command_execution(self.s3gate_endpoint, "S3 Delete objects result", response, params) + response = self._exec_request( + self.boto3_client.delete_objects, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + assert ( "Errors" not in response ), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Message: {response["Errors"]["Message"]}' + return response @reporter.step("Delete object versions S3") - @report_error def delete_object_versions(self, bucket: str, object_versions: list) -> dict: # Build deletion list in S3 format delete_list = { @@ -434,21 +499,26 @@ class Boto3ClientWrapper(S3ClientWrapper): ] } params = {"Bucket": bucket, "Delete": delete_list} - response = self.boto3_client.delete_objects(**params) - log_command_execution(self.s3gate_endpoint, "S3 Delete objects result", response, params) - return response + return self._exec_request( + self.boto3_client.delete_objects, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Delete object versions S3 without delete markers") - @report_error def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None: # Delete objects without creating delete markers for object_version in object_versions: params = {"Bucket": bucket, "Key": object_version["Key"], "VersionId": object_version["VersionId"]} - response = self.boto3_client.delete_object(**params) - log_command_execution(self.s3gate_endpoint, "S3 Delete object result", response, params) + self._exec_request( + self.boto3_client.delete_object, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Put object ACL") - @report_error def put_object_acl( self, bucket: str, @@ -457,21 +527,27 @@ class Boto3ClientWrapper(S3ClientWrapper): grant_write: Optional[str] = None, grant_read: Optional[str] = None, ) -> list: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.put_object_acl(**params) - log_command_execution(self.s3gate_endpoint, "S3 put object ACL", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_client.put_object_acl, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("Grants") @reporter.step("Get object ACL") - @report_error def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.get_object_acl(**params) - log_command_execution(self.s3gate_endpoint, "S3 ACL objects result", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + 
self.boto3_client.get_object_acl, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("Grants") @reporter.step("Copy object S3") - @report_error def copy_object( self, source_bucket: str, @@ -486,17 +562,22 @@ class Boto3ClientWrapper(S3ClientWrapper): ) -> str: if bucket is None: bucket = source_bucket + if key is None: key = string_utils.unique_name("copy-object-") - copy_source = f"{source_bucket}/{source_key}" - params = self._convert_to_s3_params(locals().items(), exclude=["self", "source_bucket", "source_key"]) - response = self.boto3_client.copy_object(**params) - log_command_execution(self.s3gate_endpoint, "S3 Copy objects result", response, params) + copy_source = f"{source_bucket}/{source_key}" + params = self._convert_to_s3_params(locals(), exclude=["source_bucket", "source_key"]) + + self._exec_request( + self.boto3_client.copy_object, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return key @reporter.step("Get object S3") - @report_error def get_object( self, bucket: str, @@ -509,12 +590,15 @@ class Boto3ClientWrapper(S3ClientWrapper): if object_range: range_str = f"bytes={object_range[0]}-{object_range[1]}" - params = self._convert_to_s3_params( - {**locals(), **{"Range": range_str}}.items(), - exclude=["self", "object_range", "full_output", "range_str"], + params = locals() + params.update({"Range": f"bytes={object_range[0]}-{object_range[1]}"} if object_range else {}) + params = self._convert_to_s3_params(params, exclude=["object_range", "full_output", "range_str"]) + response = self._exec_request( + self.boto3_client.get_object, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, ) - response = self.boto3_client.get_object(**params) - log_command_execution(self.s3gate_endpoint, "S3 Get objects result", response, params) if full_output: return response @@ -528,78 +612,93 @@ class Boto3ClientWrapper(S3ClientWrapper): return test_file @reporter.step("Create multipart upload S3") - @report_error def create_multipart_upload(self, bucket: str, key: str) -> str: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.create_multipart_upload(**params) - log_command_execution(self.s3gate_endpoint, "S3 Created multipart upload", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_client.create_multipart_upload, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) assert response.get("UploadId"), f"Expected UploadId in response:\n{response}" - return response["UploadId"] @reporter.step("List multipart uploads S3") - @report_error def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]: - response = self.boto3_client.list_multipart_uploads(Bucket=bucket) - log_command_execution(self.s3gate_endpoint, "S3 List multipart upload", response, {"Bucket": bucket}) - + response = self._exec_request( + self.boto3_client.list_multipart_uploads, + params={"Bucket": bucket}, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("Uploads") @reporter.step("Abort multipart upload S3") - @report_error def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.abort_multipart_upload(**params) - log_command_execution(self.s3gate_endpoint, "S3 Abort multipart upload", response, params) + params = self._convert_to_s3_params(locals()) + self._exec_request( + 
self.boto3_client.abort_multipart_upload, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Upload part S3") - @report_error def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str: with open(filepath, "rb") as put_file: body = put_file.read() - params = self._convert_to_s3_params(locals().items(), exclude=["self", "put_file", "part_num", "filepath", "body"]) + params = self._convert_to_s3_params(locals(), exclude=["put_file", "part_num", "filepath"]) params["PartNumber"] = part_num - response = self.boto3_client.upload_part(Body=body, **params) - log_command_execution(self.s3gate_endpoint, "S3 Upload part", response, params) - assert response.get("ETag"), f"Expected ETag in response:\n{response}" + response = self._exec_request( + self.boto3_client.upload_part, + params, + body=filepath, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) + assert response.get("ETag"), f"Expected ETag in response:\n{response}" return response["ETag"] @reporter.step("Upload copy part S3") - @report_error def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str: - params = self._convert_to_s3_params(locals().items(), exclude=["self", "put_file", "part_num", "filepath"]) + params = self._convert_to_s3_params(locals(), exclude=["put_file", "part_num", "filepath"]) params["PartNumber"] = part_num - response = self.boto3_client.upload_part_copy(**params) - log_command_execution(self.s3gate_endpoint, "S3 Upload copy part", response, params) + response = self._exec_request( + self.boto3_client.upload_part_copy, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) assert response.get("CopyPartResult", []).get("ETag"), f"Expected ETag in response:\n{response}" - return response["CopyPartResult"]["ETag"] @reporter.step("List parts S3") - @report_error def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.list_parts(**params) - log_command_execution(self.s3gate_endpoint, "S3 List part", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_client.list_parts, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) assert response.get("Parts"), f"Expected Parts in response:\n{response}" - return response["Parts"] @reporter.step("Complete multipart upload S3") - @report_error def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts] - params = self._convert_to_s3_params(locals().items(), exclude=["self", "parts"]) + params = self._convert_to_s3_params(locals(), exclude=["parts"]) params["MultipartUpload"] = {"Parts": parts} - response = self.boto3_client.complete_multipart_upload(**params) - log_command_execution(self.s3gate_endpoint, "S3 Complete multipart upload", response, params) - - return response + return self._exec_request( + self.boto3_client.complete_multipart_upload, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Put object retention") - @report_error def put_object_retention( self, bucket: str, @@ -608,12 +707,15 @@ class Boto3ClientWrapper(S3ClientWrapper): version_id: Optional[str] = None, bypass_governance_retention: Optional[bool] = None, ) -> None: - params = self._convert_to_s3_params(locals().items()) - response = 
self.boto3_client.put_object_retention(**params) - log_command_execution(self.s3gate_endpoint, "S3 Put object retention ", response, params) + params = self._convert_to_s3_params(locals()) + self._exec_request( + self.boto3_client.put_object_retention, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Put object legal hold") - @report_error def put_object_legal_hold( self, bucket: str, @@ -622,36 +724,48 @@ class Boto3ClientWrapper(S3ClientWrapper): version_id: Optional[str] = None, ) -> None: legal_hold = {"Status": legal_hold_status} - params = self._convert_to_s3_params(locals().items(), exclude=["self", "legal_hold_status"]) - response = self.boto3_client.put_object_legal_hold(**params) - log_command_execution(self.s3gate_endpoint, "S3 Put object legal hold ", response, params) + params = self._convert_to_s3_params(locals(), exclude=["legal_hold_status"]) + self._exec_request( + self.boto3_client.put_object_legal_hold, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Put object tagging") - @report_error def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None: tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] tagging = {"TagSet": tags} - params = self._convert_to_s3_params(locals().items(), exclude=["self", "tags"]) - response = self.boto3_client.put_object_tagging(**params) - log_command_execution(self.s3gate_endpoint, "S3 Put object tagging", response, params) + params = self._convert_to_s3_params(locals(), exclude=["tags"]) + self._exec_request( + self.boto3_client.put_object_tagging, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Get object tagging") - @report_error def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.get_object_tagging(**params) - log_command_execution(self.s3gate_endpoint, "S3 Get object tagging", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_client.get_object_tagging, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) return response.get("TagSet") @reporter.step("Delete object tagging") - @report_error def delete_object_tagging(self, bucket: str, key: str) -> None: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_client.delete_object_tagging(**params) - log_command_execution(self.s3gate_endpoint, "S3 Delete object tagging", response, params) + params = self._convert_to_s3_params(locals()) + self._exec_request( + self.boto3_client.delete_object_tagging, + params, + endpoint=self.s3gate_endpoint, + profile=self.profile, + ) @reporter.step("Get object attributes") - @report_error def get_object_attributes( self, bucket: str, @@ -666,7 +780,6 @@ class Boto3ClientWrapper(S3ClientWrapper): return {} @reporter.step("Sync directory S3") - @report_error def sync( self, bucket: str, @@ -677,7 +790,6 @@ class Boto3ClientWrapper(S3ClientWrapper): raise NotImplementedError("Sync is not supported for boto3 client") @reporter.step("CP directory S3") - @report_error def cp( self, bucket: str, @@ -693,36 +805,47 @@ class Boto3ClientWrapper(S3ClientWrapper): # Some methods don't have checks because boto3 is silent in some cases (delete, attach, etc.) 
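The IAM wrappers below reuse the same executor with `endpoint=self.iam_endpoint`; on the reporting side, `_convert_request_to_aws_cli_command` selects the `iam` service whenever the endpoint does not contain "s3" (see the cli_utils changes at the end of this patch). A hedged example of the resulting attachment, using a hypothetical endpoint:

    params = {"UserName": "alice", "PolicyArn": "arn:aws:iam::aws:policy/ReadOnlyAccess"}
    kwargs = {"endpoint": "http://iam.local:8080", "profile": "default"}

    # iam_attach_user_policy wraps boto3's attach_user_policy, so the attached
    # command should look roughly like:
    #   aws --no-verify-ssl --no-paginate iam attach-user-policy --user-name alice \
    #       --policy-arn arn:aws:iam::aws:policy/ReadOnlyAccess \
    #       --endpoint http://iam.local:8080 --profile default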
@reporter.step("Adds the specified user to the specified group") - @report_error def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.add_user_to_group(**params) - log_command_execution(self.iam_endpoint, "IAM Add User to Group", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.add_user_to_group, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Attaches the specified managed policy to the specified IAM group") - @report_error def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.attach_group_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Attach Group Policy", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.attach_group_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Attaches the specified managed policy to the specified user") - @report_error def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.attach_user_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Attach User Policy", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.attach_user_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Creates a new AWS secret access key and access key ID for the specified user") - @report_error def iam_create_access_key(self, user_name: str) -> dict: - response = self.boto3_iam_client.create_access_key(UserName=user_name) - log_command_execution(self.iam_endpoint, "IAM Create Access Key", response, {"UserName": user_name}) + response = self._exec_request( + self.boto3_iam_client.create_access_key, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) access_key_id = response["AccessKey"].get("AccessKeyId") secret_access_key = response["AccessKey"].get("SecretAccessKey") @@ -732,10 +855,13 @@ class Boto3ClientWrapper(S3ClientWrapper): return access_key_id, secret_access_key @reporter.step("Creates a new group") - @report_error def iam_create_group(self, group_name: str) -> dict: - response = self.boto3_iam_client.create_group(GroupName=group_name) - log_command_execution(self.iam_endpoint, "IAM Create Group", response, {"GroupName": group_name}) + response = self._exec_request( + self.boto3_iam_client.create_group, + params={"GroupName": group_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("Group"), f"Expected Group in response:\n{response}" assert response["Group"].get("GroupName") == group_name, f"GroupName should be equal to {group_name}" @@ -743,12 +869,17 @@ class Boto3ClientWrapper(S3ClientWrapper): return response @reporter.step("Creates a new managed policy for your AWS account") - @report_error def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict: - params = self._convert_to_s3_params(locals().items()) + params = self._convert_to_s3_params(locals()) params["PolicyDocument"] = 
json.dumps(policy_document) - response = self.boto3_iam_client.create_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Create Policy", response, params) + response = self._exec_request( + self.boto3_iam_client.create_policy, + params, + # Overriding option for AWS CLI + policy_document=policy_document, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("Policy"), f"Expected Policy in response:\n{response}" assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}" @@ -756,10 +887,13 @@ class Boto3ClientWrapper(S3ClientWrapper): return response @reporter.step("Creates a new IAM user for your AWS account") - @report_error def iam_create_user(self, user_name: str) -> dict: - response = self.boto3_iam_client.create_user(UserName=user_name) - log_command_execution(self.iam_endpoint, "IAM Create User", response, {"UserName": user_name}) + response = self._exec_request( + self.boto3_iam_client.create_user, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("User"), f"Expected User in response:\n{response}" assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" @@ -767,89 +901,115 @@ class Boto3ClientWrapper(S3ClientWrapper): return response @reporter.step("Deletes the access key pair associated with the specified IAM user") - @report_error def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.delete_access_key(**params) - log_command_execution(self.iam_endpoint, "IAM Delete Access Key", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.delete_access_key, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Deletes the specified IAM group") - @report_error def iam_delete_group(self, group_name: str) -> dict: - response = self.boto3_iam_client.delete_group(GroupName=group_name) - log_command_execution(self.iam_endpoint, "IAM Delete Group", response, {"GroupName": group_name}) - return response + return self._exec_request( + self.boto3_iam_client.delete_group, + params={"GroupName": group_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group") - @report_error def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.delete_group_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Delete Group Policy", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.delete_group_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Deletes the specified managed policy") - @report_error def iam_delete_policy(self, policy_arn: str) -> dict: - response = self.boto3_iam_client.delete_policy(PolicyArn=policy_arn) - log_command_execution(self.iam_endpoint, "IAM Delete Policy", response, {"PolicyArn": policy_arn}) - return response + return self._exec_request( + self.boto3_iam_client.delete_policy, + params={"PolicyArn": policy_arn}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Deletes the specified IAM user") - @report_error 
def iam_delete_user(self, user_name: str) -> dict: - response = self.boto3_iam_client.delete_user(UserName=user_name) - log_command_execution(self.iam_endpoint, "IAM Delete User", response, {"UserName": user_name}) - return response + return self._exec_request( + self.boto3_iam_client.delete_user, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user") - @report_error def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.delete_user_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Delete User Policy", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.delete_user_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Removes the specified managed policy from the specified IAM group") - @report_error def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.detach_group_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Detach Group Policy", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.detach_group_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Removes the specified managed policy from the specified user") - @report_error def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.detach_user_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Detach User Policy", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.detach_user_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Returns a list of IAM users that are in the specified IAM group") - @report_error def iam_get_group(self, group_name: str) -> dict: - response = self.boto3_iam_client.get_group(GroupName=group_name) - log_command_execution(self.iam_endpoint, "IAM Get Group", response, {"GroupName": group_name}) + response = self._exec_request( + self.boto3_iam_client.get_group, + params={"GroupName": group_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("Group").get("GroupName") == group_name, f"GroupName should be equal to {group_name}" return response @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group") - @report_error def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.get_group_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Get Group Policy", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.get_group_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Retrieves information about the specified managed policy") - @report_error def 
iam_get_policy(self, policy_arn: str) -> dict: - response = self.boto3_iam_client.get_policy(PolicyArn=policy_arn) - log_command_execution(self.iam_endpoint, "IAM Get Policy", response, {"PolicyArn": policy_arn}) + response = self._exec_request( + self.boto3_iam_client.get_policy, + params={"PolicyArn": policy_arn}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("Policy"), f"Expected Policy in response:\n{response}" assert response["Policy"].get("Arn") == policy_arn, f"PolicyArn should be equal to {policy_arn}" @@ -857,11 +1017,14 @@ class Boto3ClientWrapper(S3ClientWrapper): return response @reporter.step("Retrieves information about the specified version of the specified managed policy") - @report_error def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.get_policy_version(**params) - log_command_execution(self.iam_endpoint, "IAM Get Policy Version", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.get_policy_version, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("PolicyVersion"), f"Expected PolicyVersion in response:\n{response}" assert response["PolicyVersion"].get("VersionId") == version_id, f"VersionId should be equal to {version_id}" @@ -869,10 +1032,13 @@ class Boto3ClientWrapper(S3ClientWrapper): return response @reporter.step("Retrieves information about the specified IAM user") - @report_error def iam_get_user(self, user_name: str) -> dict: - response = self.boto3_iam_client.get_user(UserName=user_name) - log_command_execution(self.iam_endpoint, "IAM Get User", response, {"UserName": user_name}) + response = self._exec_request( + self.boto3_iam_client.get_user, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("User"), f"Expected User in response:\n{response}" assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}" @@ -880,42 +1046,56 @@ class Boto3ClientWrapper(S3ClientWrapper): return response @reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user") - @report_error def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.get_user_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Get User Policy", response, params) + params = self._convert_to_s3_params(locals()) + response = self._exec_request( + self.boto3_iam_client.get_user_policy, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("UserName"), f"Expected UserName in response:\n{response}" return response @reporter.step("Returns information about the access key IDs associated with the specified IAM user") - @report_error def iam_list_access_keys(self, user_name: str) -> dict: - response = self.boto3_iam_client.list_access_keys(UserName=user_name) - log_command_execution(self.iam_endpoint, "IAM List Access Keys", response, {"UserName": user_name}) - return response + return self._exec_request( + self.boto3_iam_client.list_access_keys, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Lists all managed policies that are attached to the specified IAM group") - @report_error def 
iam_list_attached_group_policies(self, group_name: str) -> dict: - response = self.boto3_iam_client.list_attached_group_policies(GroupName=group_name) - log_command_execution(self.iam_endpoint, "IAM List Attached Group Policies", response, {"GroupName": group_name}) + response = self._exec_request( + self.boto3_iam_client.list_attached_group_policies, + params={"GroupName": group_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" return response @reporter.step("Lists all managed policies that are attached to the specified IAM user") - @report_error def iam_list_attached_user_policies(self, user_name: str) -> dict: - response = self.boto3_iam_client.list_attached_user_policies(UserName=user_name) - log_command_execution(self.iam_endpoint, "IAM List Attached User Policies", response, {"UserName": user_name}) + response = self._exec_request( + self.boto3_iam_client.list_attached_user_policies, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" return response @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to") - @report_error def iam_list_entities_for_policy(self, policy_arn: str) -> dict: - response = self.boto3_iam_client.list_entities_for_policy(PolicyArn=policy_arn) - log_command_execution(self.iam_endpoint, "IAM List Entities For Policy", response, {"PolicyArn": policy_arn}) + response = self._exec_request( + self.boto3_iam_client.list_entities_for_policy, + params={"PolicyArn": policy_arn}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("PolicyGroups"), f"Expected PolicyGroups in response:\n{response}" assert response.get("PolicyUsers"), f"Expected PolicyUsers in response:\n{response}" @@ -923,125 +1103,165 @@ class Boto3ClientWrapper(S3ClientWrapper): return response @reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group") - @report_error def iam_list_group_policies(self, group_name: str) -> dict: - response = self.boto3_iam_client.list_group_policies(GroupName=group_name) - log_command_execution(self.iam_endpoint, "IAM List Group Policies", response, {"GroupName": group_name}) + response = self._exec_request( + self.boto3_iam_client.list_group_policies, + params={"GroupName": group_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" return response @reporter.step("Lists the IAM groups") - @report_error def iam_list_groups(self) -> dict: - response = self.boto3_iam_client.list_groups() - log_command_execution(self.iam_endpoint, "IAM List Groups", response) + response = self._exec_request( + self.boto3_iam_client.list_groups, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("Groups"), f"Expected Groups in response:\n{response}" return response @reporter.step("Lists the IAM groups that the specified IAM user belongs to") - @report_error def iam_list_groups_for_user(self, user_name: str) -> dict: - response = self.boto3_iam_client.list_groups_for_user(UserName=user_name) - log_command_execution(self.iam_endpoint, "IAM List Groups For User", response, {"UserName": user_name}) + response = self._exec_request( + self.boto3_iam_client.list_groups_for_user, + params={"UserName": user_name}, + 
endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("Groups"), f"Expected Groups in response:\n{response}" return response @reporter.step("Lists all the managed policies that are available in your AWS account") - @report_error def iam_list_policies(self) -> dict: - response = self.boto3_iam_client.list_policies() - log_command_execution(self.iam_endpoint, "IAM List Policies", response) + response = self._exec_request( + self.boto3_iam_client.list_policies, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("Policies"), f"Expected Policies in response:\n{response}" return response @reporter.step("Lists information about the versions of the specified managed policy") - @report_error def iam_list_policy_versions(self, policy_arn: str) -> dict: - response = self.boto3_iam_client.list_policy_versions(PolicyArn=policy_arn) - log_command_execution(self.iam_endpoint, "IAM List Policy Versions", response, {"PolicyArn": policy_arn}) + response = self._exec_request( + self.boto3_iam_client.list_policy_versions, + params={"PolicyArn": policy_arn}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("Versions"), f"Expected Versions in response:\n{response}" return response @reporter.step("Lists the names of the inline policies embedded in the specified IAM user") - @report_error def iam_list_user_policies(self, user_name: str) -> dict: - response = self.boto3_iam_client.list_user_policies(UserName=user_name) - log_command_execution(self.iam_endpoint, "IAM List User Policies", response, {"UserName": user_name}) + response = self._exec_request( + self.boto3_iam_client.list_user_policies, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" return response @reporter.step("Lists the IAM users") - @report_error def iam_list_users(self) -> dict: - response = self.boto3_iam_client.list_users() - log_command_execution(self.iam_endpoint, "IAM List Users", response) + response = self._exec_request( + self.boto3_iam_client.list_users, + endpoint=self.iam_endpoint, + profile=self.profile, + ) assert response.get("Users"), f"Expected Users in response:\n{response}" return response @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group") - @report_error def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict: - params = self._convert_to_s3_params(locals().items()) + params = self._convert_to_s3_params(locals()) params["PolicyDocument"] = json.dumps(policy_document) - response = self.boto3_iam_client.put_group_policy(**params) - log_command_execution(self.iam_endpoint, "IAM Put Group Policy", response, params) + response = self._exec_request( + self.boto3_iam_client.put_group_policy, + params, + # Overriding option for AWS CLI + policy_document=policy_document, + endpoint=self.iam_endpoint, + profile=self.profile, + ) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") - @report_error def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict: - params = self._convert_to_s3_params(locals().items()) + params = self._convert_to_s3_params(locals()) params["PolicyDocument"] = json.dumps(policy_document) - response = self.boto3_iam_client.put_user_policy(**params) - log_command_execution(self.iam_endpoint, 
"IAM Put User Policy", response, params) + response = self._exec_request( + self.boto3_iam_client.put_user_policy, + params, + # Overriding option for AWS CLI + policy_document=policy_document, + endpoint=self.iam_endpoint, + profile=self.profile, + ) sleep(S3_SYNC_WAIT_TIME * 10) return response @reporter.step("Removes the specified user from the specified group") - @report_error def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.remove_user_from_group(**params) - log_command_execution(self.iam_endpoint, "IAM Remove User From Group", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.remove_user_from_group, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Updates the name and/or the path of the specified IAM group") - @report_error def iam_update_group(self, group_name: str, new_name: str, new_path: Optional[str] = None) -> dict: params = {"GroupName": group_name, "NewGroupName": new_name, "NewPath": "/"} - response = self.boto3_iam_client.update_group(**params) - log_command_execution(self.iam_endpoint, "IAM Update Group", response, params) - return response + return self._exec_request( + self.boto3_iam_client.update_group, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Updates the name and/or the path of the specified IAM user") - @report_error def iam_update_user(self, user_name: str, new_name: str, new_path: Optional[str] = None) -> dict: params = {"UserName": user_name, "NewUserName": new_name, "NewPath": "/"} - response = self.boto3_iam_client.update_user(**params) - log_command_execution(self.iam_endpoint, "IAM Update User", response, params) - return response + return self._exec_request( + self.boto3_iam_client.update_user, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Adds one or more tags to an IAM user") - @report_error def iam_tag_user(self, user_name: str, tags: list) -> dict: - params = self._convert_to_s3_params(locals().items()) + params = self._convert_to_s3_params(locals()) params["Tags"] = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags] - response = self.boto3_iam_client.tag_user(**params) - log_command_execution(self.iam_endpoint, "IAM Tag User", response, params) - return response + return self._exec_request( + self.boto3_iam_client.tag_user, + params, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("List tags of IAM user") - @report_error def iam_list_user_tags(self, user_name: str) -> dict: - response = self.boto3_iam_client.list_user_tags(UserName=user_name) - log_command_execution(self.iam_endpoint, "IAM List User Tags", response, {"UserName": user_name}) - return response + return self._exec_request( + self.boto3_iam_client.list_user_tags, + params={"UserName": user_name}, + endpoint=self.iam_endpoint, + profile=self.profile, + ) @reporter.step("Removes the specified tags from the user") - @report_error def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: - params = self._convert_to_s3_params(locals().items()) - response = self.boto3_iam_client.untag_user(**params) - log_command_execution(self.iam_endpoint, "IAM Untag User", response, params) - return response + params = self._convert_to_s3_params(locals()) + return self._exec_request( + self.boto3_iam_client.untag_user, + params, + 
endpoint=self.iam_endpoint, + profile=self.profile, + ) diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index 8e019ea..32e4346 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -9,13 +9,12 @@ import csv import json import logging import re -import subprocess import sys from contextlib import suppress from datetime import datetime from io import StringIO from textwrap import shorten -from typing import Dict, List, Optional, TypedDict, Union +from typing import Any, Optional, Union import pexpect @@ -75,22 +74,75 @@ def _attach_allure_log(cmd: str, output: str, return_code: int, start_time: date reporter.attach(command_attachment, "Command execution") -def log_command_execution(url: str, cmd: str, output: Union[str, dict], params: Optional[dict] = None) -> None: +def log_command_execution(cmd: str, output: Union[str, dict], params: Optional[dict] = None, **kwargs) -> None: logger.info(f"{cmd}: {output}") - with suppress(Exception): - json_output = json.dumps(output, indent=4, sort_keys=True) - output = json_output + if not params: + params = {} + + output_params = params try: - json_params = json.dumps(params, indent=4, sort_keys=True) + json_params = json.dumps(params, indent=4, sort_keys=True, default=str) except TypeError as err: logger.warning(f"Failed to serialize '{cmd}' request parameters:\n{params}\nException: {err}") else: - params = json_params + output_params = json_params - command_attachment = f"COMMAND: '{cmd}'\n" f"URL: {url}\n" f"PARAMS:\n{params}\n" f"OUTPUT:\n{output}\n" - reporter.attach(command_attachment, "Command execution") + output = json.dumps(output, indent=4, sort_keys=True, default=str) + + command_execution = f"COMMAND: '{cmd}'\n" f"URL: {kwargs['endpoint']}\n" f"PARAMS:\n{output_params}\n" f"OUTPUT:\n{output}\n" + aws_command = _convert_request_to_aws_cli_command(cmd, params, **kwargs) + + reporter.attach(command_execution, "Command execution") + reporter.attach(aws_command, "AWS CLI Command") + + +def _convert_request_to_aws_cli_command(command: str, params: dict, **kwargs) -> str: + overriden_names = [_convert_json_name_to_aws_cli(name) for name in kwargs.keys()] + command = command.replace("_", "-") + options = [] + + for name, value in params.items(): + name = _convert_json_name_to_aws_cli(name) + + # To override parameters for AWS CLI + if name in overriden_names: + continue + + if option := _create_option(name, value): + options.append(option) + + for name, value in kwargs.items(): + name = _convert_json_name_to_aws_cli(name) + if option := _create_option(name, value): + options.append(option) + + options = " ".join(options) + api = "s3api" if "s3" in kwargs["endpoint"] else "iam" + return f"aws --no-verify-ssl --no-paginate {api} {command} {options}" + + +def _convert_json_name_to_aws_cli(name: str) -> str: + specific_names = {"CORSConfiguration": "cors-configuration"} + + if aws_cli_name := specific_names.get(name): + return aws_cli_name + return re.sub(r"([a-z])([A-Z])", r"\1 \2", name).lower().replace(" ", "-").replace("_", "-") + + +def _create_option(name: str, value: Any) -> str | None: + if isinstance(value, bool) and value: + return f"--{name}" + + if isinstance(value, dict): + value = json.dumps(value, indent=4, sort_keys=True, default=str) + return f"--{name} '{value}'" + + if value: + return f"--{name} {value}" + + return None def parse_netmap_output(output: str) -> list[NodeNetmapInfo]: From 6f1baf3cf6384f7adeb300bd9d6c9406f4abdcf3 Mon Sep 17 
00:00:00 2001 From: Ekaterina Chernitsyna Date: Fri, 1 Nov 2024 15:50:17 +0300 Subject: [PATCH 304/363] [#312] update morph remove_nodes --- src/frostfs_testlib/cli/frostfs_adm/morph.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index 7228692..2958884 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -341,7 +341,6 @@ class FrostfsAdmMorph(CliCommand): return self._execute( f"morph remove-nodes {' '.join(node_netmap_keys)}", **{param: param_value for param, param_value in locals().items() if param not in ["self", "node_netmap_keys"]}, - **{param: param_value for param, param_value in locals().items() if param not in ["self", "node_netmap_keys"]}, ) def add_rule( From ea4094051413cf49b74277794a0e3b99221d05f6 Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Tue, 5 Nov 2024 12:37:56 +0300 Subject: [PATCH 305/363] [#313] update force_new_epoch --- src/frostfs_testlib/cli/frostfs_adm/morph.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index 2958884..f3e0137 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -122,7 +122,9 @@ class FrostfsAdmMorph(CliCommand): **{param: param_value for param, param_value in locals().items() if param not in ["self"]}, ) - def force_new_epoch(self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None) -> CommandResult: + def force_new_epoch( + self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None, delta: Optional[int] = None + ) -> CommandResult: """Create new FrostFS epoch event in the side chain. 
Args: From 55d8ee5da0cc7113fe864ebfadd028234891bf98 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 8 Nov 2024 15:46:02 +0300 Subject: [PATCH 306/363] [#315] Add http client Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/http/__init__.py | 0 src/frostfs_testlib/http/http_client.py | 95 +++++++++++++++++++++++++ 2 files changed, 95 insertions(+) create mode 100644 src/frostfs_testlib/http/__init__.py create mode 100644 src/frostfs_testlib/http/http_client.py diff --git a/src/frostfs_testlib/http/__init__.py b/src/frostfs_testlib/http/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/frostfs_testlib/http/http_client.py b/src/frostfs_testlib/http/http_client.py new file mode 100644 index 0000000..261b2a6 --- /dev/null +++ b/src/frostfs_testlib/http/http_client.py @@ -0,0 +1,95 @@ +import json +import logging +import logging.config + +import httpx + +from frostfs_testlib import reporter + +timeout = httpx.Timeout(60, read=150) +LOGGING_CONFIG = { + "disable_existing_loggers": False, + "version": 1, + "handlers": {"default": {"class": "logging.StreamHandler", "formatter": "http", "stream": "ext://sys.stderr"}}, + "formatters": { + "http": { + "format": "%(levelname)s [%(asctime)s] %(name)s - %(message)s", + "datefmt": "%Y-%m-%d %H:%M:%S", + } + }, + "loggers": { + "httpx": { + "handlers": ["default"], + "level": "DEBUG", + }, + "httpcore": { + "handlers": ["default"], + "level": "ERROR", + }, + }, +} + +logging.config.dictConfig(LOGGING_CONFIG) +logger = logging.getLogger("NeoLogger") + + +class HttpClient: + @reporter.step("Send {method} request to {url}") + def send(self, method: str, url: str, expected_status_code: int = None, **kwargs: dict) -> httpx.Response: + transport = httpx.HTTPTransport(verify=False, retries=5) + client = httpx.Client(timeout=timeout, transport=transport) + response = client.request(method, url, **kwargs) + + self._attach_response(response) + logger.info(f"Response: {response.status_code} => {response.text}") + + if expected_status_code: + assert response.status_code == expected_status_code, ( + f"Got {response.status_code} response code" f" while {expected_status_code} expected" + ) + + return response + + def _attach_response(self, response: httpx.Response): + request = response.request + + try: + request_headers = json.dumps(dict(request.headers), indent=4) + except json.JSONDecodeError: + request_headers = str(request.headers) + + try: + request_body = request.read() + try: + request_body = request_body.decode("utf-8") + except UnicodeDecodeError as e: + request_body = f"Unable to decode binary data to text using UTF-8 encoding: {str(e)}" + except Exception as e: + request_body = f"Error reading request body: {str(e)}" + + request_body = "" if request_body is None else request_body + + try: + response_headers = json.dumps(dict(response.headers), indent=4) + except json.JSONDecodeError: + response_headers = str(response.headers) + + report = ( + f"Method: {request.method}\n\n" + f"URL: {request.url}\n\n" + f"Request Headers: {request_headers}\n\n" + f"Request Body: {request_body}\n\n" + f"Response Status Code: {response.status_code}\n\n" + f"Response Headers: {response_headers}\n\n" + f"Response Body: {response.text}\n\n" + ) + curl_request = self._create_curl_request(request.url, request.method, request.headers, request_body) + + reporter.attach(report, "Requests Info") + reporter.attach(curl_request, "CURL") + + def _create_curl_request(self, url: str, method: str, headers: httpx.Headers, data: str) -> str: + 
headers = " ".join(f'-H "{name.title()}: {value}"' for name, value in headers.items()) + data = f" -d '{data}'" if data else "" + # Option -k means no verify SSL + return f"curl {url} -X {method} {headers}{data} -k" From 95b32a036a8043191f3cec6dd249ee95fa1aa3a6 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Tue, 12 Nov 2024 12:28:10 +0300 Subject: [PATCH 307/363] [#316] Extend parallel exception message output Signed-off-by: a.berezin --- src/frostfs_testlib/testing/parallel.py | 38 ++++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/testing/parallel.py b/src/frostfs_testlib/testing/parallel.py index 0549e61..6c4f6e0 100644 --- a/src/frostfs_testlib/testing/parallel.py +++ b/src/frostfs_testlib/testing/parallel.py @@ -1,4 +1,5 @@ import itertools +import traceback from concurrent.futures import Future, ThreadPoolExecutor from contextlib import contextmanager from typing import Callable, Collection, Optional, Union @@ -55,7 +56,42 @@ def parallel( # Check for exceptions exceptions = [future.exception() for future in futures if future.exception()] if exceptions: - message = "\n".join([str(e) for e in exceptions]) + # Prettify exception in parallel with all underlying stack traces + # For example, we had 3 RuntimeError exceptions during parallel. This format will give us something like + # + # RuntimeError: The following exceptions occured during parallel run: + # 1) Exception one text + # 2) Exception two text + # 3) Exception three text + # TRACES: + # ==== 1 ==== + # Traceback (most recent call last): + # File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run + # result = self.fn(*self.args, **self.kwargs) + # File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service + # raise RuntimeError(f"Exception one text") + # RuntimeError: Exception one text + # + # ==== 2 ==== + # Traceback (most recent call last): + # File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run + # result = self.fn(*self.args, **self.kwargs) + # File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service + # raise RuntimeError(f"Exception two text") + # RuntimeError: Exception two text + # + # ==== 3 ==== + # Traceback (most recent call last): + # File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run + # result = self.fn(*self.args, **self.kwargs) + # File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service + # raise RuntimeError(f"Exception three text") + # RuntimeError: Exception three text + short_summary = "\n".join([f"{i}) {str(e)}" for i, e in enumerate(exceptions, 1)]) + stack_traces = "\n".join( + [f"==== {i} ====\n{''.join(traceback.TracebackException.from_exception(e).format())}" for i, e in enumerate(exceptions, 1)] + ) + message = f"{short_summary}\nTRACES:\n{stack_traces}" raise RuntimeError(f"The following exceptions occured during parallel run:\n{message}") return futures From 2a90ec74ff70934d65fa13e78c348afda3b195c2 Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Tue, 12 Nov 2024 16:01:12 +0300 Subject: [PATCH 308/363] [#317] update morph rule chain --- src/frostfs_testlib/cli/frostfs_adm/morph.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index f3e0137..5e39cf4 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ 
b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -353,6 +353,7 @@ class FrostfsAdmMorph(CliCommand): rule: Optional[list[str]] = None, path: Optional[str] = None, chain_id_hex: Optional[bool] = None, + chain_name: Optional[str] = None, wallet: Optional[str] = None, address: Optional[str] = None, timeout: Optional[str] = None, @@ -383,6 +384,7 @@ class FrostfsAdmMorph(CliCommand): target_name: str, target_type: str, chain_id_hex: Optional[bool] = None, + chain_name: Optional[str] = None, wallet: Optional[str] = None, address: Optional[str] = None, timeout: Optional[str] = None, @@ -410,6 +412,7 @@ class FrostfsAdmMorph(CliCommand): target_type: str, target_name: Optional[str] = None, rpc_endpoint: Optional[str] = None, + chain_name: Optional[str] = None, wallet: Optional[str] = None, address: Optional[str] = None, timeout: Optional[str] = None, @@ -436,6 +439,7 @@ class FrostfsAdmMorph(CliCommand): target_name: str, target_type: str, all: Optional[bool] = None, + chain_name: Optional[str] = None, chain_id_hex: Optional[bool] = None, wallet: Optional[str] = None, address: Optional[str] = None, From 47bc11835bb7869e2b87e761e432e923fcd90343 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Wed, 13 Nov 2024 10:10:35 +0300 Subject: [PATCH 309/363] [#318] Add tombstone expiration test Signed-off-by: a.berezin --- src/frostfs_testlib/hosting/docker_host.py | 3 ++ src/frostfs_testlib/hosting/interfaces.py | 11 +++++++ src/frostfs_testlib/resources/common.py | 1 + src/frostfs_testlib/storage/cluster.py | 6 ++-- .../controllers/cluster_state_controller.py | 21 ++++++++++++-- .../state_managers/config_state_manager.py | 29 ++++++++++++++----- .../storage/dataclasses/node_base.py | 12 ++++---- 7 files changed, 63 insertions(+), 20 deletions(-) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 5110e63..01dc6b5 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -164,6 +164,9 @@ class DockerHost(Host): return volume_path + def send_signal_to_service(self, service_name: str, signal: str) -> None: + raise NotImplementedError("Not implemented for docker") + def delete_metabase(self, service_name: str) -> None: raise NotImplementedError("Not implemented for docker") diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index b84326a..6d1e5da 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -117,6 +117,17 @@ class Host(ABC): service_name: Name of the service to stop. """ + @abstractmethod + def send_signal_to_service(self, service_name: str, signal: str) -> None: + """Send signal to service with specified name using kill -<signal> + + The service must be hosted on this host. + + Args: + service_name: Name of the service to send the signal to. + signal: Signal name. See kill -l for all names. + """ + @abstractmethod def mask_service(self, service_name: str) -> None: """Prevent the service from start by any activity by masking it. 
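The Host interface above only declares the operation; each concrete host decides how to deliver the signal. A minimal sketch of what a systemd-backed implementation could look like, assuming a shell-based host like the others in this library (the sudo prefix and direct systemctl usage are assumptions, not part of the patch):

    def send_signal_to_service(self, service_name: str, signal: str) -> None:
        shell = self.get_shell()
        # systemctl kill delivers the given signal (e.g. SIGHUP) to the unit's
        # main process without stopping, restarting or masking the service
        shell.exec(f"sudo systemctl kill --signal={signal} {service_name}")
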
diff --git a/src/frostfs_testlib/resources/common.py b/src/frostfs_testlib/resources/common.py index 1c93b12..53bcfaa 100644 --- a/src/frostfs_testlib/resources/common.py +++ b/src/frostfs_testlib/resources/common.py @@ -53,3 +53,4 @@ HOSTING_CONFIG_FILE = os.getenv( ) MORE_LOG = os.getenv("MORE_LOG", "1") +EXPIRATION_EPOCH_ATTRIBUTE = "__SYSTEM__EXPIRATION_EPOCH" diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index 9fcc4c9..3ec4922 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -11,10 +11,10 @@ from frostfs_testlib.storage import get_service_registry from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml from frostfs_testlib.storage.constants import ConfigAttributes from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode +from frostfs_testlib.storage.dataclasses.metrics import Metrics from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces from frostfs_testlib.storage.service_registry import ServiceRegistry -from frostfs_testlib.storage.dataclasses.metrics import Metrics class ClusterNode: @@ -91,10 +91,10 @@ class ClusterNode: config_str = yaml.dump(new_config) shell.exec(f"echo '{config_str}' | sudo tee {config_file_path}") - def config(self, service_type: type[ServiceClass]) -> ServiceConfigurationYml: + def config(self, service_type: ServiceClass) -> ServiceConfigurationYml: return self.service(service_type).config - def service(self, service_type: type[ServiceClass]) -> ServiceClass: + def service(self, service_type: ServiceClass) -> ServiceClass: """ Get a service cluster node of specified type. 
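The typed accessors above are the hook for everything that follows: tests resolve a service wrapper by class and operate on it. A hedged usage sketch (node is assumed to be a ClusterNode; send_signal_to_service is the NodeBase helper added later in this patch):

    from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode

    # Resolve the storage-node service on this cluster node and its YAML config
    storage_service = node.service(StorageNode)
    storage_config = node.config(StorageNode)

    # Deliver SIGHUP so the node re-reads its configuration without a restart
    storage_service.send_signal_to_service("SIGHUP")
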
diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 53098b1..5080d40 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -172,6 +172,15 @@ class ClusterStateController: if service_type == StorageNode: self.wait_after_storage_startup() + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Send sighup to all {service_type} services") + def sighup_services_of_type(self, service_type: type[ServiceClass]): + services = self.cluster.services(service_type) + parallel([service.send_signal_to_service for service in services], signal="SIGHUP") + + if service_type == StorageNode: + self.wait_after_storage_startup() + @wait_for_success(600, 60) def wait_s3gate(self, s3gate: S3Gate): with reporter.step(f"Wait for {s3gate} reconnection"): @@ -206,21 +215,27 @@ class ClusterStateController: @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step("Stop {service_type} service on {node}") - def stop_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass], mask: bool = True): + def stop_service_of_type(self, node: ClusterNode, service_type: ServiceClass, mask: bool = True): service = node.service(service_type) service.stop_service(mask) self.stopped_services.add(service) + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) + @reporter.step("Send sighup to {service_type} service on {node}") + def sighup_service_of_type(self, node: ClusterNode, service_type: ServiceClass): + service = node.service(service_type) + service.send_signal_to_service("SIGHUP") + @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step("Start {service_type} service on {node}") - def start_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass]): + def start_service_of_type(self, node: ClusterNode, service_type: ServiceClass): service = node.service(service_type) service.start_service() self.stopped_services.discard(service) @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step("Start all stopped {service_type} services") - def start_stopped_services_of_type(self, service_type: type[ServiceClass]): + def start_stopped_services_of_type(self, service_type: ServiceClass): stopped_svc = self._get_stopped_by_type(service_type) if not stopped_svc: return diff --git a/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py b/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py index 66f72d6..f0b2a21 100644 --- a/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py +++ b/src/frostfs_testlib/storage/controllers/state_managers/config_state_manager.py @@ -14,14 +14,19 @@ class ConfigStateManager(StateManager): self.cluster = self.csc.cluster @reporter.step("Change configuration for {service_type} on all nodes") - def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any]): + def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any], sighup: bool = False): services = self.cluster.services(service_type) nodes = self.cluster.nodes(services) self.services_with_changed_config.update([(node, service_type) for node in nodes]) - self.csc.stop_services_of_type(service_type) + if not sighup: + self.csc.stop_services_of_type(service_type) + parallel([node.config(service_type).set for node in nodes], values=values) - 
self.csc.start_services_of_type(service_type) + if not sighup: + self.csc.start_services_of_type(service_type) + else: + self.csc.sighup_services_of_type(service_type) @reporter.step("Change configuration for {service_type} on {node}") def set_on_node(self, node: ClusterNode, service_type: type[ServiceClass], values: dict[str, Any]): @@ -32,18 +37,26 @@ class ConfigStateManager(StateManager): self.csc.start_service_of_type(node, service_type) @reporter.step("Revert all configuration changes") - def revert_all(self): + def revert_all(self, sighup: bool = False): if not self.services_with_changed_config: return - parallel(self._revert_svc, self.services_with_changed_config) + parallel(self._revert_svc, self.services_with_changed_config, sighup) self.services_with_changed_config.clear() - self.csc.start_all_stopped_services() + if not sighup: + self.csc.start_all_stopped_services() # TODO: parallel can't have multiple parallel_items :( @reporter.step("Revert all configuration {node_and_service}") - def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass]): + def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass], sighup: bool = False): node, service_type = node_and_service - self.csc.stop_service_of_type(node, service_type) + service = node.service(service_type) + + if not sighup: + self.csc.stop_service_of_type(node, service_type) + node.config(service_type).revert() + + if sighup: + service.send_signal_to_service("SIGHUP") diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 8291345..180877d 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -65,6 +65,10 @@ class NodeBase(HumanReadableABC): with reporter.step(f"Start {self.name} service on {self.host.config.address}"): self.host.start_service(self.name) + def send_signal_to_service(self, signal: str): + with reporter.step(f"Send -{signal} signal to {self.name} service on {self.host.config.address}"): + self.host.send_signal_to_service(self.name, signal) + @abstractmethod def service_healthcheck(self) -> bool: """Service healthcheck.""" @@ -185,9 +189,7 @@ class NodeBase(HumanReadableABC): if attribute_name not in config.attributes: if default_attribute_name is None: - raise RuntimeError( - f"Service {self.name} has no {attribute_name} in config and fallback attribute isn't set either" - ) + raise RuntimeError(f"Service {self.name} has no {attribute_name} in config and fallback attribute isn't set either") return config.attributes[default_attribute_name] @@ -197,9 +199,7 @@ class NodeBase(HumanReadableABC): return self.host.get_service_config(self.name) def get_service_uptime(self, service: str) -> datetime: - result = self.host.get_shell().exec( - f"systemctl show {service} --property ActiveEnterTimestamp | cut -d '=' -f 2" - ) + result = self.host.get_shell().exec(f"systemctl show {service} --property ActiveEnterTimestamp | cut -d '=' -f 2") start_time = parser.parse(result.stdout.strip()) current_time = datetime.now(tz=timezone.utc) active_time = current_time - start_time From f24bfc06fd04f0fc195135315d1d3a9c9828fcf8 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Wed, 13 Nov 2024 17:46:03 +0300 Subject: [PATCH 310/363] [#319] Add cached fixture feature Signed-off-by: a.berezin --- src/frostfs_testlib/resources/optionals.py | 11 +++--- src/frostfs_testlib/testing/test_control.py | 39 +++++++++++++++++++++ 2 files changed, 44 insertions(+), 6 deletions(-) 
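The diff below wires a new OPTIONAL_CACHE_FIXTURES toggle to a cached_fixture decorator. A hedged usage sketch — the fixture name and the wallet factory dependency are illustrative, not from the patch:

    import pytest

    from frostfs_testlib.resources import optionals
    from frostfs_testlib.testing.test_control import cached_fixture

    @pytest.fixture(scope="session")
    @cached_fixture(optionals.OPTIONAL_CACHE_FIXTURES)
    def default_wallet(wallet_factory):
        # Must return (not yield) a YAML-serializable value: when caching is
        # enabled the result is dumped to ASSETS_DIR and reloaded on later runs
        return wallet_factory.create_wallet()
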
diff --git a/src/frostfs_testlib/resources/optionals.py b/src/frostfs_testlib/resources/optionals.py index 2a7ff22..6caf158 100644 --- a/src/frostfs_testlib/resources/optionals.py +++ b/src/frostfs_testlib/resources/optionals.py @@ -16,11 +16,10 @@ OPTIONAL_NODE_UNDER_LOAD = os.getenv("OPTIONAL_NODE_UNDER_LOAD") OPTIONAL_FAILOVER_ENABLED = str_to_bool(os.getenv("OPTIONAL_FAILOVER_ENABLED", "true")) # Set this to True to disable background load. I.E. node which supposed to be stopped will not be actually stopped. -OPTIONAL_BACKGROUND_LOAD_ENABLED = str_to_bool( - os.getenv("OPTIONAL_BACKGROUND_LOAD_ENABLED", "true") -) +OPTIONAL_BACKGROUND_LOAD_ENABLED = str_to_bool(os.getenv("OPTIONAL_BACKGROUND_LOAD_ENABLED", "true")) # Set this to False for disable autouse fixture like node healthcheck during developing time. -OPTIONAL_AUTOUSE_FIXTURES_ENABLED = str_to_bool( - os.getenv("OPTIONAL_AUTOUSE_FIXTURES_ENABLED", "true") -) +OPTIONAL_AUTOUSE_FIXTURES_ENABLED = str_to_bool(os.getenv("OPTIONAL_AUTOUSE_FIXTURES_ENABLED", "true")) + +# Use cache for fixtures with @cached_fixture decorator +OPTIONAL_CACHE_FIXTURES = str_to_bool(os.getenv("OPTIONAL_CACHE_FIXTURES", "false")) diff --git a/src/frostfs_testlib/testing/test_control.py b/src/frostfs_testlib/testing/test_control.py index 4fa6390..bc38208 100644 --- a/src/frostfs_testlib/testing/test_control.py +++ b/src/frostfs_testlib/testing/test_control.py @@ -1,13 +1,16 @@ import inspect import logging +import os from functools import wraps from time import sleep, time from typing import Any +import yaml from _pytest.outcomes import Failed from pytest import fail from frostfs_testlib import reporter +from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.utils.func_utils import format_by_args logger = logging.getLogger("NeoLogger") @@ -128,6 +131,42 @@ def run_optionally(enabled: bool, mock_value: Any = True): return deco +def cached_fixture(enabled: bool): + """ + Decorator to cache fixtures. + MUST be placed after @pytest.fixture and before @allure decorators. + + Args: + enabled: if true, decorated func will be cached. 
+ """ + + def deco(func): + @wraps(func) + def func_impl(*a, **kw): + # TODO: *a and *kw should be parsed to some kind of hashsum and used in filename to prevent cache load from different parameters + cache_file = os.path.join(ASSETS_DIR, f"fixture_cache_{func.__name__}.yml") + + if enabled and os.path.exists(cache_file): + with open(cache_file, "r") as cache_input: + return yaml.load(cache_input, Loader=yaml.Loader) + + result = func(*a, **kw) + + if enabled: + with open(cache_file, "w") as cache_output: + yaml.dump(result, cache_output) + return result + + # TODO: cache yielding fixtures + @wraps(func) + def gen_impl(*a, **kw): + raise NotImplementedError("Not implemented for yielding fixtures") + + return gen_impl if inspect.isgeneratorfunction(func) else func_impl + + return deco + + def wait_for_success( max_wait_time: int = 60, interval: int = 1, From 451de5e07e7ef6dd68e684aaa431839583a82089 Mon Sep 17 00:00:00 2001 From: anurindm Date: Thu, 14 Nov 2024 16:22:06 +0300 Subject: [PATCH 311/363] [#320] Added shards detach function Signed-off-by: Dmitry Anurin --- src/frostfs_testlib/cli/frostfs_cli/shards.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/src/frostfs_testlib/cli/frostfs_cli/shards.py b/src/frostfs_testlib/cli/frostfs_cli/shards.py index 82ea87b..68a2f54 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/shards.py +++ b/src/frostfs_testlib/cli/frostfs_cli/shards.py @@ -241,3 +241,21 @@ class FrostfsCliShards(CliCommand): "control shards evacuation status", **{param: value for param, value in locals().items() if param not in ["self"]}, ) + + def detach(self, endpoint: str, address: Optional[str] = None, id: Optional[str] = None, timeout: Optional[str] = None): + """ + Detach and close the shards + + Args: + address: Address of wallet account + endpoint: Remote node control address (as 'multiaddr' or ':') + id: List of shard IDs in base58 encoding + timeout: Timeout for an operation (default 15s) + + Returns: + Command's result. 
+ """ + return self._execute( + "control shards detach", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) From a1953684b87f8c3d96f95a14ce98f59fdcab657b Mon Sep 17 00:00:00 2001 From: Roman Chernykh Date: Wed, 16 Oct 2024 18:42:42 +0300 Subject: [PATCH 312/363] [#307] added methods for testing MFA --- src/frostfs_testlib/s3/aws_cli_client.py | 87 ++++++++++++++++++++++++ src/frostfs_testlib/s3/boto3_client.py | 73 ++++++++++++++++++++ src/frostfs_testlib/s3/interfaces.py | 29 ++++++++ src/frostfs_testlib/utils/file_utils.py | 8 ++- 4 files changed, 195 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index ff4e329..ba95733 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -1440,3 +1440,90 @@ class AwsCliClient(S3ClientWrapper): response = self._to_json(output) return response + + # MFA METHODS + @reporter.step("Creates a new virtual MFA device") + def iam_create_virtual_mfa_device(self, virtual_mfa_device_name: str, outfile: str, bootstrap_method: str) -> tuple: + cmd = f"aws {self.common_flags} iam create-virtual-mfa-device --virtual-mfa-device-name {virtual_mfa_device_name}\ + --outfile {outfile} --bootstrap-method {bootstrap_method} --endpoint {self.iam_endpoint}" + + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + serial_number = response.get("VirtualMFADevice", {}).get("SerialNumber") + assert serial_number, f"Expected SerialNumber in response:\n{response}" + + return serial_number, False + + @reporter.step("Deactivates the specified MFA device and removes it from association with the user name") + def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict: + cmd = f"aws {self.common_flags} iam deactivate-mfa-device --user-name {user_name} --serial-number {serial_number} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Deletes a virtual MFA device") + def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict: + cmd = f"aws {self.common_flags} iam delete-virtual-mfa-device --serial-number {serial_number} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Enables the specified MFA device and associates it with the specified IAM user") + def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict: + cmd = f"aws {self.common_flags} iam enable-mfa-device --user-name {user_name} --serial-number {serial_number} --authentication-code1 {authentication_code1}\ + --authentication-code2 {authentication_code2} --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + + return response + + @reporter.step("Lists the MFA devices for an IAM user") + def iam_list_virtual_mfa_devices(self) -> dict: + cmd = f"aws {self.common_flags} iam list-virtual-mfa-devices --endpoint {self.iam_endpoint}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = 
self._to_json(output) + assert response.get("VirtualMFADevices"), f"Expected VirtualMFADevices in response:\n{response}" + + return response + + @reporter.step("Get session token for user") + def sts_get_session_token( + self, duration_seconds: Optional[str] = None, serial_number: Optional[str] = None, token_code: Optional[str] = None + ) -> tuple: + cmd = f"aws {self.common_flags} sts get-session-token --endpoint {self.iam_endpoint}" + if duration_seconds: + cmd += f" --duration-seconds {duration_seconds}" + if serial_number: + cmd += f" --serial-number {serial_number}" + if token_code: + cmd += f" --token-code {token_code}" + if self.profile: + cmd += f" --profile {self.profile}" + + output = self.local_shell.exec(cmd).stdout + response = self._to_json(output) + access_key = response.get("Credentials", {}).get("AccessKeyId") + secret_access_key = response.get("Credentials", {}).get("SecretAccessKey") + session_token = response.get("Credentials", {}).get("SessionToken") + assert access_key, f"Expected AccessKeyId in response:\n{response}" + assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}" + assert session_token, f"Expected SessionToken in response:\n{response}" + + return access_key, secret_access_key, session_token diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index 91d8c5a..12113ad 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -41,6 +41,8 @@ class Boto3ClientWrapper(S3ClientWrapper): self.boto3_iam_client: S3Client = None self.iam_endpoint: str = "" + self.boto3_sts_client: S3Client = None + self.access_key_id: str = access_key_id self.secret_access_key: str = secret_access_key self.profile = profile @@ -87,6 +89,14 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint_url=self.iam_endpoint, verify=False, ) + # since the STS does not have an endpoint, IAM is used + self.boto3_sts_client = self.session.client( + service_name="sts", + aws_access_key_id=self.access_key_id, + aws_secret_access_key=self.secret_access_key, + endpoint_url=iam_endpoint, + verify=False, + ) def _to_s3_param(self, param: str) -> str: replacement_map = { @@ -1265,3 +1275,66 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) + + # MFA methods + @reporter.step("Creates a new virtual MFA device") + def iam_create_virtual_mfa_device( + self, virtual_mfa_device_name: str, outfile: Optional[str] = None, bootstrap_method: Optional[str] = None + ) -> tuple: + response = self.boto3_iam_client.create_virtual_mfa_device(VirtualMFADeviceName=virtual_mfa_device_name) + + serial_number = response.get("VirtualMFADevice", {}).get("SerialNumber") + base32StringSeed = response.get("VirtualMFADevice", {}).get("Base32StringSeed") + assert serial_number, f"Expected SerialNumber in response:\n{response}" + assert base32StringSeed, f"Expected Base32StringSeed in response:\n{response}" + + return serial_number, base32StringSeed + + @reporter.step("Deactivates the specified MFA device and removes it from association with the user name") + def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict: + response = self.boto3_iam_client.deactivate_mfa_device(UserName=user_name, SerialNumber=serial_number) + + return response + + @reporter.step("Deletes a virtual MFA device") + def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict: + response = self.boto3_iam_client.delete_virtual_mfa_device(SerialNumber=serial_number) + + return 
response + + @reporter.step("Enables the specified MFA device and associates it with the specified IAM user") + def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict: + response = self.boto3_iam_client.enable_mfa_device( + UserName=user_name, + SerialNumber=serial_number, + AuthenticationCode1=authentication_code1, + AuthenticationCode2=authentication_code2, + ) + + return response + + @reporter.step("Lists the MFA devices for an IAM user") + def iam_list_virtual_mfa_devices(self) -> dict: + response = self.boto3_iam_client.list_virtual_mfa_devices() + assert response.get("VirtualMFADevices"), f"Expected VirtualMFADevices in response:\n{response}" + + return response + + @reporter.step("Get session token for user") + def sts_get_session_token( + self, duration_seconds: Optional[str] = "", serial_number: Optional[str] = "", token_code: Optional[str] = "" + ) -> tuple: + response = self.boto3_sts_client.get_session_token( + DurationSeconds=duration_seconds, + SerialNumber=serial_number, + TokenCode=token_code, + ) + + access_key = response.get("Credentials", {}).get("AccessKeyId") + secret_access_key = response.get("Credentials", {}).get("SecretAccessKey") + session_token = response.get("Credentials", {}).get("SessionToken") + assert access_key, f"Expected AccessKeyId in response:\n{response}" + assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}" + assert session_token, f"Expected SessionToken in response:\n{response}" + + return access_key, secret_access_key, session_token diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index c084484..69a5154 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -578,3 +578,32 @@ class S3ClientWrapper(HumanReadableABC): @abstractmethod def iam_untag_user(self, user_name: str, tag_keys: list) -> dict: """Removes the specified tags from the user""" + + # MFA methods + @abstractmethod + def iam_create_virtual_mfa_device( + self, virtual_mfa_device_name: str, outfile: Optional[str] = None, bootstrap_method: Optional[str] = None + ) -> tuple: + """Creates a new virtual MFA device""" + + @abstractmethod + def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict: + """Deactivates the specified MFA device and removes it from association with the user name""" + + @abstractmethod + def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict: + """Deletes a virtual MFA device""" + + @abstractmethod + def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict: + """Enables the specified MFA device and associates it with the specified IAM user""" + + @abstractmethod + def iam_list_virtual_mfa_devices(self) -> dict: + """Lists the MFA devices for an IAM user""" + + @abstractmethod + def sts_get_session_token( + self, duration_seconds: Optional[str] = None, serial_number: Optional[str] = None, token_code: Optional[str] = None + ) -> tuple: + """Get session token for user""" diff --git a/src/frostfs_testlib/utils/file_utils.py b/src/frostfs_testlib/utils/file_utils.py index c2b497f..8839d7f 100644 --- a/src/frostfs_testlib/utils/file_utils.py +++ b/src/frostfs_testlib/utils/file_utils.py @@ -45,7 +45,7 @@ def ensure_directory_opener(path, flags): # TODO: Do not add {size} to title yet, since it produces dynamic info in top level steps # Use object_size dt in future as argument 
@reporter.step("Generate file") -def generate_file(size: int) -> TestFile: +def generate_file(size: int, file_name: Optional[str] = None) -> TestFile: """Generates a binary file with the specified size in bytes. Args: @@ -54,7 +54,11 @@ def generate_file(size: int) -> TestFile: Returns: The path to the generated file. """ - test_file = TestFile(os.path.join(ASSETS_DIR, string_utils.unique_name("object-"))) + + if file_name is None: + file_name = string_utils.unique_name("object-") + + test_file = TestFile(os.path.join(ASSETS_DIR, file_name)) with open(test_file, "wb", opener=ensure_directory_opener) as file: file.write(os.urandom(size)) logger.info(f"File with size {size} bytes has been generated: {test_file}") From 8eaa511e5c39feaad06f7c3bf795639fcbbaac92 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Mon, 18 Nov 2024 16:57:14 +0300 Subject: [PATCH 313/363] [#322] Added classmethod decorator in Http client Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/http/http_client.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/http/http_client.py b/src/frostfs_testlib/http/http_client.py index 261b2a6..3106273 100644 --- a/src/frostfs_testlib/http/http_client.py +++ b/src/frostfs_testlib/http/http_client.py @@ -50,7 +50,8 @@ class HttpClient: return response - def _attach_response(self, response: httpx.Response): + @classmethod + def _attach_response(cls, response: httpx.Response): request = response.request try: @@ -83,12 +84,13 @@ class HttpClient: f"Response Headers: {response_headers}\n\n" f"Response Body: {response.text}\n\n" ) - curl_request = self._create_curl_request(request.url, request.method, request.headers, request_body) + curl_request = cls._create_curl_request(request.url, request.method, request.headers, request_body) reporter.attach(report, "Requests Info") reporter.attach(curl_request, "CURL") - def _create_curl_request(self, url: str, method: str, headers: httpx.Headers, data: str) -> str: + @classmethod + def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str) -> str: headers = " ".join(f'-H "{name.title()}: {value}"' for name, value in headers.items()) data = f" -d '{data}'" if data else "" # Option -k means no verify SSL From 0c9660fffc43b6cbeecf119a4e1cb3008020c042 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Wed, 20 Nov 2024 17:14:33 +0300 Subject: [PATCH 314/363] [#323] Update APE related entities Signed-off-by: a.berezin --- src/frostfs_testlib/resources/error_patterns.py | 8 ++++++-- src/frostfs_testlib/storage/dataclasses/ape.py | 14 +++++++++++++- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py index 3ba5f13..9b5e8e4 100644 --- a/src/frostfs_testlib/resources/error_patterns.py +++ b/src/frostfs_testlib/resources/error_patterns.py @@ -27,6 +27,10 @@ S3_BUCKET_DOES_NOT_ALLOW_ACL = "The bucket does not allow ACLs" S3_MALFORMED_XML_REQUEST = "The XML you provided was not well-formed or did not validate against our published schema." RULE_ACCESS_DENIED_CONTAINER = "access to container operation {operation} is denied by access policy engine: Access denied" -RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied" +# Errors from node missing reasons if request was forwarded. 
Commenting for now +RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request" NO_RULE_FOUND_CONTAINER = "access to container operation {operation} is denied by access policy engine: NoRuleFound" -NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request: method {operation}: NoRuleFound" +# Errors from node missing reasons if request was forwarded. Commenting for now +NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request" diff --git a/src/frostfs_testlib/storage/dataclasses/ape.py b/src/frostfs_testlib/storage/dataclasses/ape.py index f0f1758..ef2e1f2 100644 --- a/src/frostfs_testlib/storage/dataclasses/ape.py +++ b/src/frostfs_testlib/storage/dataclasses/ape.py @@ -26,6 +26,18 @@ class ObjectOperations(HumanReadableEnum): return [op for op in ObjectOperations if op != ObjectOperations.WILDCARD_ALL] +class ContainerOperations(HumanReadableEnum): + PUT = "container.put" + GET = "container.get" + LIST = "container.list" + DELETE = "container.delete" + WILDCARD_ALL = "container.*" + + @staticmethod + def get_all(): + return [op for op in ContainerOperations if op != ContainerOperations.WILDCARD_ALL] + + @dataclass class Operations: GET_CONTAINER = "GetContainer" @@ -124,7 +136,7 @@ class Rule: if not operations: self.operations = [] - elif isinstance(operations, ObjectOperations): + elif isinstance(operations, (ObjectOperations, ContainerOperations)): self.operations = [operations] else: self.operations = operations From 24e1dfef282b46e40c900711563e3f69b24220cb Mon Sep 17 00:00:00 2001 From: Roman Chernykh Date: Mon, 18 Nov 2024 13:01:26 +0300 Subject: [PATCH 315/363] [#324] Extend list_objects method --- src/frostfs_testlib/s3/aws_cli_client.py | 13 +++++++++++-- src/frostfs_testlib/s3/boto3_client.py | 11 +++++++++-- src/frostfs_testlib/s3/interfaces.py | 4 +++- 3 files changed, 23 insertions(+), 5 deletions(-) diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index ba95733..2ac6d68 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -196,11 +196,20 @@ class AwsCliClient(S3ClientWrapper): return response.get("LocationConstraint") @reporter.step("List objects S3") - def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + def list_objects( + self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None + ) -> Union[dict, list[str]]: if bucket.startswith("-") or " " in bucket: bucket = f'"{bucket}"' - cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" + cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} " + if page_size: + cmd = cmd.replace("--no-paginate", "") + cmd += f" --page-size {page_size} " + if prefix: + cmd += f" --prefix {prefix}" + if self.profile: + cmd += f" --profile {self.profile} " output = self.local_shell.exec(cmd).stdout response = self._to_json(output) diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index 12113ad..e7f2c35 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ 
-398,10 +398,17 @@ class Boto3ClientWrapper(S3ClientWrapper): return response if full_output else obj_list @reporter.step("List objects S3") - def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + def list_objects( + self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None + ) -> Union[dict, list[str]]: + params = {"Bucket": bucket} + if page_size: + params["MaxKeys"] = page_size + if prefix: + params["Prefix"] = prefix response = self._exec_request( self.boto3_client.list_objects, - params={"Bucket": bucket}, + params, endpoint=self.s3gate_endpoint, profile=self.profile, ) diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index 69a5154..c3d99eb 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -195,7 +195,9 @@ class S3ClientWrapper(HumanReadableABC): """ @abstractmethod - def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]: + def list_objects( + self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None + ) -> Union[dict, list[str]]: """Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Make sure to design your application From 3dc7a5bdb095dbf02c9942f6844540efcccf1b88 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Thu, 28 Nov 2024 16:43:46 +0300 Subject: [PATCH 316/363] [#328] Change logic activating split-brain Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/steps/network.py | 18 ++++---- .../controllers/cluster_state_controller.py | 41 +++++++++++++------ 2 files changed, 38 insertions(+), 21 deletions(-) diff --git a/src/frostfs_testlib/steps/network.py b/src/frostfs_testlib/steps/network.py index efaaf5a..6bde2f1 100644 --- a/src/frostfs_testlib/steps/network.py +++ b/src/frostfs_testlib/steps/network.py @@ -4,16 +4,18 @@ from frostfs_testlib.storage.cluster import ClusterNode class IpHelper: @staticmethod - def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[str]) -> None: + def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[tuple]) -> None: shell = node.host.get_shell() - for ip in block_ip: - shell.exec(f"ip route add blackhole {ip}") + for ip, table in block_ip: + if not table: + shell.exec(f"ip r a blackhole {ip}") + continue + shell.exec(f"ip r a blackhole {ip} table {table}") @staticmethod def restore_input_traffic_to_node(node: ClusterNode) -> None: shell = node.host.get_shell() - unlock_ip = shell.exec("ip route list | grep blackhole", CommandOptions(check=False)) - if unlock_ip.return_code != 0: - return - for ip in unlock_ip.stdout.strip().split("\n"): - shell.exec(f"ip route del blackhole {ip.split(' ')[1]}") + unlock_ip = shell.exec("ip r l table all | grep blackhole", CommandOptions(check=False)).stdout + + for active_blackhole in unlock_ip.strip().split("\n"): + shell.exec(f"ip r d {active_blackhole}") diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 5080d40..67e4d60 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -1,4 +1,5 @@ import datetime +import itertools import logging 
import time from typing import TypeVar @@ -39,7 +40,7 @@ class ClusterStateController: def __init__(self, shell: Shell, cluster: Cluster, healthcheck: Healthcheck) -> None: self.stopped_nodes: list[ClusterNode] = [] self.detached_disks: dict[str, DiskController] = {} - self.dropped_traffic: list[ClusterNode] = [] + self.dropped_traffic: set[ClusterNode] = set() self.excluded_from_netmap: list[StorageNode] = [] self.stopped_services: set[NodeBase] = set() self.cluster = cluster @@ -325,22 +326,22 @@ class ClusterStateController: @reporter.step("Drop traffic to {node}, nodes - {block_nodes}") def drop_traffic(self, node: ClusterNode, wakeup_timeout: int, name_interface: str, block_nodes: list[ClusterNode] = None) -> None: - list_ip = self._parse_interfaces(block_nodes, name_interface) - IpHelper.drop_input_traffic_to_node(node, list_ip) + interfaces_tables = self._parse_interfaces(block_nodes, name_interface) + IpHelper.drop_input_traffic_to_node(node, interfaces_tables) time.sleep(wakeup_timeout) - self.dropped_traffic.append(node) + self.dropped_traffic.add(node) @reporter.step("Start traffic to {node}") def restore_traffic(self, node: ClusterNode) -> None: IpHelper.restore_input_traffic_to_node(node=node) - index = self.dropped_traffic.index(node) - self.dropped_traffic.pop(index) + self.dropped_traffic.discard(node) @reporter.step("Restore blocked nodes") def restore_all_traffic(self): if not self.dropped_traffic: return parallel(self._restore_traffic_to_node, self.dropped_traffic) + self.dropped_traffic.clear() @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step("Hard reboot host {node} via magic SysRq option") @@ -516,17 +517,31 @@ class ClusterStateController: return disk_controller + @reporter.step("Restore traffic {node}") def _restore_traffic_to_node(self, node): IpHelper.restore_input_traffic_to_node(node) - def _parse_interfaces(self, nodes: list[ClusterNode], name_interface: str): - interfaces = [] + def _parse_interfaces(self, nodes: list[ClusterNode], name_interface: str) -> list[tuple]: + interfaces_and_tables = set() for node in nodes: - dict_interfaces = node.host.config.interfaces - for type, ip in dict_interfaces.items(): - if name_interface in type: - interfaces.append(ip) - return interfaces + shell = node.host.get_shell() + lines = shell.exec(f"ip r l table all | grep '{name_interface}'").stdout.splitlines() + + ips = [] + tables = [] + + for line in lines: + if "src" not in line or "table local" in line: + continue + parts = line.split() + ips.append(parts[-1]) + if "table" in line: + tables.append(parts[parts.index("table") + 1]) + tables.append(None) + + [interfaces_and_tables.add((ip, table)) for ip, table in itertools.product(ips, tables)] + + return interfaces_and_tables @reporter.step("Ping node") def _ping_host(self, node: ClusterNode): From 7d6768c83ff9d8169f1f73b01ae51b639db6c1cd Mon Sep 17 00:00:00 2001 From: anurindm Date: Thu, 28 Nov 2024 17:10:43 +0300 Subject: [PATCH 317/363] [#325] Added get nns records method to frostfs-adm Signed-off-by: Dmitry Anurin --- src/frostfs_testlib/cli/frostfs_adm/morph.py | 23 ++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/src/frostfs_testlib/cli/frostfs_adm/morph.py b/src/frostfs_testlib/cli/frostfs_adm/morph.py index 5e39cf4..bdf4a91 100644 --- a/src/frostfs_testlib/cli/frostfs_adm/morph.py +++ b/src/frostfs_testlib/cli/frostfs_adm/morph.py @@ -463,3 +463,26 @@ class FrostfsAdmMorph(CliCommand): "morph ape rm-rule-chain", **{param: value for param, value in locals().items() if param not 
in ["self"]}, ) + + def get_nns_records( + self, + name: str, + type: Optional[str] = None, + rpc_endpoint: Optional[str] = None, + alphabet_wallets: Optional[str] = None, + ) -> CommandResult: + """Returns domain record of the specified type + + Args: + name: Domain name + type: Domain name service record type(A|CNAME|SOA|TXT) + rpc_endpoint: N3 RPC node endpoint + alphabet_wallets: path to alphabet wallets dir + + Returns: + Command's result + """ + return self._execute( + "morph nns get-records", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) From 0e040d2722526c3a7ea092f6167b5324a87170f0 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Mon, 2 Dec 2024 14:18:17 +0300 Subject: [PATCH 318/363] [#330] Improve CURL generation and fix Boto3 logging Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/http/http_client.py | 100 +++++++++++++++++------- src/frostfs_testlib/utils/cli_utils.py | 3 + 2 files changed, 76 insertions(+), 27 deletions(-) diff --git a/src/frostfs_testlib/http/http_client.py b/src/frostfs_testlib/http/http_client.py index 3106273..0d1e0bd 100644 --- a/src/frostfs_testlib/http/http_client.py +++ b/src/frostfs_testlib/http/http_client.py @@ -1,6 +1,8 @@ +import io import json import logging import logging.config +from typing import IO import httpx @@ -40,7 +42,7 @@ class HttpClient: client = httpx.Client(timeout=timeout, transport=transport) response = client.request(method, url, **kwargs) - self._attach_response(response) + self._attach_response(response, **kwargs) logger.info(f"Response: {response.status_code} => {response.text}") if expected_status_code: @@ -51,47 +53,91 @@ class HttpClient: return response @classmethod - def _attach_response(cls, response: httpx.Response): - request = response.request - + def _parse_body(cls, readable: httpx.Request | httpx.Response) -> str | None: try: - request_headers = json.dumps(dict(request.headers), indent=4) - except json.JSONDecodeError: - request_headers = str(request.headers) - - try: - request_body = request.read() - try: - request_body = request_body.decode("utf-8") - except UnicodeDecodeError as e: - request_body = f"Unable to decode binary data to text using UTF-8 encoding: {str(e)}" + content = readable.read() except Exception as e: - request_body = f"Error reading request body: {str(e)}" + logger.warning(f"Unable to read file: {str(e)}") + return None - request_body = "" if request_body is None else request_body + if not content: + return None + + request_body = None try: - response_headers = json.dumps(dict(response.headers), indent=4) - except json.JSONDecodeError: - response_headers = str(response.headers) + request_body = json.loads(content) + except (json.JSONDecodeError, UnicodeDecodeError) as e: + logger.warning(f"Unable to convert body to json: {str(e)}") + + if request_body is not None: + return json.dumps(request_body, default=str, indent=4) + + try: + request_body = content.decode() + except UnicodeDecodeError as e: + logger.warning(f"Unable to decode binary data to text using UTF-8 encoding: {str(e)}") + + request_body = content if request_body is None else request_body + request_body = "" if len(request_body) > 1000 else request_body + + return request_body + + @classmethod + def _parse_files(cls, files: dict | None) -> str | None: + if not files: + return None + + filepaths = {} + + for name, file in files.items(): + if isinstance(file, io.IOBase): + filepaths[name] = file.name + + if isinstance(file, tuple): + filepaths[name] = file[1].name + + return 
json.dumps(filepaths, default=str, indent=4) + + @classmethod + def _attach_response(cls, response: httpx.Response, **kwargs): + request = response.request + request_headers = json.dumps(dict(request.headers), default=str, indent=4) + request_body = cls._parse_body(request) + + files = kwargs.get("files") + request_files = cls._parse_files(files) + + response_headers = json.dumps(dict(response.headers), default=str, indent=4) + response_body = cls._parse_body(response) report = ( f"Method: {request.method}\n\n" - f"URL: {request.url}\n\n" - f"Request Headers: {request_headers}\n\n" - f"Request Body: {request_body}\n\n" - f"Response Status Code: {response.status_code}\n\n" - f"Response Headers: {response_headers}\n\n" - f"Response Body: {response.text}\n\n" + + f"URL: {request.url}\n\n" + + f"Request Headers: {request_headers}\n\n" + + (f"Request Body: {request_body}\n\n" if request_body else "") + + (f"Request Files: {request_files}\n\n" if request_files else "") + + f"Response Status Code: {response.status_code}\n\n" + + f"Response Headers: {response_headers}\n\n" + + (f"Response Body: {response_body}\n\n" if response_body else "") ) - curl_request = cls._create_curl_request(request.url, request.method, request.headers, request_body) + curl_request = cls._create_curl_request(request.url, request.method, request.headers, request_body, files) reporter.attach(report, "Requests Info") reporter.attach(curl_request, "CURL") @classmethod - def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str) -> str: + def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str, files: dict = None) -> str: headers = " ".join(f'-H "{name.title()}: {value}"' for name, value in headers.items()) data = f" -d '{data}'" if data else "" + + if files: + for name, file in files.items(): + if isinstance(file, io.IOBase): + data += f' -F "{name}=@{file.name}"' + + if isinstance(file, tuple): + data += f' -F "{name}=@{file[1].name}"' + # Option -k means no verify SSL return f"curl {url} -X {method} {headers}{data} -k" diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index 32e4346..0f9fef2 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -80,6 +80,9 @@ def log_command_execution(cmd: str, output: Union[str, dict], params: Optional[d if not params: params = {} + if params.get("Body") and len(params.get("Body")) > 1000: + params["Body"] = "" + output_params = params try: From 8ec7e21e8450167d02875b3255ab9140f60facb2 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Tue, 3 Dec 2024 14:55:12 +0300 Subject: [PATCH 319/363] [#331] Fix type hints for service methods Signed-off-by: a.berezin --- src/frostfs_testlib/storage/cluster.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/storage/cluster.py b/src/frostfs_testlib/storage/cluster.py index 3ec4922..b67e34d 100644 --- a/src/frostfs_testlib/storage/cluster.py +++ b/src/frostfs_testlib/storage/cluster.py @@ -91,10 +91,10 @@ class ClusterNode: config_str = yaml.dump(new_config) shell.exec(f"echo '{config_str}' | sudo tee {config_file_path}") - def config(self, service_type: ServiceClass) -> ServiceConfigurationYml: + def config(self, service_type: type[ServiceClass]) -> ServiceConfigurationYml: return self.service(service_type).config - def service(self, service_type: ServiceClass) -> ServiceClass: + def service(self, service_type: type[ServiceClass]) -> ServiceClass: """ Get a service 
cluster node of specified type. From b3d05c5c28ab6727e3e56bdba7de05e8ed9fb6b1 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 15 Nov 2024 21:01:34 +0300 Subject: [PATCH 320/363] [#326] Automation of PATCH method in GRPC Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/cli/frostfs_cli/object.py | 47 +++++++++++ src/frostfs_testlib/storage/constants.py | 2 + .../storage/dataclasses/ape.py | 1 + .../grpc_operations/implementations/object.py | 83 +++++++++++++++++++ .../storage/grpc_operations/interfaces.py | 32 +++++++ 5 files changed, 165 insertions(+) diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py index 1857987..0c00563 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/object.py +++ b/src/frostfs_testlib/cli/frostfs_cli/object.py @@ -276,6 +276,53 @@ class FrostfsCliObject(CliCommand): **{param: value for param, value in locals().items() if param not in ["self"]}, ) + def patch( + self, + rpc_endpoint: str, + cid: str, + oid: str, + range: list[str] = None, + payload: list[str] = None, + new_attrs: Optional[str] = None, + replace_attrs: bool = False, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + trace: bool = False, + ttl: Optional[int] = None, + wallet: Optional[str] = None, + xhdr: Optional[dict] = None, + ) -> CommandResult: + """ + PATCH an object. + + Args: + rpc_endpoint: Remote node address (as 'multiaddr' or ':') + cid: Container ID + oid: Object ID + range: An array of ranges in which to replace data in the format [offset1:length1, offset2:length2] + payload: An array of file paths to be applied in each range + new_attrs: Attributes to be changed in the format Key1=Value1,Key2=Value2 + replace_attrs: Replace all attributes completely with new ones specified in new_attrs + address: Address of wallet account + bearer: File with signed JSON or binary encoded bearer token + generate_key: Generate new private key + session: Filepath to a JSON- or binary-encoded token of the object RANGE session + timeout: Timeout for the operation + trace: Generate trace ID and print it + ttl: TTL value in request meta header (default 2) + wallet: WIF (NEP-2) string or path to the wallet or binary key + xhdr: Dict with request X-Headers + Returns: + (str): ID of patched Object + """ + return self._execute( + "object patch", + **{param: value for param, value in locals().items() if param not in ["self"]}, + ) + def range( self, rpc_endpoint: str, diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 2cffd3a..39c6b66 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -23,4 +23,6 @@ class PlacementRule: DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X" SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X" REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X" + REP_1_FOR_2_NODES_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 2 FROM * AS X" DEFAULT_EC_PLACEMENT_RULE = "EC 3.1" + EC_1_1_FOR_2_NODES_PLACEMENT_RULE = "EC 1.1 IN X CBF 1 SELECT 2 FROM * AS X" diff --git a/src/frostfs_testlib/storage/dataclasses/ape.py b/src/frostfs_testlib/storage/dataclasses/ape.py index ef2e1f2..b7b5dfc 100644 --- a/src/frostfs_testlib/storage/dataclasses/ape.py +++ b/src/frostfs_testlib/storage/dataclasses/ape.py @@ -13,6 +13,7 @@ FROSTFS_CONTRACT_CACHE_TIMEOUT = 30 class 
ObjectOperations(HumanReadableEnum): PUT = "object.put" + PATCH = "object.patch" GET = "object.get" HEAD = "object.head" GET_RANGE = "object.range" diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py index 0e14aec..f31f223 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py @@ -206,6 +206,11 @@ class ObjectOperations(interfaces.ObjectInterface): hash_type=hash_type, timeout=timeout, ) + + if range: + # Cut off the range and return only hash + return result.stdout.split(":")[1].strip() + return result.stdout @reporter.step("Head object") @@ -407,6 +412,57 @@ class ObjectOperations(interfaces.ObjectInterface): oid = id_str.split(":")[1] return oid.strip() + @reporter.step("Patch object") + def patch( + self, + cid: str, + oid: str, + endpoint: str, + ranges: list[str] = None, + payloads: list[str] = None, + new_attrs: Optional[str] = None, + replace_attrs: bool = False, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + trace: bool = False, + ) -> str: + """ + PATCH an object. + + Args: + cid: ID of Container where we get the Object from + oid: Object ID + endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key + ranges: An array of ranges in which to replace data in the format [offset1:length1, offset2:length2] + payloads: An array of file paths to be applied in each range + new_attrs: Attributes to be changed in the format "key1=value1,key2=value2" + replace_attrs: Replace all attributes completely with new ones specified in new_attrs + bearer: Path to Bearer Token file, appends to `--bearer` key + xhdr: Request X-Headers in form of Key=Value + session: Path to a JSON-encoded container session token + timeout: Timeout for the operation + trace: Generate trace ID and print it + Returns: + (str): ID of patched Object + """ + result = self.cli.object.patch( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + range=ranges, + payload=payloads, + new_attrs=new_attrs, + replace_attrs=replace_attrs, + bearer=bearer, + xhdr=xhdr, + session=session, + timeout=timeout, + trace=trace, + ) + return result.stdout.split(":")[1].strip() + @reporter.step("Put object to random node") def put_to_random_node( self, @@ -622,3 +678,30 @@ class ObjectOperations(interfaces.ObjectInterface): ] return object_nodes + + @reporter.step("Search parts of object") + def parts( + self, + cid: str, + oid: str, + alive_node: ClusterNode, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> list[str]: + endpoint = alive_node.storage_node.get_rpc_endpoint() + response = self.cli.object.nodes( + rpc_endpoint=endpoint, + cid=cid, + oid=oid, + bearer=bearer, + ttl=1 if is_direct else None, + json=True, + xhdr=xhdr, + timeout=timeout, + verify_presence_all=verify_presence_all, + ) + response_json = json.loads(response.stdout) + return [data_object["object_id"] for data_object in response_json["data_objects"]] diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces.py b/src/frostfs_testlib/storage/grpc_operations/interfaces.py index c293c2d..07fe52f 100644 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces.py +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces.py @@ -198,6 
+198,24 @@ class ObjectInterface(ABC): ) -> str: pass + @abstractmethod + def patch( + self, + cid: str, + oid: str, + endpoint: str, + ranges: Optional[list[str]] = None, + payloads: Optional[list[str]] = None, + new_attrs: Optional[str] = None, + replace_attrs: bool = False, + bearer: Optional[str] = None, + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + trace: bool = False, + ) -> str: + pass + @abstractmethod def put_to_random_node( self, @@ -264,6 +282,20 @@ class ObjectInterface(ABC): ) -> List[ClusterNode]: pass + @abstractmethod + def parts( + self, + cid: str, + oid: str, + alive_node: ClusterNode, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + timeout: Optional[str] = None, + ) -> List[str]: + pass + class ContainerInterface(ABC): @abstractmethod From 61353cb38c723a3d3513de96a4ae7f142ed3c637 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Thu, 5 Dec 2024 14:17:25 +0300 Subject: [PATCH 321/363] [#332] Fix `files` param in http client Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/http/http_client.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/http/http_client.py b/src/frostfs_testlib/http/http_client.py index 0d1e0bd..6008989 100644 --- a/src/frostfs_testlib/http/http_client.py +++ b/src/frostfs_testlib/http/http_client.py @@ -2,7 +2,7 @@ import io import json import logging import logging.config -from typing import IO +from typing import Mapping, Sequence import httpx @@ -84,13 +84,20 @@ class HttpClient: return request_body @classmethod - def _parse_files(cls, files: dict | None) -> str | None: + def _parse_files(cls, files: Mapping | Sequence | None) -> str | None: if not files: return None filepaths = {} - for name, file in files.items(): + if isinstance(files, Sequence): + items = files + elif isinstance(files, Mapping): + items = files.items() + else: + raise TypeError(f"'files' must be either Sequence or Mapping, got: {type(files).__name__}") + + for name, file in items: if isinstance(file, io.IOBase): filepaths[name] = file.name From ee7d9df4a9eddf7da21b66a2070227c0aaa71ad2 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Thu, 5 Dec 2024 16:34:36 +0300 Subject: [PATCH 322/363] [#333] Fix `files` param in http client part two Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/http/http_client.py | 26 ++++++++++--------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/src/frostfs_testlib/http/http_client.py b/src/frostfs_testlib/http/http_client.py index 6008989..a3e3e54 100644 --- a/src/frostfs_testlib/http/http_client.py +++ b/src/frostfs_testlib/http/http_client.py @@ -84,12 +84,12 @@ class HttpClient: return request_body @classmethod - def _parse_files(cls, files: Mapping | Sequence | None) -> str | None: - if not files: - return None - + def _parse_files(cls, files: Mapping | Sequence | None) -> dict: filepaths = {} + if not files: + return filepaths + if isinstance(files, Sequence): items = files elif isinstance(files, Mapping): @@ -100,11 +100,10 @@ class HttpClient: for name, file in items: if isinstance(file, io.IOBase): filepaths[name] = file.name - - if isinstance(file, tuple): + elif isinstance(file, Sequence): filepaths[name] = file[1].name - return json.dumps(filepaths, default=str, indent=4) + return filepaths @classmethod def _attach_response(cls, response: httpx.Response, **kwargs): @@ -128,23 +127,18 @@ class HttpClient: + 
f"Response Headers: {response_headers}\n\n" + (f"Response Body: {response_body}\n\n" if response_body else "") ) - curl_request = cls._create_curl_request(request.url, request.method, request.headers, request_body, files) + curl_request = cls._create_curl_request(request.url, request.method, request.headers, request_body, request_files) reporter.attach(report, "Requests Info") reporter.attach(curl_request, "CURL") @classmethod - def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str, files: dict = None) -> str: + def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str, files: dict) -> str: headers = " ".join(f'-H "{name.title()}: {value}"' for name, value in headers.items()) data = f" -d '{data}'" if data else "" - if files: - for name, file in files.items(): - if isinstance(file, io.IOBase): - data += f' -F "{name}=@{file.name}"' - - if isinstance(file, tuple): - data += f' -F "{name}=@{file[1].name}"' + for name, path in files.items(): + data += f' -F "{name}=@{path}"' # Option -k means no verify SSL return f"curl {url} -X {method} {headers}{data} -k" From 0ebb8453290b26f85bce7091dd6ea307df5f0d9a Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Fri, 6 Dec 2024 10:50:34 +0300 Subject: [PATCH 323/363] [#335] Fixed iam boto3 client --- src/frostfs_testlib/s3/boto3_client.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index e7f2c35..c680f17 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -86,6 +86,7 @@ class Boto3ClientWrapper(S3ClientWrapper): service_name="iam", aws_access_key_id=self.access_key_id, aws_secret_access_key=self.secret_access_key, + region_name=self.region, endpoint_url=self.iam_endpoint, verify=False, ) From 8ff1e72499f49054b7cf0d8fd05f87b040e5d32f Mon Sep 17 00:00:00 2001 From: Ekaterina Chernitsyna Date: Fri, 13 Dec 2024 10:45:14 +0300 Subject: [PATCH 324/363] [#337] Add rule chain error Signed-off-by: Ekaterina Chernitsyna --- src/frostfs_testlib/resources/error_patterns.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py index 9b5e8e4..4c22648 100644 --- a/src/frostfs_testlib/resources/error_patterns.py +++ b/src/frostfs_testlib/resources/error_patterns.py @@ -9,6 +9,7 @@ OBJECT_ALREADY_REMOVED = "code = 2052.*message = object already removed" SESSION_NOT_FOUND = "code = 4096.*message = session token not found" OUT_OF_RANGE = "code = 2053.*message = out of range" EXPIRED_SESSION_TOKEN = "code = 4097.*message = expired session token" +ADD_CHAIN_ERROR = "code = 5120 message = apemanager access denied" # TODO: Change to codes with message # OBJECT_IS_LOCKED = "code = 2050.*message = object is locked" # LOCK_NON_REGULAR_OBJECT = "code = 2051.*message = ..." 
will be available once 2092 is fixed From cd15be3b7c41448280217aac741f2fc1efefac95 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 15 Nov 2024 21:03:21 +0300 Subject: [PATCH 325/363] [#334] Automation of PATCH method in S3 Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/http/http_client.py | 6 +- src/frostfs_testlib/s3/aws_cli_client.py | 7 +- src/frostfs_testlib/s3/boto3_client.py | 7 +- src/frostfs_testlib/s3/interfaces.py | 4 +- src/frostfs_testlib/s3/s3_http_client.py | 127 ++++++++++++++++++++++ src/frostfs_testlib/steps/s3/s3_helper.py | 24 ++++ 6 files changed, 162 insertions(+), 13 deletions(-) create mode 100644 src/frostfs_testlib/s3/s3_http_client.py diff --git a/src/frostfs_testlib/http/http_client.py b/src/frostfs_testlib/http/http_client.py index a3e3e54..c3e5fae 100644 --- a/src/frostfs_testlib/http/http_client.py +++ b/src/frostfs_testlib/http/http_client.py @@ -46,9 +46,9 @@ class HttpClient: logger.info(f"Response: {response.status_code} => {response.text}") if expected_status_code: - assert response.status_code == expected_status_code, ( - f"Got {response.status_code} response code" f" while {expected_status_code} expected" - ) + assert ( + response.status_code == expected_status_code + ), f"Got {response.status_code} response code while {expected_status_code} expected" return response diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/s3/aws_cli_client.py index 2ac6d68..4196c77 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/s3/aws_cli_client.py @@ -171,7 +171,7 @@ class AwsCliClient(S3ClientWrapper): return response.get("TagSet") @reporter.step("Get bucket acl") - def get_bucket_acl(self, bucket: str) -> list: + def get_bucket_acl(self, bucket: str) -> dict: if bucket.startswith("-") or " " in bucket: bucket = f'"{bucket}"' @@ -179,8 +179,7 @@ class AwsCliClient(S3ClientWrapper): f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}" ) output = self.local_shell.exec(cmd).stdout - response = self._to_json(output) - return response.get("Grants") + return self._to_json(output) @reporter.step("Get bucket location") def get_bucket_location(self, bucket: str) -> dict: @@ -861,7 +860,7 @@ class AwsCliClient(S3ClientWrapper): return response["Parts"] @reporter.step("Complete multipart upload S3") - def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: + def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict: if bucket.startswith("-") or " " in bucket: bucket = f'"{bucket}"' diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/s3/boto3_client.py index c680f17..6b6c74e 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/s3/boto3_client.py @@ -230,14 +230,13 @@ class Boto3ClientWrapper(S3ClientWrapper): return response.get("TagSet") @reporter.step("Get bucket acl") - def get_bucket_acl(self, bucket: str) -> list: - response = self._exec_request( + def get_bucket_acl(self, bucket: str) -> dict: + return self._exec_request( self.boto3_client.get_bucket_acl, params={"Bucket": bucket}, endpoint=self.s3gate_endpoint, profile=self.profile, ) - return response.get("Grants") @reporter.step("Delete bucket tagging") def delete_bucket_tagging(self, bucket: str) -> None: @@ -705,7 +704,7 @@ class Boto3ClientWrapper(S3ClientWrapper): return response["Parts"] @reporter.step("Complete multipart upload S3") - def 
complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: + def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict: parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts] params = self._convert_to_s3_params(locals(), exclude=["parts"]) params["MultipartUpload"] = {"Parts": parts} diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/s3/interfaces.py index c3d99eb..7ce9f31 100644 --- a/src/frostfs_testlib/s3/interfaces.py +++ b/src/frostfs_testlib/s3/interfaces.py @@ -128,7 +128,7 @@ class S3ClientWrapper(HumanReadableABC): """Deletes the tags from the bucket.""" @abstractmethod - def get_bucket_acl(self, bucket: str) -> list: + def get_bucket_acl(self, bucket: str) -> dict: """This implementation of the GET action uses the acl subresource to return the access control list (ACL) of a bucket.""" @abstractmethod @@ -336,7 +336,7 @@ class S3ClientWrapper(HumanReadableABC): """Lists the parts that have been uploaded for a specific multipart upload.""" @abstractmethod - def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None: + def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict: """Completes a multipart upload by assembling previously uploaded parts.""" @abstractmethod diff --git a/src/frostfs_testlib/s3/s3_http_client.py b/src/frostfs_testlib/s3/s3_http_client.py new file mode 100644 index 0000000..a34c380 --- /dev/null +++ b/src/frostfs_testlib/s3/s3_http_client.py @@ -0,0 +1,127 @@ +import hashlib +import logging +import xml.etree.ElementTree as ET + +import httpx +from botocore.auth import SigV4Auth +from botocore.awsrequest import AWSRequest +from botocore.credentials import Credentials + +from frostfs_testlib import reporter +from frostfs_testlib.http.http_client import HttpClient +from frostfs_testlib.utils.file_utils import TestFile + +logger = logging.getLogger("NeoLogger") + +DEFAULT_TIMEOUT = 60.0 + + +class S3HttpClient: + def __init__( + self, s3gate_endpoint: str, access_key_id: str, secret_access_key: str, profile: str = "default", region: str = "us-east-1" + ) -> None: + self.http_client = HttpClient() + self.s3gate_endpoint = s3gate_endpoint + self.credentials = Credentials(access_key_id, secret_access_key) + self.profile = profile + self.region = region + self.service = "s3" + self.signature = SigV4Auth(self.credentials, self.service, self.region) + + def _to_s3_header(self, header: str) -> dict: + replacement_map = { + "Acl": "ACL", + "_": "-", + } + + result = header + if not header.startswith("x_amz"): + result = header.title() + + for find, replace in replacement_map.items(): + result = result.replace(find, replace) + + return result + + def _convert_to_s3_headers(self, scope: dict, exclude: list[str] = None): + exclude = ["self", "cls"] if not exclude else exclude + ["self", "cls"] + return {self._to_s3_header(header): value for header, value in scope.items() if header not in exclude and value is not None} + + def _create_aws_request( + self, method: str, url: str, headers: dict, content: str | bytes | TestFile = None, params: dict = None + ) -> AWSRequest: + data = b"" + + if content is not None: + if isinstance(content, TestFile): + with open(content, "rb") as io_content: + data = io_content.read() + elif isinstance(content, str): + data = bytes(content, encoding="utf-8") + elif isinstance(content, bytes): + data = content + else: + raise TypeError(f"Content expected as a 
string, bytes or TestFile object, got: {content}") + + headers["X-Amz-Content-SHA256"] = hashlib.sha256(data).hexdigest() + aws_request = AWSRequest(method, url, headers, data, params) + self.signature.add_auth(aws_request) + + return aws_request + + def _exec_request( + self, + method: str, + url: str, + headers: dict, + content: str | bytes | TestFile = None, + params: dict = None, + timeout: float = DEFAULT_TIMEOUT, + ) -> dict: + aws_request = self._create_aws_request(method, url, headers, content, params) + response = self.http_client.send( + aws_request.method, + aws_request.url, + headers=dict(aws_request.headers), + data=aws_request.data, + params=aws_request.params, + timeout=timeout, + ) + + try: + response.raise_for_status() + except httpx.HTTPStatusError: + raise httpx.HTTPStatusError(response.text, request=response.request, response=response) + + root = ET.fromstring(response.read()) + data = { + "LastModified": root.find(".//LastModified").text, + "ETag": root.find(".//ETag").text, + } + + if response.headers.get("x-amz-version-id"): + data["VersionId"] = response.headers.get("x-amz-version-id") + + return data + + @reporter.step("Patch object S3") + def patch_object( + self, + bucket: str, + key: str, + content: str | bytes | TestFile, + content_range: str, + version_id: str = None, + if_match: str = None, + if_unmodified_since: str = None, + x_amz_expected_bucket_owner: str = None, + timeout: float = DEFAULT_TIMEOUT, + ) -> dict: + if content_range and not content_range.startswith("bytes"): + content_range = f"bytes {content_range}/*" + + url = f"{self.s3gate_endpoint}/{bucket}/{key}" + headers = self._convert_to_s3_headers(locals(), exclude=["bucket", "key", "content", "version_id", "timeout"]) + params = {"VersionId": version_id} if version_id is not None else None + + return self._exec_request("PATCH", url, headers, content, params, timeout=timeout) diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3/s3_helper.py index dbf48d3..7949f2d 100644 --- a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3/s3_helper.py @@ -12,6 +12,7 @@ from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.container import search_nodes_with_container from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.dataclasses.wallet import WalletInfo +from frostfs_testlib.utils.file_utils import TestFile, get_file_hash logger = logging.getLogger("NeoLogger") @@ -185,3 +186,26 @@ def search_nodes_with_bucket( break nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster) return nodes_list + + +def get_bytes_relative_to_object(value: int | str, object_size: int = None, part_size: int = None) -> int: + if isinstance(value, int): + return value + + if "part" not in value and "object" not in value: + return int(value) + + if object_size is not None: + value = value.replace("object", str(object_size)) + + if part_size is not None: + value = value.replace("part", str(part_size)) + + return int(eval(value)) + + +def get_range_relative_to_object(rng: str, object_size: int = None, part_size: int = None, int_values: bool = False) -> str | int: + start, end = rng.split(":") + start = get_bytes_relative_to_object(start, object_size, part_size) + end = get_bytes_relative_to_object(end, object_size, part_size) + return (start, end) if int_values else f"bytes {start}-{end}/*" From cc7bd4ffc9dd59115144bdd4cf81ff07ffe8b372 Mon Sep 17 00:00:00 2001 From: 
Dmitriy Zayakin Date: Tue, 17 Dec 2024 13:55:15 +0300 Subject: [PATCH 326/363] [#339] Added ns args for func container create Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/steps/cli/container.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py index 809b39a..db896ce 100644 --- a/src/frostfs_testlib/steps/cli/container.py +++ b/src/frostfs_testlib/steps/cli/container.py @@ -111,6 +111,8 @@ def create_container( options: Optional[dict] = None, await_mode: bool = True, wait_for_creation: bool = True, + nns_zone: Optional[str] = None, + nns_name: Optional[str] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, ) -> str: """ @@ -143,6 +145,8 @@ def create_container( result = cli.container.create( rpc_endpoint=endpoint, policy=rule, + nns_name=nns_name, + nns_zone=nns_zone, basic_acl=basic_acl, attributes=attributes, name=name, From 335eed85b152e2e8ac147bc95cc2af88beaad7ff Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Mon, 16 Dec 2024 22:06:00 +0300 Subject: [PATCH 327/363] [#338] Added parameter word_count to method get_logs Signed-off-by: Ilyas Niyazov --- src/frostfs_testlib/hosting/docker_host.py | 1 + src/frostfs_testlib/hosting/interfaces.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/src/frostfs_testlib/hosting/docker_host.py b/src/frostfs_testlib/hosting/docker_host.py index 01dc6b5..d458b0a 100644 --- a/src/frostfs_testlib/hosting/docker_host.py +++ b/src/frostfs_testlib/hosting/docker_host.py @@ -250,6 +250,7 @@ class DockerHost(Host): unit: Optional[str] = None, exclude_filter: Optional[str] = None, priority: Optional[str] = None, + word_count: bool = None, ) -> str: client = self._get_docker_client() filtered_logs = "" diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index 6d1e5da..f58d856 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -324,6 +324,7 @@ class Host(ABC): unit: Optional[str] = None, exclude_filter: Optional[str] = None, priority: Optional[str] = None, + word_count: bool = None, ) -> str: """Get logs from host filtered by regex. @@ -334,6 +335,7 @@ class Host(ABC): unit: required unit. priority: logs level, 0 - emergency, 7 - debug. All messages with that code and higher. For example, if we specify the -p 2 option, journalctl will show all messages with levels 2, 1 and 0. + word_count: output type, expected values: lines, bytes, json Returns: Found entries as str if any found. 
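To make the PATCH automation from the patches above concrete, here is a minimal usage sketch combining the raw S3 HTTP client and the range helper introduced in [#334]. It is a sketch under stated assumptions, not part of any patch: the gateway endpoint, bucket, key and credentials are hypothetical placeholders, and the import paths are the ones in effect at this point in the series (they move under clients/ in [#340] below).

# Minimal sketch: replace a byte range of a stored S3 object, assuming a
# reachable S3 gateway and existing credentials (all values below are
# hypothetical placeholders).
from frostfs_testlib.s3.s3_http_client import S3HttpClient
from frostfs_testlib.steps.s3.s3_helper import get_range_relative_to_object

s3_http = S3HttpClient(
    s3gate_endpoint="http://localhost:8084",  # hypothetical gateway address
    access_key_id="<access-key-id>",
    secret_access_key="<secret-access-key>",
)

# "0:15" is rendered as the Content-Range header value "bytes 0-15/*",
# so the payload below is exactly 16 bytes to cover that range.
content_range = get_range_relative_to_object("0:15", object_size=1024)
response = s3_http.patch_object(
    bucket="test-bucket",  # hypothetical bucket
    key="test-object",     # hypothetical key
    content=b"0123456789abcdef",
    content_range=content_range,
)
print(response["ETag"], response["LastModified"])

Under the hood the request is signed with SigV4Auth and the XML response is reduced to its LastModified/ETag fields (plus VersionId for versioned buckets), so the sketch prints exactly what _exec_request returns.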
From dc5a9e7bb9336a9b331c119a09615e68f4703d01 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Tue, 17 Dec 2024 18:16:54 +0300 Subject: [PATCH 328/363] [#340] Move s3 and http directories to avoid conflict with requests Signed-off-by: Kirill Sosnovskikh --- pyproject.toml | 2 +- src/frostfs_testlib/clients/__init__.py | 5 +++++ src/frostfs_testlib/{ => clients}/http/__init__.py | 0 src/frostfs_testlib/{ => clients}/http/http_client.py | 0 src/frostfs_testlib/clients/s3/__init__.py | 1 + src/frostfs_testlib/{ => clients}/s3/aws_cli_client.py | 2 +- src/frostfs_testlib/{ => clients}/s3/boto3_client.py | 2 +- src/frostfs_testlib/{ => clients}/s3/curl_bucket_resolver.py | 2 +- src/frostfs_testlib/{ => clients}/s3/interfaces.py | 0 src/frostfs_testlib/{ => clients}/s3/s3_http_client.py | 2 +- src/frostfs_testlib/s3/__init__.py | 3 --- src/frostfs_testlib/steps/cli/container.py | 2 -- src/frostfs_testlib/steps/http/__init__.py | 0 src/frostfs_testlib/steps/{http => }/http_gate.py | 2 +- src/frostfs_testlib/steps/{s3 => }/s3_helper.py | 4 +--- .../storage/grpc_operations/implementations/container.py | 2 +- tests/test_dataclasses.py | 2 +- 17 files changed, 15 insertions(+), 16 deletions(-) create mode 100644 src/frostfs_testlib/clients/__init__.py rename src/frostfs_testlib/{ => clients}/http/__init__.py (100%) rename src/frostfs_testlib/{ => clients}/http/http_client.py (100%) create mode 100644 src/frostfs_testlib/clients/s3/__init__.py rename src/frostfs_testlib/{ => clients}/s3/aws_cli_client.py (99%) rename src/frostfs_testlib/{ => clients}/s3/boto3_client.py (99%) rename src/frostfs_testlib/{ => clients}/s3/curl_bucket_resolver.py (88%) rename src/frostfs_testlib/{ => clients}/s3/interfaces.py (100%) rename src/frostfs_testlib/{ => clients}/s3/s3_http_client.py (98%) delete mode 100644 src/frostfs_testlib/s3/__init__.py delete mode 100644 src/frostfs_testlib/steps/http/__init__.py rename src/frostfs_testlib/steps/{http => }/http_gate.py (99%) rename src/frostfs_testlib/steps/{s3 => }/s3_helper.py (97%) diff --git a/pyproject.toml b/pyproject.toml index 3faa637..2778f8a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -62,7 +62,7 @@ authmate = "frostfs_testlib.credentials.authmate_s3_provider:AuthmateS3Credentia wallet_factory = "frostfs_testlib.credentials.wallet_factory_provider:WalletFactoryProvider" [project.entry-points."frostfs.testlib.bucket_cid_resolver"] -frostfs = "frostfs_testlib.s3.curl_bucket_resolver:CurlBucketContainerResolver" +frostfs = "frostfs_testlib.clients.s3.curl_bucket_resolver:CurlBucketContainerResolver" [tool.isort] profile = "black" diff --git a/src/frostfs_testlib/clients/__init__.py b/src/frostfs_testlib/clients/__init__.py new file mode 100644 index 0000000..e46766b --- /dev/null +++ b/src/frostfs_testlib/clients/__init__.py @@ -0,0 +1,5 @@ +from frostfs_testlib.clients.http.http_client import HttpClient +from frostfs_testlib.clients.s3.aws_cli_client import AwsCliClient +from frostfs_testlib.clients.s3.boto3_client import Boto3ClientWrapper +from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper +from frostfs_testlib.clients.s3.s3_http_client import S3HttpClient diff --git a/src/frostfs_testlib/http/__init__.py b/src/frostfs_testlib/clients/http/__init__.py similarity index 100% rename from src/frostfs_testlib/http/__init__.py rename to src/frostfs_testlib/clients/http/__init__.py diff --git a/src/frostfs_testlib/http/http_client.py b/src/frostfs_testlib/clients/http/http_client.py similarity index 100% rename from 
src/frostfs_testlib/http/http_client.py rename to src/frostfs_testlib/clients/http/http_client.py diff --git a/src/frostfs_testlib/clients/s3/__init__.py b/src/frostfs_testlib/clients/s3/__init__.py new file mode 100644 index 0000000..65a3990 --- /dev/null +++ b/src/frostfs_testlib/clients/s3/__init__.py @@ -0,0 +1 @@ +from frostfs_testlib.clients.s3.interfaces import BucketContainerResolver, S3ClientWrapper, VersioningStatus diff --git a/src/frostfs_testlib/s3/aws_cli_client.py b/src/frostfs_testlib/clients/s3/aws_cli_client.py similarity index 99% rename from src/frostfs_testlib/s3/aws_cli_client.py rename to src/frostfs_testlib/clients/s3/aws_cli_client.py index 4196c77..3496b2b 100644 --- a/src/frostfs_testlib/s3/aws_cli_client.py +++ b/src/frostfs_testlib/clients/s3/aws_cli_client.py @@ -6,8 +6,8 @@ from time import sleep from typing import Literal, Optional, Union from frostfs_testlib import reporter +from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME -from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.shell import CommandOptions from frostfs_testlib.shell.local_shell import LocalShell from frostfs_testlib.utils import string_utils diff --git a/src/frostfs_testlib/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py similarity index 99% rename from src/frostfs_testlib/s3/boto3_client.py rename to src/frostfs_testlib/clients/s3/boto3_client.py index 6b6c74e..53e7ffa 100644 --- a/src/frostfs_testlib/s3/boto3_client.py +++ b/src/frostfs_testlib/clients/s3/boto3_client.py @@ -13,8 +13,8 @@ from botocore.exceptions import ClientError from mypy_boto3_s3 import S3Client from frostfs_testlib import reporter +from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME -from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict from frostfs_testlib.utils import string_utils # TODO: Refactor this code to use shell instead of _cmd_run diff --git a/src/frostfs_testlib/s3/curl_bucket_resolver.py b/src/frostfs_testlib/clients/s3/curl_bucket_resolver.py similarity index 88% rename from src/frostfs_testlib/s3/curl_bucket_resolver.py rename to src/frostfs_testlib/clients/s3/curl_bucket_resolver.py index b713e79..4d845cf 100644 --- a/src/frostfs_testlib/s3/curl_bucket_resolver.py +++ b/src/frostfs_testlib/clients/s3/curl_bucket_resolver.py @@ -1,7 +1,7 @@ import re from frostfs_testlib.cli.generic_cli import GenericCli -from frostfs_testlib.s3.interfaces import BucketContainerResolver +from frostfs_testlib.clients.s3 import BucketContainerResolver from frostfs_testlib.storage.cluster import ClusterNode diff --git a/src/frostfs_testlib/s3/interfaces.py b/src/frostfs_testlib/clients/s3/interfaces.py similarity index 100% rename from src/frostfs_testlib/s3/interfaces.py rename to src/frostfs_testlib/clients/s3/interfaces.py diff --git a/src/frostfs_testlib/s3/s3_http_client.py b/src/frostfs_testlib/clients/s3/s3_http_client.py similarity index 98% rename from src/frostfs_testlib/s3/s3_http_client.py rename to src/frostfs_testlib/clients/s3/s3_http_client.py index a34c380..b83e7a8 100644 --- a/src/frostfs_testlib/s3/s3_http_client.py +++ b/src/frostfs_testlib/clients/s3/s3_http_client.py @@ 
-8,7 +8,7 @@ from botocore.awsrequest import AWSRequest from botocore.credentials import Credentials from frostfs_testlib import reporter -from frostfs_testlib.http.http_client import HttpClient +from frostfs_testlib.clients import HttpClient from frostfs_testlib.utils.file_utils import TestFile logger = logging.getLogger("NeoLogger") diff --git a/src/frostfs_testlib/s3/__init__.py b/src/frostfs_testlib/s3/__init__.py deleted file mode 100644 index 32426c2..0000000 --- a/src/frostfs_testlib/s3/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from frostfs_testlib.s3.aws_cli_client import AwsCliClient -from frostfs_testlib.s3.boto3_client import Boto3ClientWrapper -from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus diff --git a/src/frostfs_testlib/steps/cli/container.py b/src/frostfs_testlib/steps/cli/container.py index db896ce..092b1a3 100644 --- a/src/frostfs_testlib/steps/cli/container.py +++ b/src/frostfs_testlib/steps/cli/container.py @@ -7,9 +7,7 @@ from typing import Optional, Union from frostfs_testlib import reporter from frostfs_testlib.cli import FrostfsCli -from frostfs_testlib.plugins import load_plugin from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC -from frostfs_testlib.s3.interfaces import BucketContainerResolver from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.object import put_object, put_object_to_random_node from frostfs_testlib.storage.cluster import Cluster, ClusterNode diff --git a/src/frostfs_testlib/steps/http/__init__.py b/src/frostfs_testlib/steps/http/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/frostfs_testlib/steps/http/http_gate.py b/src/frostfs_testlib/steps/http_gate.py similarity index 99% rename from src/frostfs_testlib/steps/http/http_gate.py rename to src/frostfs_testlib/steps/http_gate.py index 117cded..4e712c1 100644 --- a/src/frostfs_testlib/steps/http/http_gate.py +++ b/src/frostfs_testlib/steps/http_gate.py @@ -12,8 +12,8 @@ import requests from frostfs_testlib import reporter from frostfs_testlib.cli import GenericCli +from frostfs_testlib.clients.s3.aws_cli_client import command_options from frostfs_testlib.resources.common import ASSETS_DIR, SIMPLE_OBJECT_SIZE -from frostfs_testlib.s3.aws_cli_client import command_options from frostfs_testlib.shell import Shell from frostfs_testlib.shell.local_shell import LocalShell from frostfs_testlib.steps.cli.object import get_object diff --git a/src/frostfs_testlib/steps/s3/s3_helper.py b/src/frostfs_testlib/steps/s3_helper.py similarity index 97% rename from src/frostfs_testlib/steps/s3/s3_helper.py rename to src/frostfs_testlib/steps/s3_helper.py index 7949f2d..c3092df 100644 --- a/src/frostfs_testlib/steps/s3/s3_helper.py +++ b/src/frostfs_testlib/steps/s3_helper.py @@ -6,13 +6,11 @@ from typing import Optional from dateutil.parser import parse from frostfs_testlib import reporter -from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus -from frostfs_testlib.s3.interfaces import BucketContainerResolver +from frostfs_testlib.clients.s3 import BucketContainerResolver, S3ClientWrapper, VersioningStatus from frostfs_testlib.shell import Shell from frostfs_testlib.steps.cli.container import search_nodes_with_container from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.dataclasses.wallet import WalletInfo -from frostfs_testlib.utils.file_utils import TestFile, get_file_hash logger = logging.getLogger("NeoLogger") diff --git 
a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py index 7a637d7..86cac26 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py @@ -5,9 +5,9 @@ from typing import List, Optional, Union from frostfs_testlib import reporter from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.clients.s3 import BucketContainerResolver from frostfs_testlib.plugins import load_plugin from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT -from frostfs_testlib.s3.interfaces import BucketContainerResolver from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.grpc_operations import interfaces from frostfs_testlib.utils import json_utils diff --git a/tests/test_dataclasses.py b/tests/test_dataclasses.py index 19f3832..677aed4 100644 --- a/tests/test_dataclasses.py +++ b/tests/test_dataclasses.py @@ -2,7 +2,7 @@ from typing import Any import pytest -from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper +from frostfs_testlib.clients import AwsCliClient, Boto3ClientWrapper from frostfs_testlib.storage.dataclasses.acl import EACLRole from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode from frostfs_testlib.storage.dataclasses.object_size import ObjectSize From 0479701258ba115fce9ee3e91783b112b473a4ca Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Wed, 18 Dec 2024 17:35:14 +0300 Subject: [PATCH 329/363] [#341] Add test for multipart object in Test_http_object testsuite Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/steps/http_gate.py | 48 ++++++++++---------------- 1 file changed, 19 insertions(+), 29 deletions(-) diff --git a/src/frostfs_testlib/steps/http_gate.py b/src/frostfs_testlib/steps/http_gate.py index 4e712c1..51b0301 100644 --- a/src/frostfs_testlib/steps/http_gate.py +++ b/src/frostfs_testlib/steps/http_gate.py @@ -38,34 +38,34 @@ def get_via_http_gate( """ This function gets given object from HTTP gate cid: container id to get object from - oid: object ID + oid: object id / object key node: node to make request request_path: (optional) http request, if omitted - use default [{endpoint}/get/{cid}/{oid}] """ - # if `request_path` parameter omitted, use default - if request_path is None: - request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}" - else: + request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}" + if request_path: request = f"{node.http_gate.get_endpoint()}{request_path}" - resp = requests.get(request, stream=True, timeout=timeout, verify=False) + response = requests.get(request, stream=True, timeout=timeout, verify=False) - if not resp.ok: + if not response.ok: raise Exception( f"""Failed to get object via HTTP gate: - request: {resp.request.path_url}, - response: {resp.text}, - headers: {resp.headers}, - status code: {resp.status_code} {resp.reason}""" + request: {response.request.path_url}, + response: {response.text}, + headers: {response.headers}, + status code: {response.status_code} {response.reason}""" ) logger.info(f"Request: {request}") _attach_allure_step(request, response.status_code) test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}")) with open(test_file, "wb") as file: - shutil.copyfileobj(resp.raw, file) + for chunk in
response.iter_content(chunk_size=8192): + file.write(chunk) + return test_file @@ -117,12 +117,12 @@ def get_via_http_gate_by_attribute( endpoint: http gate endpoint request_path: (optional) http request path, if omitted - use default [{endpoint}/get_by_attribute/{Key}/{Value}] """ + attr_name = list(attribute.keys())[0] attr_value = quote_plus(str(attribute.get(attr_name))) - # if `request_path` parameter ommited, use default - if request_path is None: - request = f"{node.http_gate.get_endpoint()}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}" - else: + + request = f"{node.http_gate.get_endpoint()}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}" + if request_path: request = f"{node.http_gate.get_endpoint()}{request_path}" resp = requests.get(request, stream=True, timeout=timeout, verify=False) @@ -357,19 +357,9 @@ def try_to_get_object_via_passed_request_and_expect_error( ) -> None: try: if attrs is None: - get_via_http_gate( - cid=cid, - oid=oid, - node=node, - request_path=http_request_path, - ) + get_via_http_gate(cid, oid, node, http_request_path) else: - get_via_http_gate_by_attribute( - cid=cid, - attribute=attrs, - node=node, - request_path=http_request_path, - ) + get_via_http_gate_by_attribute(cid, attrs, node, http_request_path) raise AssertionError(f"Expected error on getting object with cid: {cid}") except Exception as err: match = error_pattern.casefold() in str(err).casefold() From 6e951443edbb822e5cc7ac5a4b32b341cb114634 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Tue, 24 Dec 2024 11:16:38 +0300 Subject: [PATCH 330/363] [#342] Remove try-catch from delete block Signed-off-by: a.berezin --- .../implementations/container.py | 25 ++++++++----------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py index 86cac26..75af00c 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py @@ -181,20 +181,17 @@ class ContainerOperations(interfaces.ContainerInterface): force: bool = False, trace: bool = False, ): - try: - return self.cli.container.delete( - rpc_endpoint=endpoint, - cid=cid, - address=address, - await_mode=await_mode, - session=session, - ttl=ttl, - xhdr=xhdr, - force=force, - trace=trace, - ).stdout - except RuntimeError as e: - print(f"Error request:\n{e}") + return self.cli.container.delete( + rpc_endpoint=endpoint, + cid=cid, + address=address, + await_mode=await_mode, + session=session, + ttl=ttl, + xhdr=xhdr, + force=force, + trace=trace, + ).stdout @reporter.step("Get container") def get( From 9e3380d519be5f59279e5530b1e0a84a89286bb8 Mon Sep 17 00:00:00 2001 From: Vitaliy Potyarkin Date: Tue, 10 Dec 2024 15:42:13 +0300 Subject: [PATCH 331/363] [#336] Refine CODEOWNERS settings Signed-off-by: Vitaliy Potyarkin --- CODEOWNERS | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CODEOWNERS b/CODEOWNERS index 4a621d3..519ca42 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1 +1,3 @@ -* @JuliaKovshova @abereziny @d.zayakin @anikeev-yadro @anurindm @ylukoyan @i.niyazov +.* @TrueCloudLab/qa-committers +.forgejo/.* @potyarkin +Makefile @potyarkin From 0a3de927a2cf2c89c7d29f633083ef079f773cbc Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Tue, 10 Dec 2024 11:47:25 +0300 Subject: [PATCH 332/363] [#343] Extend testsuites for PATCH method Signed-off-by:
Kirill Sosnovskikh --- src/frostfs_testlib/cli/frostfs_cli/object.py | 3 +- .../clients/s3/s3_http_client.py | 28 +++++++++++++++++-- .../storage/dataclasses/ape.py | 1 + 3 files changed, 28 insertions(+), 4 deletions(-) diff --git a/src/frostfs_testlib/cli/frostfs_cli/object.py b/src/frostfs_testlib/cli/frostfs_cli/object.py index 0c00563..e536544 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/object.py +++ b/src/frostfs_testlib/cli/frostfs_cli/object.py @@ -315,8 +315,9 @@ class FrostfsCliObject(CliCommand): ttl: TTL value in request meta header (default 2) wallet: WIF (NEP-2) string or path to the wallet or binary key xhdr: Dict with request X-Headers + Returns: - (str): ID of patched Object + Command's result. """ return self._execute( "object patch", diff --git a/src/frostfs_testlib/clients/s3/s3_http_client.py b/src/frostfs_testlib/clients/s3/s3_http_client.py index b83e7a8..f6f423d 100644 --- a/src/frostfs_testlib/clients/s3/s3_http_client.py +++ b/src/frostfs_testlib/clients/s3/s3_http_client.py @@ -21,12 +21,16 @@ class S3HttpClient: self, s3gate_endpoint: str, access_key_id: str, secret_access_key: str, profile: str = "default", region: str = "us-east-1" ) -> None: self.http_client = HttpClient() - self.s3gate_endpoint = s3gate_endpoint self.credentials = Credentials(access_key_id, secret_access_key) self.profile = profile self.region = region - self.service = "s3" - self.signature = SigV4Auth(self.credentials, self.service, self.region) + + self.iam_endpoint: str = None + self.s3gate_endpoint: str = None + self.service: str = None + self.signature: SigV4Auth = None + + self.set_endpoint(s3gate_endpoint) def _to_s3_header(self, header: str) -> dict: replacement_map = { @@ -104,6 +108,24 @@ class S3HttpClient: return data + @reporter.step("Set endpoint S3 to {s3gate_endpoint}") + def set_endpoint(self, s3gate_endpoint: str): + if self.s3gate_endpoint == s3gate_endpoint: + return + + self.s3gate_endpoint = s3gate_endpoint + self.service = "s3" + self.signature = SigV4Auth(self.credentials, self.service, self.region) + + @reporter.step("Set endpoint IAM to {iam_endpoint}") + def set_iam_endpoint(self, iam_endpoint: str): + if self.iam_endpoint == iam_endpoint: + return + + self.iam_endpoint = iam_endpoint + self.service = "iam" + self.signature = SigV4Auth(self.credentials, self.service, self.region) + @reporter.step("Patch object S3") def patch_object( self, diff --git a/src/frostfs_testlib/storage/dataclasses/ape.py b/src/frostfs_testlib/storage/dataclasses/ape.py index b7b5dfc..1199435 100644 --- a/src/frostfs_testlib/storage/dataclasses/ape.py +++ b/src/frostfs_testlib/storage/dataclasses/ape.py @@ -52,6 +52,7 @@ class Operations: SEARCH_OBJECT = "SearchObject" HEAD_OBJECT = "HeadObject" PUT_OBJECT = "PutObject" + PATCH_OBJECT = "PatchObject" class Verb(HumanReadableEnum): From 6fe7fef44b100b976c5a72aad76477a277975b05 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Wed, 25 Dec 2024 19:25:14 +0300 Subject: [PATCH 333/363] [#344] Update ifaces Signed-off-by: a.berezin --- src/frostfs_testlib/cli/netmap_parser.py | 4 ++-- src/frostfs_testlib/steps/cli/object.py | 6 +++++- .../storage/controllers/cluster_state_controller.py | 8 +++++--- .../storage/grpc_operations/implementations/chunks.py | 6 +++--- .../storage/grpc_operations/implementations/object.py | 3 ++- 5 files changed, 17 insertions(+), 10 deletions(-) diff --git a/src/frostfs_testlib/cli/netmap_parser.py b/src/frostfs_testlib/cli/netmap_parser.py index 23ac4da..db6f55f 100644 --- 
a/src/frostfs_testlib/cli/netmap_parser.py +++ b/src/frostfs_testlib/cli/netmap_parser.py @@ -1,7 +1,7 @@ import re from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo, NodeStatus +from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeNetInfo, NodeNetmapInfo, NodeStatus class NetmapParser: @@ -85,7 +85,7 @@ class NetmapParser: @staticmethod def snapshot_one_node(output: str, cluster_node: ClusterNode) -> NodeNetmapInfo | None: snapshot_nodes = NetmapParser.snapshot_all_nodes(output=output) - snapshot_node = [node for node in snapshot_nodes if node.node == cluster_node.host_ip] + snapshot_node = [node for node in snapshot_nodes if node.node == cluster_node.get_interface(Interfaces.MGMT)] if not snapshot_node: return None return snapshot_node[0] diff --git a/src/frostfs_testlib/steps/cli/object.py b/src/frostfs_testlib/steps/cli/object.py index f28de06..7f8391d 100644 --- a/src/frostfs_testlib/steps/cli/object.py +++ b/src/frostfs_testlib/steps/cli/object.py @@ -12,6 +12,7 @@ from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.shell import Shell from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing import wait_for_success from frostfs_testlib.utils import json_utils @@ -752,7 +753,10 @@ def get_object_nodes( ] object_nodes = [ - cluster_node for netmap_node in netmap_nodes for cluster_node in cluster.cluster_nodes if netmap_node.node == cluster_node.host_ip + cluster_node + for netmap_node in netmap_nodes + for cluster_node in cluster.cluster_nodes + if netmap_node.node == cluster_node.get_interface(Interfaces.MGMT) ] return object_nodes diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 67e4d60..3a10ded 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -19,7 +19,7 @@ from frostfs_testlib.steps.node_management import include_node_to_network_map, r from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode from frostfs_testlib.storage.controllers.disk_controller import DiskController from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass -from frostfs_testlib.storage.dataclasses.storage_object_info import NodeStatus +from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeStatus from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing import parallel from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for_success @@ -454,9 +454,11 @@ class ClusterStateController: if not checker_node: checker_node = cluster_node netmap = NetmapParser.snapshot_all_nodes(frostfs_cli.netmap.snapshot(checker_node.storage_node.get_rpc_endpoint()).stdout) - netmap = [node for node in netmap if cluster_node.host_ip == node.node] + netmap = [node for node in netmap if cluster_node.get_interface(Interfaces.MGMT) == node.node] if status == NodeStatus.OFFLINE: - assert cluster_node.host_ip not in netmap, f"{cluster_node.host_ip} not in Offline" + assert ( 
+ cluster_node.get_interface(Interfaces.MGMT) not in netmap + ), f"{cluster_node.get_interface(Interfaces.MGMT)} not in Offline" else: assert netmap[0].node_status == status, f"Node status should be '{status}', but was '{netmap[0].node_status}'" diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py index 7f3161c..ad45855 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py @@ -6,7 +6,7 @@ from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT from frostfs_testlib.storage.cluster import Cluster, ClusterNode from frostfs_testlib.storage.controllers.shards_watcher import ShardsWatcher -from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo +from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, Interfaces, NodeNetmapInfo from frostfs_testlib.storage.grpc_operations import interfaces from frostfs_testlib.testing.test_control import wait_for_success from frostfs_testlib.utils.cli_utils import parse_netmap_output @@ -30,7 +30,7 @@ class ChunksOperations(interfaces.ChunksInterface): result = [] for node_info in netmap: for cluster_node in cluster.cluster_nodes: - if node_info.node == cluster_node.host_ip: + if node_info.node == cluster_node.get_interface(Interfaces.MGMT): result.append(cluster_node) return result @@ -40,7 +40,7 @@ class ChunksOperations(interfaces.ChunksInterface): for node_info in netmap: if node_info.node_id in chunk.confirmed_nodes: for cluster_node in cluster.cluster_nodes: - if cluster_node.host_ip == node_info.node: + if cluster_node.get_interface(Interfaces.MGMT) == node_info.node: return (cluster_node, node_info) @wait_for_success(300, 5, fail_testcase=None) diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py index f31f223..be8a470 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/object.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/object.py @@ -11,6 +11,7 @@ from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT from frostfs_testlib.resources.common import ASSETS_DIR from frostfs_testlib.shell.interfaces import CommandResult from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces from frostfs_testlib.storage.grpc_operations import interfaces from frostfs_testlib.storage.grpc_operations.implementations.chunks import ChunksOperations from frostfs_testlib.testing.test_control import wait_for_success @@ -674,7 +675,7 @@ class ObjectOperations(interfaces.ObjectInterface): cluster_node for netmap_node in netmap_nodes for cluster_node in cluster.cluster_nodes - if netmap_node.node == cluster_node.host_ip + if netmap_node.node == cluster_node.get_interface(Interfaces.MGMT) ] return object_nodes From 974836f1bd91a3fc567b7d64b853f051e53d7cec Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Mon, 13 Jan 2025 12:58:29 +0300 Subject: [PATCH 334/363] [#346] Added correct exception in Chunks parse Signed-off-by: Dmitriy Zayakin --- .../storage/grpc_operations/implementations/chunks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py 
b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py index ad45855..0d787e2 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/chunks.py @@ -161,5 +161,5 @@ class ChunksOperations(interfaces.ChunksInterface): def _parse_object_nodes(self, object_nodes: str) -> list[Chunk]: parse_result = json.loads(object_nodes) if parse_result.get("errors"): - raise parse_result["errors"] + raise RuntimeError(", ".join(parse_result["errors"])) return [Chunk(**chunk) for chunk in parse_result["data_objects"]] From 5a291c5b7f9374a7f9c8b479158024e73459616d Mon Sep 17 00:00:00 2001 From: "m.malygina" Date: Mon, 13 Jan 2025 16:32:47 +0300 Subject: [PATCH 335/363] [#347] remove stderr check Signed-off-by: m.malygina --- src/frostfs_testlib/processes/remote_process.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/processes/remote_process.py b/src/frostfs_testlib/processes/remote_process.py index 5624940..071675a 100644 --- a/src/frostfs_testlib/processes/remote_process.py +++ b/src/frostfs_testlib/processes/remote_process.py @@ -193,7 +193,7 @@ class RemoteProcess: ) if "No such file or directory" in terminal.stderr: return None - elif terminal.stderr or terminal.return_code != 0: + elif terminal.return_code != 0: raise AssertionError(f"cat process {file} was not successful: {terminal.stderr}") return terminal.stdout From daf186690beff8d4f8bafbbdfa7aedd1c458317d Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 10 Jan 2025 14:29:03 +0300 Subject: [PATCH 336/363] [#345] Fix curl request generation Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/clients/http/http_client.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/clients/http/http_client.py b/src/frostfs_testlib/clients/http/http_client.py index c3e5fae..aebd5ef 100644 --- a/src/frostfs_testlib/clients/http/http_client.py +++ b/src/frostfs_testlib/clients/http/http_client.py @@ -134,9 +134,10 @@ class HttpClient: @classmethod def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str, files: dict) -> str: - headers = " ".join(f'-H "{name.title()}: {value}"' for name, value in headers.items()) - data = f" -d '{data}'" if data else "" + excluded_headers = {"Accept-Encoding", "Connection", "User-Agent", "Content-Length"} + headers = " ".join(f"-H '{header.title()}: {value}'" for header, value in headers.items() if header.title() not in excluded_headers) + data = f" -d '{data}'" if data else "" for name, path in files.items(): data += f' -F "{name}=@{path}"' From 80dd8d0b169dbbbd875c03b753f119ad2fce382a Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Wed, 15 Jan 2025 16:31:54 +0300 Subject: [PATCH 337/363] [#348] Fixed check of fields in S3 aws/boto3 methods related to policies Signed-off-by: y.lukoyanova --- src/frostfs_testlib/clients/s3/aws_cli_client.py | 12 ++++++------ src/frostfs_testlib/clients/s3/boto3_client.py | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/frostfs_testlib/clients/s3/aws_cli_client.py b/src/frostfs_testlib/clients/s3/aws_cli_client.py index 3496b2b..accc289 100644 --- a/src/frostfs_testlib/clients/s3/aws_cli_client.py +++ b/src/frostfs_testlib/clients/s3/aws_cli_client.py @@ -1227,7 +1227,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert 
response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" + assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" return response @@ -1239,7 +1239,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" + assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" return response @@ -1264,7 +1264,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" + assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" return response @@ -1276,7 +1276,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert response.get("Groups"), f"Expected Groups in response:\n{response}" + assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" return response @@ -1288,7 +1288,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert response.get("Groups"), f"Expected Groups in response:\n{response}" + assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" return response @@ -1324,7 +1324,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" + assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" return response diff --git a/src/frostfs_testlib/clients/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py index 53e7ffa..890b4e9 100644 --- a/src/frostfs_testlib/clients/s3/boto3_client.py +++ b/src/frostfs_testlib/clients/s3/boto3_client.py @@ -1091,7 +1091,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" + assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" return response @reporter.step("Lists all managed policies that are attached to the specified IAM user") @@ -1102,7 +1102,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}" + assert "AttachedPolicies" in response.keys(), f"Expected AttachedPolicies in response:\n{response}" return response @reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to") @@ -1127,7 +1127,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" + assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" return response @reporter.step("Lists the IAM groups") @@ -1137,7 +1137,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert response.get("Groups"), f"Expected Groups in response:\n{response}" + assert "Groups" in response.keys(), f"Expected Groups in 
response:\n{response}" return response @reporter.step("Lists the IAM groups that the specified IAM user belongs to") @@ -1148,7 +1148,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert response.get("Groups"), f"Expected Groups in response:\n{response}" + assert "Groups" in response.keys(), f"Expected Groups in response:\n{response}" return response @reporter.step("Lists all the managed policies that are available in your AWS account") @@ -1180,7 +1180,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}" + assert "PolicyNames" in response.keys(), f"Expected PolicyNames in response:\n{response}" return response @reporter.step("Lists the IAM users") From aed20e02accb3656ebf2b480fa7b884de6768f7d Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Fri, 17 Jan 2025 17:37:51 +0300 Subject: [PATCH 338/363] [#349] Fixed hook pytest-collect-modifyitems Signed-off-by: Ilyas Niyazov --- src/frostfs_testlib/__init__.py | 2 +- src/frostfs_testlib/hooks.py | 19 +++++++++++++++++-- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/__init__.py b/src/frostfs_testlib/__init__.py index 1ceb972..e557a79 100644 --- a/src/frostfs_testlib/__init__.py +++ b/src/frostfs_testlib/__init__.py @@ -1,4 +1,4 @@ __version__ = "2.0.1" from .fixtures import configure_testlib, hosting, temp_directory -from .hooks import pytest_collection_modifyitems +from .hooks import pytest_add_frostfs_marker, pytest_collection_modifyitems diff --git a/src/frostfs_testlib/hooks.py b/src/frostfs_testlib/hooks.py index 6830e78..1ada660 100644 --- a/src/frostfs_testlib/hooks.py +++ b/src/frostfs_testlib/hooks.py @@ -1,8 +1,8 @@ import pytest -@pytest.hookimpl -def pytest_collection_modifyitems(items: list[pytest.Item]): +@pytest.hookimpl(specname="pytest_collection_modifyitems") +def pytest_add_frostfs_marker(items: list[pytest.Item]): # All tests which reside in frostfs nodeid are granted with frostfs marker, excluding # nodeid = full path of the test # 1. plugins @@ -11,3 +11,18 @@ def pytest_collection_modifyitems(items: list[pytest.Item]): location = item.location[0] if "frostfs" in location and "plugin" not in location and "testlib" not in location: item.add_marker("frostfs") + + +# pytest hook. 
Do not rename +@pytest.hookimpl(trylast=True) +def pytest_collection_modifyitems(items: list[pytest.Item]): + # Change order of tests based on @pytest.mark.order() marker + def order(item: pytest.Item) -> int: + order_marker = item.get_closest_marker("order") + if order_marker and (len(order_marker.args) != 1 or not isinstance(order_marker.args[0], int)): + raise RuntimeError("Incorrect usage of pytest.mark.order") + + order_value = order_marker.args[0] if order_marker else 0 + return order_value + + items.sort(key=lambda item: order(item)) From 0015ea7f93a1a102cd08fbbd5276bc9ca508c620 Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Thu, 23 Jan 2025 17:46:47 +0300 Subject: [PATCH 339/363] [#350] Add ape rule for load config Signed-off-by: a.berezin --- src/frostfs_testlib/load/load_config.py | 4 ++- tests/test_load_config.py | 39 +++++++++++++++++++++++-- 2 files changed, 39 insertions(+), 4 deletions(-) diff --git a/src/frostfs_testlib/load/load_config.py b/src/frostfs_testlib/load/load_config.py index 15103e0..3830203 100644 --- a/src/frostfs_testlib/load/load_config.py +++ b/src/frostfs_testlib/load/load_config.py @@ -182,8 +182,10 @@ class Preset(MetaConfig): pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False) # Workers count for preset workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False) - # Acl for container/buckets + # TODO: Deprecated. Acl for container/buckets acl: Optional[str] = metadata_field(all_load_scenarios, "acl", None, False) + # APE rule for containers instead of deprecated ACL + rule: Optional[list[str]] = metadata_field(grpc_preset_scenarios, "rule", None, False, formatter=force_list) # ------ GRPC ------ # Amount of containers which should be created diff --git a/tests/test_load_config.py b/tests/test_load_config.py index 883b1f2..fbeb587 100644 --- a/tests/test_load_config.py +++ b/tests/test_load_config.py @@ -6,10 +6,7 @@ import pytest from frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType, Preset, ReadFrom from frostfs_testlib.load.runners import DefaultRunner from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME -from frostfs_testlib.storage.cluster import ClusterNode from frostfs_testlib.storage.controllers.background_load_controller import BackgroundLoadController -from frostfs_testlib.storage.dataclasses.frostfs_services import StorageNode -from frostfs_testlib.storage.dataclasses.node_base import NodeBase @dataclass @@ -129,6 +126,8 @@ class TestLoadConfig: "--size '11'", "--acl 'acl'", "--preload_obj '13'", + "--retry '24'", + "--rule 'rule' --rule 'rule_2'", "--out 'pregen_json'", "--workers '7'", "--containers '16'", @@ -161,6 +160,8 @@ class TestLoadConfig: expected_preset_args = [ "--size '11'", "--preload_obj '13'", + "--retry '24'", + "--rule 'rule' --rule 'rule_2'", "--out 'pregen_json'", "--workers '7'", "--containers '16'", @@ -317,6 +318,8 @@ class TestLoadConfig: "--no-verify-ssl", "--size '11'", "--preload_obj '13'", + "--retry '24'", + "--rule 'rule' --rule 'rule_2'", "--out 'pregen_json'", "--workers '7'", "--containers '16'", @@ -350,6 +353,8 @@ class TestLoadConfig: expected_preset_args = [ "--size '11'", "--preload_obj '13'", + "--retry '24'", + "--rule 'rule' --rule 'rule_2'", "--out 'pregen_json'", "--workers '7'", "--containers '16'", @@ -415,6 +420,26 @@ class TestLoadConfig: self._check_preset_params(load_params, params) + @pytest.mark.parametrize( + "load_type, input, value, 
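# Hypothetical usage of the ordering hook above: the marker takes exactly one
# int argument, unmarked tests implicitly sort at 0, and anything else trips
# the RuntimeError guard. (Register the "order" marker in pytest.ini to avoid
# unknown-marker warnings.)

import pytest


@pytest.mark.order(1)
def test_runs_last():
    ...


@pytest.mark.order(-1)
def test_runs_first():
    ...


def test_runs_in_between():  # no marker -> order 0
    ...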
params", + [ + (LoadType.gRPC, ["A C ", " B"], ["A C", "B"], [f"--rule 'A C' --rule 'B'"]), + (LoadType.gRPC, " A ", ["A"], ["--rule 'A'"]), + (LoadType.gRPC, " A , B ", ["A , B"], ["--rule 'A , B'"]), + (LoadType.gRPC, [" A", "B "], ["A", "B"], ["--rule 'A' --rule 'B'"]), + (LoadType.gRPC, None, None, []), + (LoadType.S3, ["A C ", " B"], ["A C", "B"], []), + (LoadType.S3, None, None, []), + ], + ) + def test_ape_list_parsing_formatter(self, load_type, input, value, params): + load_params = LoadParams(load_type) + load_params.preset = Preset() + load_params.preset.rule = input + assert load_params.preset.rule == value + + self._check_preset_params(load_params, params) + @pytest.mark.parametrize("load_params, load_type", [(LoadScenario.VERIFY, LoadType.S3)], indirect=True) def test_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams): expected_env_vars = { @@ -444,6 +469,8 @@ class TestLoadConfig: expected_preset_args = [ "--size '0'", "--preload_obj '0'", + "--retry '0'", + "--rule ''", "--out ''", "--workers '0'", "--containers '0'", @@ -475,6 +502,8 @@ class TestLoadConfig: expected_preset_args = [ "--size '0'", "--preload_obj '0'", + "--retry '0'", + "--rule ''", "--out ''", "--workers '0'", "--containers '0'", @@ -582,6 +611,8 @@ class TestLoadConfig: expected_preset_args = [ "--size '0'", "--preload_obj '0'", + "--retry '0'", + "--rule ''", "--out ''", "--workers '0'", "--containers '0'", @@ -613,6 +644,8 @@ class TestLoadConfig: expected_preset_args = [ "--size '0'", "--preload_obj '0'", + "--retry '0'", + "--rule ''", "--out ''", "--workers '0'", "--containers '0'", From ace9564243b8e7c4740c296dcfe0f55a06e719cd Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Thu, 30 Jan 2025 11:16:23 +0300 Subject: [PATCH 340/363] [#352] Fix versions parsing Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/utils/version_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/utils/version_utils.py b/src/frostfs_testlib/utils/version_utils.py index 490abb0..0676085 100644 --- a/src/frostfs_testlib/utils/version_utils.py +++ b/src/frostfs_testlib/utils/version_utils.py @@ -64,7 +64,7 @@ def parallel_binary_verions(host: Host) -> dict[str, str]: try: result = shell.exec(f"{binary_path} {binary['param']}") version = parse_version(result.stdout) or parse_version(result.stderr) or "Unknown" - versions_at_host[binary_name] = version + versions_at_host[binary_name] = version.strip() except Exception as exc: logger.error(f"Cannot get version for {binary_path} because of\n{exc}") versions_at_host[binary_name] = "Unknown" From b44705eb2fd23ca0db313b07e8b5616367ce0d8f Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Thu, 30 Jan 2025 14:38:22 +0300 Subject: [PATCH 341/363] [#353] Added Netmap command for CliWrapper Signed-off-by: Dmitriy Zayakin --- requirements.txt | 1 + src/frostfs_testlib/cli/frostfs_cli/netmap.py | 4 + src/frostfs_testlib/cli/netmap_parser.py | 29 +- .../dataclasses/storage_object_info.py | 36 +- .../grpc_operations/client_wrappers.py | 12 +- .../implementations/__init__.py | 4 + .../grpc_operations/implementations/netmap.py | 171 +++++++ .../storage/grpc_operations/interfaces.py | 424 ------------------ .../grpc_operations/interfaces/__init__.py | 4 + .../grpc_operations/interfaces/chunks.py | 79 ++++ .../grpc_operations/interfaces/container.py | 125 ++++++ .../grpc_operations/interfaces/netmap.py | 89 ++++ .../grpc_operations/interfaces/object.py | 223 +++++++++ .../grpc_operations/interfaces_wrapper.py | 10 + 14 
files changed, 770 insertions(+), 441 deletions(-) create mode 100644 src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py delete mode 100644 src/frostfs_testlib/storage/grpc_operations/interfaces.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/interfaces/container.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/interfaces/object.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py diff --git a/requirements.txt b/requirements.txt index e012366..a0bcc11 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,6 +10,7 @@ tenacity==8.0.1 pytest==7.1.2 boto3==1.35.30 boto3-stubs[essential]==1.35.30 +pydantic==2.10.6 # Dev dependencies black==22.8.0 diff --git a/src/frostfs_testlib/cli/frostfs_cli/netmap.py b/src/frostfs_testlib/cli/frostfs_cli/netmap.py index d219940..cd197d3 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/netmap.py +++ b/src/frostfs_testlib/cli/frostfs_cli/netmap.py @@ -12,6 +12,7 @@ class FrostfsCliNetmap(CliCommand): address: Optional[str] = None, generate_key: bool = False, ttl: Optional[int] = None, + trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> CommandResult: @@ -42,6 +43,7 @@ class FrostfsCliNetmap(CliCommand): address: Optional[str] = None, generate_key: bool = False, ttl: Optional[int] = None, + trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> CommandResult: @@ -73,6 +75,7 @@ class FrostfsCliNetmap(CliCommand): generate_key: bool = False, json: bool = False, ttl: Optional[int] = None, + trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> CommandResult: @@ -104,6 +107,7 @@ class FrostfsCliNetmap(CliCommand): address: Optional[str] = None, generate_key: bool = False, ttl: Optional[int] = None, + trace: Optional[bool] = False, xhdr: Optional[dict] = None, timeout: Optional[str] = None, ) -> CommandResult: diff --git a/src/frostfs_testlib/cli/netmap_parser.py b/src/frostfs_testlib/cli/netmap_parser.py index db6f55f..2c97c3a 100644 --- a/src/frostfs_testlib/cli/netmap_parser.py +++ b/src/frostfs_testlib/cli/netmap_parser.py @@ -1,7 +1,7 @@ import re from frostfs_testlib.storage.cluster import ClusterNode -from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeNetInfo, NodeNetmapInfo, NodeStatus +from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces, NodeInfo, NodeNetInfo, NodeNetmapInfo, NodeStatus class NetmapParser: @@ -20,8 +20,6 @@ class NetmapParser: "withdrawal_fee": r"Withdrawal fee: (?P\d+)", "homomorphic_hashing_disabled": r"Homomorphic hashing disabled: (?Ptrue|false)", "maintenance_mode_allowed": r"Maintenance mode allowed: (?Ptrue|false)", - "eigen_trust_alpha": r"EigenTrustAlpha: (?P\d+\w+$)", - "eigen_trust_iterations": r"EigenTrustIterations: (?P\d+)", } parse_result = {} @@ -64,7 +62,7 @@ class NetmapParser: for node in netmap_nodes: for key, regex in regexes.items(): search_result = re.search(regex, node, flags=re.MULTILINE) - if search_result == None: + if search_result is None: result_netmap[key] = None continue if key == "node_data_ips": @@ -83,9 +81,22 @@ class NetmapParser: return dataclasses_netmap @staticmethod - def 
snapshot_one_node(output: str, cluster_node: ClusterNode) -> NodeNetmapInfo | None: + def snapshot_one_node(output: str, rpc_endpoint: str) -> NodeNetmapInfo | None: snapshot_nodes = NetmapParser.snapshot_all_nodes(output=output) - snapshot_node = [node for node in snapshot_nodes if node.node == cluster_node.get_interface(Interfaces.MGMT)] - if not snapshot_node: - return None - return snapshot_node[0] + for snapshot in snapshot_nodes: + for endpoint in snapshot.external_address: + if rpc_endpoint.split(":")[0] in endpoint: + return snapshot + + @staticmethod + def node_info(output: dict) -> NodeNetmapInfo: + data_dict = {"attributes": {}} + + for key, value in output.items(): + if key != "attributes": + data_dict[key] = value + + for attribute in output["attributes"]: + data_dict["attributes"][attribute["key"]] = attribute["value"] + + return NodeInfo(**data_dict) diff --git a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py index 55a8388..4c303fc 100644 --- a/src/frostfs_testlib/storage/dataclasses/storage_object_info.py +++ b/src/frostfs_testlib/storage/dataclasses/storage_object_info.py @@ -1,6 +1,9 @@ +import re from dataclasses import dataclass from typing import Optional +from pydantic import BaseModel, Field, field_validator + from frostfs_testlib.storage.dataclasses.wallet import WalletInfo from frostfs_testlib.testing.readable import HumanReadableEnum @@ -75,8 +78,37 @@ class NodeNetInfo: withdrawal_fee: str = None homomorphic_hashing_disabled: str = None maintenance_mode_allowed: str = None - eigen_trust_alpha: str = None - eigen_trust_iterations: str = None + + +class Attributes(BaseModel): + cluster_name: str = Field(alias="ClusterName") + continent: str = Field(alias="Continent") + country: str = Field(alias="Country") + country_code: str = Field(alias="CountryCode") + external_addr: list[str] = Field(alias="ExternalAddr") + location: str = Field(alias="Location") + node: str = Field(alias="Node") + subdiv: str = Field(alias="SubDiv") + subdiv_code: str = Field(alias="SubDivCode") + un_locode: str = Field(alias="UN-LOCODE") + role: str = Field(alias="role") + + @field_validator("external_addr", mode="before") + @classmethod + def convert_external_addr(cls, value: str) -> list[str]: + return [f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", value)] + + +class NodeInfo(BaseModel): + public_key: str = Field(alias="publicKey") + addresses: list[str] = Field(alias="addresses") + state: str = Field(alias="state") + attributes: Attributes = Field(alias="attributes") + + @field_validator("addresses", mode="before") + @classmethod + def convert_external_addr(cls, value: str) -> list[str]: + return [f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", ",".join(value))] @dataclass diff --git a/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py b/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py index 8cef23b..c1e3a31 100644 --- a/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py +++ b/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py @@ -1,14 +1,14 @@ from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli -from frostfs_testlib.storage.grpc_operations import interfaces -from frostfs_testlib.storage.grpc_operations.implementations import container, object +from frostfs_testlib.storage.grpc_operations import implementations, interfaces, interfaces_wrapper -class 
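# The models above normalize multiaddr strings such as
# "/ip4/10.78.128.96/tcp/8080" into "ip:port" pairs before validation, using
# the pydantic v2 API pinned in requirements.txt. The conversion in
# isolation, on a stand-in model:

import re

from pydantic import BaseModel, Field, field_validator


class EndpointsStub(BaseModel):  # stand-in for the NodeInfo address handling
    addresses: list[str] = Field(alias="addresses")

    @field_validator("addresses", mode="before")
    @classmethod
    def convert(cls, value: list[str]) -> list[str]:
        return [f"{ip}:{port}" for ip, port in re.findall(r"/ip4/([\d\.]+)/(?:tcp|tls)/(\d+)", ",".join(value))]


print(EndpointsStub(addresses=["/ip4/10.78.128.96/tcp/8080", "/ip4/10.78.128.96/tls/8082"]).addresses)
# ['10.78.128.96:8080', '10.78.128.96:8082']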
CliClientWrapper(interfaces.GrpcClientWrapper): +class CliClientWrapper(interfaces_wrapper.GrpcClientWrapper): def __init__(self, cli: FrostfsCli) -> None: self.cli = cli - self.object: interfaces.ObjectInterface = object.ObjectOperations(self.cli) - self.container: interfaces.ContainerInterface = container.ContainerOperations(self.cli) + self.object: interfaces.ObjectInterface = implementations.ObjectOperations(self.cli) + self.container: interfaces.ContainerInterface = implementations.ContainerOperations(self.cli) + self.netmap: interfaces.NetmapInterface = implementations.NetmapOperations(self.cli) -class RpcClientWrapper(interfaces.GrpcClientWrapper): +class RpcClientWrapper(interfaces_wrapper.GrpcClientWrapper): pass # The next series diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py b/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py index e69de29..18e8ae5 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py @@ -0,0 +1,4 @@ +from .chunks import ChunksOperations +from .container import ContainerOperations +from .netmap import NetmapOperations +from .object import ObjectOperations diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py b/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py new file mode 100644 index 0000000..905171b --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py @@ -0,0 +1,171 @@ +import json as module_json +from typing import List, Optional + +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.cli.netmap_parser import NetmapParser +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT +from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo + +from .. import interfaces + + +class NetmapOperations(interfaces.NetmapInterface): + def __init__(self, cli: FrostfsCli) -> None: + self.cli = cli + + def epoch( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + trace: Optional[bool] = True, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> int: + """ + Get current epoch number. + """ + output = ( + self.cli.netmap.epoch( + rpc_endpoint=rpc_endpoint, + wallet=wallet, + address=address, + generate_key=generate_key, + ttl=ttl, + trace=trace, + xhdr=xhdr, + timeout=timeout, + ) + .stdout.split("Trace ID")[0] + .strip() + ) + + return int(output) + + def netinfo( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + trace: Optional[bool] = True, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> NodeNetInfo: + """ + Get target node info. 
+ """ + output = ( + self.cli.netmap.netinfo( + rpc_endpoint=rpc_endpoint, + wallet=wallet, + address=address, + generate_key=generate_key, + ttl=ttl, + trace=trace, + xhdr=xhdr, + timeout=timeout, + ) + .stdout.split("Trace ID")[0] + .strip() + ) + + return NetmapParser.netinfo(output) + + def nodeinfo( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + json: bool = True, + ttl: Optional[int] = None, + trace: Optional[bool] = True, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> NodeNetmapInfo: + """ + Get target node info. + """ + output = ( + self.cli.netmap.nodeinfo( + rpc_endpoint=rpc_endpoint, + wallet=wallet, + address=address, + generate_key=generate_key, + json=json, + ttl=ttl, + trace=trace, + xhdr=xhdr, + timeout=timeout, + ) + .stdout.split("Trace ID")[0] + .strip() + ) + + return NetmapParser.node_info(module_json.loads(output)) + + def snapshot( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + trace: Optional[bool] = True, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> List[NodeNetmapInfo]: + """ + Get target node info. + """ + output = ( + self.cli.netmap.snapshot( + rpc_endpoint=rpc_endpoint, + wallet=wallet, + address=address, + generate_key=generate_key, + ttl=ttl, + trace=trace, + xhdr=xhdr, + timeout=timeout, + ) + .stdout.split("Trace ID")[0] + .strip() + ) + + return NetmapParser.snapshot_all_nodes(output) + + def snapshot_one_node( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + trace: Optional[bool] = True, + xhdr: Optional[dict] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ) -> List[NodeNetmapInfo]: + """ + Get target one node info. 
+ """ + output = ( + self.cli.netmap.snapshot( + rpc_endpoint=rpc_endpoint, + wallet=wallet, + address=address, + generate_key=generate_key, + ttl=ttl, + trace=trace, + xhdr=xhdr, + timeout=timeout, + ) + .stdout.split("Trace ID")[0] + .strip() + ) + + return NetmapParser.snapshot_one_node(output, rpc_endpoint) diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces.py b/src/frostfs_testlib/storage/grpc_operations/interfaces.py deleted file mode 100644 index 07fe52f..0000000 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces.py +++ /dev/null @@ -1,424 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Any, List, Optional - -from frostfs_testlib.shell.interfaces import CommandResult -from frostfs_testlib.storage.cluster import Cluster, ClusterNode -from frostfs_testlib.storage.constants import PlacementRule -from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo -from frostfs_testlib.utils import file_utils - - -class ChunksInterface(ABC): - @abstractmethod - def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]: - pass - - @abstractmethod - def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]: - pass - - @abstractmethod - def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str: - pass - - @abstractmethod - def get_all( - self, - rpc_endpoint: str, - cid: str, - oid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - trace: bool = False, - root: bool = False, - verify_presence_all: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> list[Chunk]: - pass - - @abstractmethod - def get_parity( - self, - rpc_endpoint: str, - cid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - oid: Optional[str] = None, - trace: bool = False, - root: bool = False, - verify_presence_all: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> Chunk: - pass - - @abstractmethod - def get_first_data( - self, - rpc_endpoint: str, - cid: str, - wallet: Optional[str] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - oid: Optional[str] = None, - trace: bool = False, - root: bool = False, - verify_presence_all: bool = False, - json: bool = True, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> Chunk: - pass - - -class ObjectInterface(ABC): - def __init__(self) -> None: - self.chunks: ChunksInterface - - @abstractmethod - def delete( - self, - cid: str, - oid: str, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def get( - self, - cid: str, - oid: str, - endpoint: str, - bearer: Optional[str] = None, - write_object: Optional[str] = None, - xhdr: Optional[dict] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> file_utils.TestFile: - pass - - @abstractmethod - def get_from_random_node( - self, - cid: str, - oid: str, - cluster: Cluster, - bearer: Optional[str] = None, - write_object: Optional[str] = 
None, - xhdr: Optional[dict] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def hash( - self, - endpoint: str, - cid: str, - oid: str, - address: Optional[str] = None, - bearer: Optional[str] = None, - generate_key: Optional[bool] = None, - range: Optional[str] = None, - salt: Optional[str] = None, - ttl: Optional[int] = None, - session: Optional[str] = None, - hash_type: Optional[str] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def head( - self, - cid: str, - oid: str, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - json_output: bool = True, - is_raw: bool = False, - is_direct: bool = False, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> CommandResult | Any: - pass - - @abstractmethod - def lock( - self, - cid: str, - oid: str, - endpoint: str, - lifetime: Optional[int] = None, - expire_at: Optional[int] = None, - address: Optional[str] = None, - bearer: Optional[str] = None, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def put( - self, - path: str, - cid: str, - endpoint: str, - bearer: Optional[str] = None, - copies_number: Optional[int] = None, - attributes: Optional[dict] = None, - xhdr: Optional[dict] = None, - expire_at: Optional[int] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def patch( - self, - cid: str, - oid: str, - endpoint: str, - ranges: Optional[list[str]] = None, - payloads: Optional[list[str]] = None, - new_attrs: Optional[str] = None, - replace_attrs: bool = False, - bearer: Optional[str] = None, - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = None, - trace: bool = False, - ) -> str: - pass - - @abstractmethod - def put_to_random_node( - self, - path: str, - cid: str, - cluster: Cluster, - bearer: Optional[str] = None, - copies_number: Optional[int] = None, - attributes: Optional[dict] = None, - xhdr: Optional[dict] = None, - expire_at: Optional[int] = None, - no_progress: bool = True, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> str: - pass - - @abstractmethod - def range( - self, - cid: str, - oid: str, - range_cut: str, - endpoint: str, - bearer: str = "", - xhdr: Optional[dict] = None, - session: Optional[str] = None, - timeout: Optional[str] = None, - ) -> tuple[file_utils.TestFile, bytes]: - pass - - @abstractmethod - def search( - self, - cid: str, - endpoint: str, - bearer: str = "", - oid: Optional[str] = None, - filters: Optional[dict] = None, - expected_objects_list: Optional[list] = None, - xhdr: Optional[dict] = None, - session: Optional[str] = None, - phy: bool = False, - root: bool = False, - timeout: Optional[str] = None, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - ttl: Optional[int] = None, - ) -> List: - pass - - @abstractmethod - def nodes( - self, - cluster: Cluster, - cid: str, - oid: str, - alive_node: ClusterNode, - bearer: str = "", - xhdr: Optional[dict] = None, - is_direct: bool = False, - verify_presence_all: bool = False, - timeout: Optional[str] = None, - ) -> List[ClusterNode]: - pass - - @abstractmethod - def parts( - self, - cid: str, - oid: str, - alive_node: ClusterNode, - bearer: str = "", - xhdr: 
Optional[dict] = None, - is_direct: bool = False, - verify_presence_all: bool = False, - timeout: Optional[str] = None, - ) -> List[str]: - pass - - -class ContainerInterface(ABC): - @abstractmethod - def create( - self, - endpoint: str, - nns_zone: Optional[str] = None, - nns_name: Optional[str] = None, - address: Optional[str] = None, - attributes: Optional[dict] = None, - basic_acl: Optional[str] = None, - await_mode: bool = False, - disable_timestamp: bool = False, - force: bool = False, - trace: bool = False, - name: Optional[str] = None, - nonce: Optional[str] = None, - policy: Optional[str] = None, - session: Optional[str] = None, - subnet: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> str: - """ - Create a new container and register it in the FrostFS. - It will be stored in the sidechain when the Inner Ring accepts it. - """ - raise NotImplementedError("No implemethed method create") - - @abstractmethod - def delete( - self, - endpoint: str, - cid: str, - address: Optional[str] = None, - await_mode: bool = False, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - force: bool = False, - trace: bool = False, - ) -> List[str]: - """ - Delete an existing container. - Only the owner of the container has permission to remove the container. - """ - raise NotImplementedError("No implemethed method delete") - - @abstractmethod - def get( - self, - endpoint: str, - cid: str, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - await_mode: bool = False, - to: Optional[str] = None, - json_mode: bool = True, - trace: bool = False, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> List[str]: - """Get container field info.""" - raise NotImplementedError("No implemethed method get") - - @abstractmethod - def get_eacl( - self, - endpoint: str, - cid: str, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - await_mode: bool = False, - json_mode: bool = True, - trace: bool = False, - to: Optional[str] = None, - session: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - ) -> List[str]: - """Get extended ACL table of container.""" - raise NotImplementedError("No implemethed method get-eacl") - - @abstractmethod - def list( - self, - endpoint: str, - name: Optional[str] = None, - address: Optional[str] = None, - generate_key: Optional[bool] = None, - trace: bool = False, - owner: Optional[str] = None, - ttl: Optional[int] = None, - xhdr: Optional[dict] = None, - timeout: Optional[str] = None, - **params, - ) -> List[str]: - """List all created containers.""" - raise NotImplementedError("No implemethed method list") - - @abstractmethod - def nodes( - self, - endpoint: str, - cid: str, - cluster: Cluster, - address: Optional[str] = None, - ttl: Optional[int] = None, - from_file: Optional[str] = None, - trace: bool = False, - short: Optional[bool] = True, - xhdr: Optional[dict] = None, - generate_key: Optional[bool] = None, - timeout: Optional[str] = None, - ) -> List[ClusterNode]: - """Show the nodes participating in the container in the current epoch.""" - raise NotImplementedError("No implemethed method nodes") - - -class GrpcClientWrapper(ABC): - def __init__(self) -> None: - self.object: ObjectInterface - self.container: ContainerInterface diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py 
b/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py new file mode 100644 index 0000000..17b3e9c --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py @@ -0,0 +1,4 @@ +from .chunks import ChunksInterface +from .container import ContainerInterface +from .netmap import NetmapInterface +from .object import ObjectInterface diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py new file mode 100644 index 0000000..986b938 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/chunks.py @@ -0,0 +1,79 @@ +from abc import ABC, abstractmethod +from typing import Optional + +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo + + +class ChunksInterface(ABC): + @abstractmethod + def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]: + pass + + @abstractmethod + def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]: + pass + + @abstractmethod + def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str: + pass + + @abstractmethod + def get_all( + self, + rpc_endpoint: str, + cid: str, + oid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> list[Chunk]: + pass + + @abstractmethod + def get_parity( + self, + rpc_endpoint: str, + cid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + oid: Optional[str] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> Chunk: + pass + + @abstractmethod + def get_first_data( + self, + rpc_endpoint: str, + cid: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + oid: Optional[str] = None, + trace: bool = False, + root: bool = False, + verify_presence_all: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> Chunk: + pass diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py new file mode 100644 index 0000000..d5e3eeb --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py @@ -0,0 +1,125 @@ +from abc import ABC, abstractmethod +from typing import List, Optional + +from frostfs_testlib.storage.cluster import Cluster, ClusterNode + + +class ContainerInterface(ABC): + @abstractmethod + def create( + self, + endpoint: str, + nns_zone: Optional[str] = None, + nns_name: Optional[str] = None, + address: Optional[str] = None, + attributes: Optional[dict] = None, + basic_acl: Optional[str] = None, + await_mode: bool = False, + disable_timestamp: bool = False, + force: bool = False, + trace: bool = False, + name: Optional[str] = None, + nonce: Optional[str] = None, + policy: 
Optional[str] = None, + session: Optional[str] = None, + subnet: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> str: + """ + Create a new container and register it in the FrostFS. + It will be stored in the sidechain when the Inner Ring accepts it. + """ + raise NotImplementedError("No implemethed method create") + + @abstractmethod + def delete( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + await_mode: bool = False, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + force: bool = False, + trace: bool = False, + ) -> List[str]: + """ + Delete an existing container. + Only the owner of the container has permission to remove the container. + """ + raise NotImplementedError("No implemethed method delete") + + @abstractmethod + def get( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + await_mode: bool = False, + to: Optional[str] = None, + json_mode: bool = True, + trace: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[str]: + """Get container field info.""" + raise NotImplementedError("No implemethed method get") + + @abstractmethod + def get_eacl( + self, + endpoint: str, + cid: str, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + await_mode: bool = False, + json_mode: bool = True, + trace: bool = False, + to: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[str]: + """Get extended ACL table of container.""" + raise NotImplementedError("No implemethed method get-eacl") + + @abstractmethod + def list( + self, + endpoint: str, + name: Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + trace: bool = False, + owner: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + **params, + ) -> List[str]: + """List all created containers.""" + raise NotImplementedError("No implemethed method list") + + @abstractmethod + def nodes( + self, + endpoint: str, + cid: str, + cluster: Cluster, + address: Optional[str] = None, + ttl: Optional[int] = None, + from_file: Optional[str] = None, + trace: bool = False, + short: Optional[bool] = True, + xhdr: Optional[dict] = None, + generate_key: Optional[bool] = None, + timeout: Optional[str] = None, + ) -> List[ClusterNode]: + """Show the nodes participating in the container in the current epoch.""" + raise NotImplementedError("No implemethed method nodes") diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py new file mode 100644 index 0000000..3f0a341 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py @@ -0,0 +1,89 @@ +from abc import ABC, abstractmethod +from typing import List, Optional + +from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo + + +class NetmapInterface(ABC): + @abstractmethod + def epoch( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + trace: Optional[bool] = False, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> int: + """ + Get current 
epoch number. + """ + raise NotImplementedError("No implemethed method epoch") + + @abstractmethod + def netinfo( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> NodeNetInfo: + """ + Get target node info. + """ + raise NotImplementedError("No implemethed method netinfo") + + @abstractmethod + def nodeinfo( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + json: bool = True, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> NodeNetmapInfo: + """ + Get target node info. + """ + raise NotImplementedError("No implemethed method nodeinfo") + + @abstractmethod + def snapshot( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[NodeNetmapInfo]: + """ + Get target node info. + """ + raise NotImplementedError("No implemethed method snapshot") + + @abstractmethod + def snapshot_one_node( + self, + rpc_endpoint: str, + wallet: Optional[str] = None, + address: Optional[str] = None, + generate_key: bool = False, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> List[NodeNetmapInfo]: + """ + Get target one node info. + """ + raise NotImplementedError("No implemethed method snapshot") diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/object.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/object.py new file mode 100644 index 0000000..550c461 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/object.py @@ -0,0 +1,223 @@ +from abc import ABC, abstractmethod +from typing import Any, List, Optional + +from frostfs_testlib.shell.interfaces import CommandResult +from frostfs_testlib.storage.cluster import Cluster, ClusterNode +from frostfs_testlib.utils import file_utils + +from .chunks import ChunksInterface + + +class ObjectInterface(ABC): + def __init__(self) -> None: + self.chunks: ChunksInterface + + @abstractmethod + def delete( + self, + cid: str, + oid: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def get( + self, + cid: str, + oid: str, + endpoint: str, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> file_utils.TestFile: + pass + + @abstractmethod + def get_from_random_node( + self, + cid: str, + oid: str, + cluster: Cluster, + bearer: Optional[str] = None, + write_object: Optional[str] = None, + xhdr: Optional[dict] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def hash( + self, + endpoint: str, + cid: str, + oid: str, + address: Optional[str] = None, + bearer: Optional[str] = None, + generate_key: Optional[bool] = None, + range: Optional[str] = None, + salt: Optional[str] = None, + ttl: Optional[int] = None, + session: Optional[str] = None, + hash_type: Optional[str] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] 
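# The interface modules above pair @abstractmethod with an explicit
# NotImplementedError guard: a subclass that omits an override cannot be
# instantiated at all, and one that delegates to super() still fails loudly
# at call time. A toy illustration of that double protection:

from abc import ABC, abstractmethod


class NetmapLike(ABC):
    @abstractmethod
    def epoch(self) -> int:
        raise NotImplementedError("No implemented method epoch")


class Delegating(NetmapLike):
    def epoch(self) -> int:
        return super().epoch()  # falls through to the guard


try:
    Delegating().epoch()
except NotImplementedError as exc:
    print(exc)  # No implemented method epoch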
= None, + ) -> str: + pass + + @abstractmethod + def head( + self, + cid: str, + oid: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + json_output: bool = True, + is_raw: bool = False, + is_direct: bool = False, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult | Any: + pass + + @abstractmethod + def lock( + self, + cid: str, + oid: str, + endpoint: str, + lifetime: Optional[int] = None, + expire_at: Optional[int] = None, + address: Optional[str] = None, + bearer: Optional[str] = None, + session: Optional[str] = None, + ttl: Optional[int] = None, + xhdr: Optional[dict] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def put( + self, + path: str, + cid: str, + endpoint: str, + bearer: Optional[str] = None, + copies_number: Optional[int] = None, + attributes: Optional[dict] = None, + xhdr: Optional[dict] = None, + expire_at: Optional[int] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def patch( + self, + cid: str, + oid: str, + endpoint: str, + ranges: Optional[list[str]] = None, + payloads: Optional[list[str]] = None, + new_attrs: Optional[str] = None, + replace_attrs: bool = False, + bearer: Optional[str] = None, + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + trace: bool = False, + ) -> str: + pass + + @abstractmethod + def put_to_random_node( + self, + path: str, + cid: str, + cluster: Cluster, + bearer: Optional[str] = None, + copies_number: Optional[int] = None, + attributes: Optional[dict] = None, + xhdr: Optional[dict] = None, + expire_at: Optional[int] = None, + no_progress: bool = True, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> str: + pass + + @abstractmethod + def range( + self, + cid: str, + oid: str, + range_cut: str, + endpoint: str, + bearer: str = "", + xhdr: Optional[dict] = None, + session: Optional[str] = None, + timeout: Optional[str] = None, + ) -> tuple[file_utils.TestFile, bytes]: + pass + + @abstractmethod + def search( + self, + cid: str, + endpoint: str, + bearer: str = "", + oid: Optional[str] = None, + filters: Optional[dict] = None, + expected_objects_list: Optional[list] = None, + xhdr: Optional[dict] = None, + session: Optional[str] = None, + phy: bool = False, + root: bool = False, + timeout: Optional[str] = None, + address: Optional[str] = None, + generate_key: Optional[bool] = None, + ttl: Optional[int] = None, + ) -> List: + pass + + @abstractmethod + def nodes( + self, + cluster: Cluster, + cid: str, + oid: str, + alive_node: ClusterNode, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + timeout: Optional[str] = None, + ) -> List[ClusterNode]: + pass + + @abstractmethod + def parts( + self, + cid: str, + oid: str, + alive_node: ClusterNode, + bearer: str = "", + xhdr: Optional[dict] = None, + is_direct: bool = False, + verify_presence_all: bool = False, + timeout: Optional[str] = None, + ) -> List[str]: + pass diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py b/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py new file mode 100644 index 0000000..6574012 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py @@ -0,0 +1,10 @@ +from abc import ABC + +from . 
import interfaces + + +class GrpcClientWrapper(ABC): + def __init__(self) -> None: + self.object: interfaces.ObjectInterface + self.container: interfaces.ContainerInterface + self.netmap: interfaces.NetmapInterface From 87afc4b58c070d35643f95efd0e5db27eeb6fab6 Mon Sep 17 00:00:00 2001 From: Dmitry Anurin Date: Tue, 4 Feb 2025 10:03:58 +0300 Subject: [PATCH 342/363] [#356] Added pprof endpoint and working dir to service attributes Signed-off-by: Dmitry Anurin --- src/frostfs_testlib/storage/constants.py | 2 ++ src/frostfs_testlib/storage/dataclasses/node_base.py | 10 ++++++++++ 2 files changed, 12 insertions(+) diff --git a/src/frostfs_testlib/storage/constants.py b/src/frostfs_testlib/storage/constants.py index 39c6b66..2e49208 100644 --- a/src/frostfs_testlib/storage/constants.py +++ b/src/frostfs_testlib/storage/constants.py @@ -5,6 +5,7 @@ class ConfigAttributes: WALLET_CONFIG = "wallet_config" CONFIG_DIR = "service_config_dir" CONFIG_PATH = "config_path" + WORKING_DIR = "working_dir" SHARD_CONFIG_PATH = "shard_config_path" LOGGER_CONFIG_PATH = "logger_config_path" LOCAL_WALLET_PATH = "local_wallet_path" @@ -15,6 +16,7 @@ class ConfigAttributes: ENDPOINT_DATA_0_NS = "endpoint_data0_namespace" ENDPOINT_INTERNAL = "endpoint_internal0" ENDPOINT_PROMETHEUS = "endpoint_prometheus" + ENDPOINT_PPROF = "endpoint_pprof" CONTROL_ENDPOINT = "control_endpoint" UN_LOCODE = "un_locode" diff --git a/src/frostfs_testlib/storage/dataclasses/node_base.py b/src/frostfs_testlib/storage/dataclasses/node_base.py index 180877d..5c8b723 100644 --- a/src/frostfs_testlib/storage/dataclasses/node_base.py +++ b/src/frostfs_testlib/storage/dataclasses/node_base.py @@ -82,6 +82,9 @@ class NodeBase(HumanReadableABC): def get_metrics_endpoint(self) -> str: return self._get_attribute(ConfigAttributes.ENDPOINT_PROMETHEUS) + def get_pprof_endpoint(self) -> str: + return self._get_attribute(ConfigAttributes.ENDPOINT_PPROF) + def stop_service(self, mask: bool = True): if mask: with reporter.step(f"Mask {self.name} service on {self.host.config.address}"): @@ -144,6 +147,13 @@ class NodeBase(HumanReadableABC): else None ) + def get_working_dir_path(self) -> Optional[str]: + """ + Returns working directory path located on remote host + """ + config_attributes = self.host.get_service_config(self.name) + return self._get_attribute(ConfigAttributes.WORKING_DIR) if ConfigAttributes.WORKING_DIR in config_attributes.attributes else None + @property def config_dir(self) -> str: return self._get_attribute(ConfigAttributes.CONFIG_DIR) From e9bc36b3d3063043e2b754fbccbde53e93e3785a Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Tue, 4 Feb 2025 16:39:34 +0300 Subject: [PATCH 343/363] [#355] Change CSC time methods Signed-off-by: Dmitriy Zayakin --- .../controllers/cluster_state_controller.py | 34 +++++-------------- 1 file changed, 8 insertions(+), 26 deletions(-) diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 3a10ded..6370033 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -1,7 +1,7 @@ -import datetime import itertools import logging import time +from datetime import datetime, timezone from typing import TypeVar import frostfs_testlib.resources.optionals as optionals @@ -390,31 +390,23 @@ class ClusterStateController: @reporter.step("Get node time") def get_node_date(self, node: ClusterNode) -> datetime: shell = 
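# With the netmap namespace wired into the wrapper above, one object now
# serves object, container and netmap calls. A hypothetical call site; the
# shell, binary path, config file and endpoint are placeholders for a
# configured environment:

from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.shell import LocalShell
from frostfs_testlib.storage.grpc_operations.client_wrappers import CliClientWrapper

cli = FrostfsCli(shell=LocalShell(), frostfs_cli_exec_path="frostfs-cli", config_file="wallet_config.yml")
client = CliClientWrapper(cli)

epoch = client.netmap.epoch(rpc_endpoint="10.78.128.96:8080")
node_info = client.netmap.nodeinfo(rpc_endpoint="10.78.128.96:8080")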
node.host.get_shell() - return datetime.datetime.strptime(shell.exec("hwclock -r").stdout.strip(), "%Y-%m-%d %H:%M:%S.%f%z") + return datetime.strptime(shell.exec('date +"%Y-%m-%d %H:%M:%S"').stdout.strip(), "%Y-%m-%d %H:%M:%S") @reporter.step("Set node time to {in_date}") def change_node_date(self, node: ClusterNode, in_date: datetime) -> None: shell = node.host.get_shell() - shell.exec(f"date -s @{time.mktime(in_date.timetuple())}") - shell.exec("hwclock --systohc") + in_date_frmt = in_date.strftime("%Y-%m-%d %H:%M:%S") + shell.exec(f"timedatectl set-time '{in_date_frmt}'") node_time = self.get_node_date(node) with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a minute"): - assert (self.get_node_date(node) - in_date) < datetime.timedelta(minutes=1) + assert (node_time - in_date).total_seconds() < 60 - @reporter.step(f"Restore time") + @reporter.step("Restore time") def restore_node_date(self, node: ClusterNode) -> None: shell = node.host.get_shell() - now_time = datetime.datetime.now(datetime.timezone.utc) + now_time = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S") with reporter.step(f"Set {now_time} time"): - shell.exec(f"date -s @{time.mktime(now_time.timetuple())}") - shell.exec("hwclock --systohc") - - @reporter.step("Change the synchronizer status to {status}") - def set_sync_date_all_nodes(self, status: str): - if status == "active": - parallel(self._enable_date_synchronizer, self.cluster.cluster_nodes) - return - parallel(self._disable_date_synchronizer, self.cluster.cluster_nodes) + shell.exec(f"timedatectl set-time '{now_time}'") @reporter.step("Set MaintenanceModeAllowed - {status}") def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None: @@ -500,16 +492,6 @@ class ClusterStateController: frostfs_cli_remote = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config_path) return frostfs_adm, frostfs_cli, frostfs_cli_remote - def _enable_date_synchronizer(self, cluster_node: ClusterNode): - shell = cluster_node.host.get_shell() - shell.exec("timedatectl set-ntp true") - cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "active", 15) - - def _disable_date_synchronizer(self, cluster_node: ClusterNode): - shell = cluster_node.host.get_shell() - shell.exec("timedatectl set-ntp false") - cluster_node.host.wait_for_service_to_be_in_state("systemd-timesyncd", "inactive", 15) - def _get_disk_controller(self, node: StorageNode, device: str, mountpoint: str) -> DiskController: disk_controller_id = DiskController.get_id(node, device) if disk_controller_id in self.detached_disks.keys(): From 97b9b5498af883d2dd111aa17b916d2aba36429e Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 21 Feb 2025 16:27:13 +0300 Subject: [PATCH 344/363] [#358] Add minor improvements for convenient work with clients Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/clients/http/__init__.py | 1 + src/frostfs_testlib/clients/s3/__init__.py | 4 ++- .../clients/s3/aws_cli_client.py | 6 +++-- .../clients/s3/boto3_client.py | 20 ++++++-------- src/frostfs_testlib/clients/s3/interfaces.py | 26 ++++++++++++------- .../resources/error_patterns.py | 1 + 6 files changed, 34 insertions(+), 24 deletions(-) diff --git a/src/frostfs_testlib/clients/http/__init__.py b/src/frostfs_testlib/clients/http/__init__.py index e69de29..ab6e2b0 100644 --- a/src/frostfs_testlib/clients/http/__init__.py +++ b/src/frostfs_testlib/clients/http/__init__.py @@ -0,0 +1 @@ +from 
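# The controller above now drives the clock through timedatectl instead of
# date -s plus hwclock, matching systemd-managed hosts. The round-trip shape,
# factored out of the methods above; `shell` is any object whose exec()
# returns a result with .stdout, and NTP sync must be off or
# `timedatectl set-time` refuses to change the clock:

from datetime import datetime


def set_node_date(shell, in_date: datetime) -> None:
    frmt = in_date.strftime("%Y-%m-%d %H:%M:%S")
    shell.exec(f"timedatectl set-time '{frmt}'")


def get_node_date(shell) -> datetime:
    return datetime.strptime(shell.exec('date +"%Y-%m-%d %H:%M:%S"').stdout.strip(), "%Y-%m-%d %H:%M:%S")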
frostfs_testlib.clients.http.http_client import HttpClient diff --git a/src/frostfs_testlib/clients/s3/__init__.py b/src/frostfs_testlib/clients/s3/__init__.py index 65a3990..5481f48 100644 --- a/src/frostfs_testlib/clients/s3/__init__.py +++ b/src/frostfs_testlib/clients/s3/__init__.py @@ -1 +1,3 @@ -from frostfs_testlib.clients.s3.interfaces import BucketContainerResolver, S3ClientWrapper, VersioningStatus +from frostfs_testlib.clients.s3.aws_cli_client import AwsCliClient +from frostfs_testlib.clients.s3.boto3_client import Boto3ClientWrapper +from frostfs_testlib.clients.s3.interfaces import ACL, BucketContainerResolver, S3ClientWrapper, VersioningStatus diff --git a/src/frostfs_testlib/clients/s3/aws_cli_client.py b/src/frostfs_testlib/clients/s3/aws_cli_client.py index accc289..8b2d774 100644 --- a/src/frostfs_testlib/clients/s3/aws_cli_client.py +++ b/src/frostfs_testlib/clients/s3/aws_cli_client.py @@ -33,12 +33,14 @@ class AwsCliClient(S3ClientWrapper): self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1" ) -> None: self.s3gate_endpoint = s3gate_endpoint + self.iam_endpoint = None + self.access_key_id: str = access_key_id self.secret_access_key: str = secret_access_key self.profile = profile - self.local_shell = LocalShell() self.region = region - self.iam_endpoint = None + + self.local_shell = LocalShell() try: _configure_aws_cli(f"aws configure --profile {profile}", access_key_id, secret_access_key, region) self.local_shell.exec(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS} --profile {profile}") diff --git a/src/frostfs_testlib/clients/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py index 890b4e9..9d9fefe 100644 --- a/src/frostfs_testlib/clients/s3/boto3_client.py +++ b/src/frostfs_testlib/clients/s3/boto3_client.py @@ -35,26 +35,20 @@ class Boto3ClientWrapper(S3ClientWrapper): def __init__( self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1" ) -> None: - self.boto3_client: S3Client = None self.s3gate_endpoint: str = "" + self.boto3_client: S3Client = None - self.boto3_iam_client: S3Client = None self.iam_endpoint: str = "" - + self.boto3_iam_client: S3Client = None self.boto3_sts_client: S3Client = None - self.access_key_id: str = access_key_id - self.secret_access_key: str = secret_access_key + self.access_key_id = access_key_id + self.secret_access_key = secret_access_key self.profile = profile self.region = region self.session = boto3.Session() - self.config = Config( - retries={ - "max_attempts": MAX_REQUEST_ATTEMPTS, - "mode": RETRY_MODE, - } - ) + self.config = Config(retries={"max_attempts": MAX_REQUEST_ATTEMPTS, "mode": RETRY_MODE}) self.set_endpoint(s3gate_endpoint) @@ -90,7 +84,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint_url=self.iam_endpoint, verify=False, ) - # since the STS does not have an enpoint, IAM is used + # since the STS does not have an endpoint, IAM is used self.boto3_sts_client = self.session.client( service_name="sts", aws_access_key_id=self.access_key_id, @@ -145,6 +139,7 @@ class Boto3ClientWrapper(S3ClientWrapper): params = {"Bucket": bucket} if object_lock_enabled_for_bucket is not None: params.update({"ObjectLockEnabledForBucket": object_lock_enabled_for_bucket}) + if acl is not None: params.update({"ACL": acl}) elif grant_write or grant_read or grant_full_control: @@ -154,6 +149,7 @@ class Boto3ClientWrapper(S3ClientWrapper): params.update({"GrantRead": 
grant_read}) elif grant_full_control: params.update({"GrantFullControl": grant_full_control}) + if location_constraint: params.update({"CreateBucketConfiguration": {"LocationConstraint": location_constraint}}) diff --git a/src/frostfs_testlib/clients/s3/interfaces.py b/src/frostfs_testlib/clients/s3/interfaces.py index 7ce9f31..d636182 100644 --- a/src/frostfs_testlib/clients/s3/interfaces.py +++ b/src/frostfs_testlib/clients/s3/interfaces.py @@ -22,15 +22,15 @@ class VersioningStatus(HumanReadableEnum): SUSPENDED = "Suspended" -ACL_COPY = [ - "private", - "public-read", - "public-read-write", - "authenticated-read", - "aws-exec-read", - "bucket-owner-read", - "bucket-owner-full-control", -] +class ACL: + PRIVATE = "private" + PUBLIC_READ = "public-read" + PUBLIC_READ_WRITE = "public-read-write" + AUTHENTICATED_READ = "authenticated-read" + AWS_EXEC_READ = "aws-exec-read" + BUCKET_OWNER_READ = "bucket-owner-read" + BUCKET_OWNER_FULL_CONTROL = "bucket-owner-full-control" + LOG_DELIVERY_WRITE = "log-delivery-write" class BucketContainerResolver(ABC): @@ -50,6 +50,14 @@ class BucketContainerResolver(ABC): class S3ClientWrapper(HumanReadableABC): + access_key_id: str + secret_access_key: str + profile: str + region: str + + s3gate_endpoint: str + iam_endpoint: str + @abstractmethod def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str, region: str) -> None: pass diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py index 4c22648..6c0cb14 100644 --- a/src/frostfs_testlib/resources/error_patterns.py +++ b/src/frostfs_testlib/resources/error_patterns.py @@ -1,5 +1,6 @@ # Regex patterns of status codes of Container service CONTAINER_NOT_FOUND = "code = 3072.*message = container not found" +SUBJECT_NOT_FOUND = "code = 1024.*message = frostfs error: chain/client.*subject not found.*" # Regex patterns of status codes of Object service MALFORMED_REQUEST = "code = 1024.*message = malformed request" From b00d080982804c8c9237a49a606dbf6fc4ef03f1 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Tue, 25 Feb 2025 16:43:34 +0300 Subject: [PATCH 345/363] [#357] Synchronize client and CliCommand timeouts Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/cli/cli_command.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/src/frostfs_testlib/cli/cli_command.py b/src/frostfs_testlib/cli/cli_command.py index 3600e77..7fccc65 100644 --- a/src/frostfs_testlib/cli/cli_command.py +++ b/src/frostfs_testlib/cli/cli_command.py @@ -24,9 +24,7 @@ class CliCommand: def __init__(self, shell: Shell, cli_exec_path: str, **base_params): self.shell = shell self.cli_exec_path = cli_exec_path - self.__base_params = " ".join( - [f"--{param} {value}" for param, value in base_params.items() if value] - ) + self.__base_params = " ".join([f"--{param} {value}" for param, value in base_params.items() if value]) def _format_command(self, command: str, **params) -> str: param_str = [] @@ -48,9 +46,7 @@ class CliCommand: val_str = str(value_item).replace("'", "\\'") param_str.append(f"--{param} '{val_str}'") elif isinstance(value, dict): - param_str.append( - f'--{param} \'{",".join(f"{key}={val}" for key, val in value.items())}\'' - ) + param_str.append(f'--{param} \'{",".join(f"{key}={val}" for key, val in value.items())}\'') else: if "'" in str(value): value_str = str(value).replace('"', '\\"') @@ -63,12 +59,18 @@ class CliCommand: return f"{self.cli_exec_path} 
{self.__base_params} {command or ''} {param_str}" def _execute(self, command: Optional[str], **params) -> CommandResult: - return self.shell.exec(self._format_command(command, **params)) - - def _execute_with_password(self, command: Optional[str], password, **params) -> CommandResult: + timeout = int(params["timeout"].rstrip("s")) if params.get("timeout") else None return self.shell.exec( self._format_command(command, **params), - options=CommandOptions( - interactive_inputs=[InteractiveInput(prompt_pattern="assword", input=password)] + CommandOptions(timeout=timeout), + ) + + def _execute_with_password(self, command: Optional[str], password, **params) -> CommandResult: + timeout = int(params["timeout"].rstrip("s")) if params.get("timeout") else None + return self.shell.exec( + self._format_command(command, **params), + CommandOptions( + interactive_inputs=[InteractiveInput(prompt_pattern="assword", input=password)], + timeout=timeout, ), ) From f1073d214cc300ede89cfd05907039511a1970f0 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Wed, 5 Mar 2025 15:29:35 +0300 Subject: [PATCH 346/363] [#360] Increased timeout for IAM policy attach/detach Signed-off-by: Yaroslava Lukoyanova --- src/frostfs_testlib/clients/s3/aws_cli_client.py | 12 ++++++------ src/frostfs_testlib/clients/s3/boto3_client.py | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/frostfs_testlib/clients/s3/aws_cli_client.py b/src/frostfs_testlib/clients/s3/aws_cli_client.py index 8b2d774..a2e3fc7 100644 --- a/src/frostfs_testlib/clients/s3/aws_cli_client.py +++ b/src/frostfs_testlib/clients/s3/aws_cli_client.py @@ -979,7 +979,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @@ -990,7 +990,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @@ -1122,7 +1122,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @@ -1133,7 +1133,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @@ -1352,7 +1352,7 @@ class AwsCliClient(S3ClientWrapper): cmd += f" --profile {self.profile}" output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @@ -1367,7 +1367,7 @@ class AwsCliClient(S3ClientWrapper): output = self.local_shell.exec(cmd).stdout response = self._to_json(output) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response diff --git a/src/frostfs_testlib/clients/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py index 9d9fefe..4157bd6 100644 --- a/src/frostfs_testlib/clients/s3/boto3_client.py +++ b/src/frostfs_testlib/clients/s3/boto3_client.py @@ -836,7 +836,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response 
@reporter.step("Attaches the specified managed policy to the specified user") @@ -848,7 +848,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @reporter.step("Creates a new AWS secret access key and access key ID for the specified user") @@ -979,7 +979,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @reporter.step("Removes the specified managed policy from the specified user") @@ -991,7 +991,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @reporter.step("Returns a list of IAM users that are in the specified IAM group") @@ -1201,7 +1201,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user") @@ -1216,7 +1216,7 @@ class Boto3ClientWrapper(S3ClientWrapper): endpoint=self.iam_endpoint, profile=self.profile, ) - sleep(S3_SYNC_WAIT_TIME * 10) + sleep(S3_SYNC_WAIT_TIME * 14) return response @reporter.step("Removes the specified user from the specified group") From 0c4e601840d81ceef400e334b3d3bcd8bee4592e Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Mon, 3 Mar 2025 14:54:22 +0300 Subject: [PATCH 347/363] [#359] Override represantation method for Host Signed-off-by: Ilyas Niyazov --- src/frostfs_testlib/hosting/interfaces.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/frostfs_testlib/hosting/interfaces.py b/src/frostfs_testlib/hosting/interfaces.py index f58d856..a41161c 100644 --- a/src/frostfs_testlib/hosting/interfaces.py +++ b/src/frostfs_testlib/hosting/interfaces.py @@ -29,6 +29,9 @@ class Host(ABC): self._service_config_by_name = {service_config.name: service_config for service_config in config.services} self._cli_config_by_name = {cli_config.name: cli_config for cli_config in config.clis} + def __repr__(self) -> str: + return self.config.address + @property def config(self) -> HostConfig: """Returns config of the host. 
From 7d2c92ebc096dc378666dce09d26cfd0a0313d2f Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Fri, 7 Mar 2025 15:18:43 +0300 Subject: [PATCH 348/363] [#361] Move common fixture to testlib Signed-off-by: a.berezin --- src/frostfs_testlib/__init__.py | 2 +- src/frostfs_testlib/fixtures.py | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/frostfs_testlib/__init__.py b/src/frostfs_testlib/__init__.py index e557a79..4724a8b 100644 --- a/src/frostfs_testlib/__init__.py +++ b/src/frostfs_testlib/__init__.py @@ -1,4 +1,4 @@ __version__ = "2.0.1" -from .fixtures import configure_testlib, hosting, temp_directory +from .fixtures import configure_testlib, hosting, session_start_time, temp_directory from .hooks import pytest_add_frostfs_marker, pytest_collection_modifyitems diff --git a/src/frostfs_testlib/fixtures.py b/src/frostfs_testlib/fixtures.py index d0f92f2..7d767d2 100644 --- a/src/frostfs_testlib/fixtures.py +++ b/src/frostfs_testlib/fixtures.py @@ -1,5 +1,6 @@ import logging import os +from datetime import datetime from importlib.metadata import entry_points import pytest @@ -11,6 +12,12 @@ from frostfs_testlib.resources.common import ASSETS_DIR, HOSTING_CONFIG_FILE from frostfs_testlib.storage import get_service_registry +@pytest.fixture(scope="session", autouse=True) +def session_start_time(): + start_time = datetime.utcnow() + return start_time + + @pytest.fixture(scope="session") def configure_testlib(): reporter.get_reporter().register_handler(reporter.AllureHandler()) From c2af1bba5c300b1bb1758eaa19f687962ef98224 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 7 Mar 2025 18:14:38 +0300 Subject: [PATCH 349/363] [#362] Add functions to change date on nodes in `ClusterStateController` Signed-off-by: Kirill Sosnovskikh --- .../controllers/cluster_state_controller.py | 61 +++++++------------ 1 file changed, 22 insertions(+), 39 deletions(-) diff --git a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py index 6370033..51aaefb 100644 --- a/src/frostfs_testlib/storage/controllers/cluster_state_controller.py +++ b/src/frostfs_testlib/storage/controllers/cluster_state_controller.py @@ -247,23 +247,20 @@ class ClusterStateController: if service_type == StorageNode: self.wait_after_storage_startup() - # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Stop all storage services on cluster") - def stop_all_storage_services(self, reversed_order: bool = False): - nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes + @reporter.step("Restart {service_type} service on {node}") + def restart_service_of_type(self, node: ClusterNode, service_type: ServiceClass): + service = node.service(service_type) + service.restart_service() - for node in nodes: - self.stop_service_of_type(node, StorageNode) - - # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Stop all S3 gates on cluster") - def stop_all_s3_gates(self, reversed_order: bool = False): - nodes = reversed(self.cluster.cluster_nodes) if reversed_order else self.cluster.cluster_nodes + @reporter.step("Restart all {service_type} services") + def restart_services_of_type(self, service_type: type[ServiceClass]): + services = self.cluster.services(service_type) + parallel([service.restart_service for service in services]) - for node in nodes: - self.stop_service_of_type(node, S3Gate) + if service_type == 
StorageNode: + self.wait_after_storage_startup() # TODO: Deprecated @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @@ -277,30 +274,6 @@ class ClusterStateController: def start_storage_service(self, node: ClusterNode): self.start_service_of_type(node, StorageNode) - # TODO: Deprecated - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start stopped storage services") - def start_stopped_storage_services(self): - self.start_stopped_services_of_type(StorageNode) - - # TODO: Deprecated - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Stop s3 gate on {node}") - def stop_s3_gate(self, node: ClusterNode, mask: bool = True): - self.stop_service_of_type(node, S3Gate, mask) - - # TODO: Deprecated - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start s3 gate on {node}") - def start_s3_gate(self, node: ClusterNode): - self.start_service_of_type(node, S3Gate) - - # TODO: Deprecated - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) - @reporter.step("Start stopped S3 gates") - def start_stopped_s3_gates(self): - self.start_stopped_services_of_type(S3Gate) - @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED) @reporter.step("Suspend {process_name} service in {node}") def suspend_service(self, process_name: str, node: ClusterNode): @@ -392,19 +365,29 @@ class ClusterStateController: shell = node.host.get_shell() return datetime.strptime(shell.exec('date +"%Y-%m-%d %H:%M:%S"').stdout.strip(), "%Y-%m-%d %H:%M:%S") - @reporter.step("Set node time to {in_date}") + @reporter.step("Set time on nodes in {in_date}") + def change_date_on_all_nodes(self, cluster: Cluster, in_date: datetime) -> None: + parallel(self.change_node_date, cluster.cluster_nodes, in_date=in_date) + + @reporter.step("Set time on {node} to {in_date}") def change_node_date(self, node: ClusterNode, in_date: datetime) -> None: shell = node.host.get_shell() in_date_frmt = in_date.strftime("%Y-%m-%d %H:%M:%S") shell.exec(f"timedatectl set-time '{in_date_frmt}'") node_time = self.get_node_date(node) + with reporter.step(f"Verify difference between {node_time} and {in_date} is less than a minute"): assert (node_time - in_date).total_seconds() < 60 - @reporter.step("Restore time") + @reporter.step("Restore time on nodes") + def restore_date_on_all_nodes(self, cluster: Cluster) -> None: + parallel(self.restore_node_date, cluster.cluster_nodes) + + @reporter.step("Restore time on {node}") def restore_node_date(self, node: ClusterNode) -> None: shell = node.host.get_shell() now_time = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S") + with reporter.step(f"Set {now_time} time"): shell.exec(f"timedatectl set-time '{now_time}'") From dfb048fe519f6ab72d59453569ead9cf2e93cafa Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Tue, 11 Mar 2025 17:22:13 +0300 Subject: [PATCH 350/363] [#363] Add accounting for timeout inaccuracy between process and cli Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/cli/cli_command.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/cli/cli_command.py b/src/frostfs_testlib/cli/cli_command.py index 7fccc65..224e9e3 100644 --- a/src/frostfs_testlib/cli/cli_command.py +++ b/src/frostfs_testlib/cli/cli_command.py @@ -1,10 +1,11 @@ from typing import Optional from frostfs_testlib.shell import CommandOptions, CommandResult, InteractiveInput, Shell +from frostfs_testlib.utils.datetime_utils import parse_time class CliCommand: - + TIMEOUT_INACCURACY = 10 WALLET_SOURCE_ERROR_MSG = 
"Provide either wallet or wallet_config to specify wallet location" WALLET_PASSWD_ERROR_MSG = "Provide either wallet_password or wallet_config to specify password" @@ -59,14 +60,18 @@ class CliCommand: return f"{self.cli_exec_path} {self.__base_params} {command or ''} {param_str}" def _execute(self, command: Optional[str], **params) -> CommandResult: - timeout = int(params["timeout"].rstrip("s")) if params.get("timeout") else None + if timeout := params.get("timeout"): + timeout = parse_time(timeout) + self.TIMEOUT_INACCURACY + return self.shell.exec( self._format_command(command, **params), CommandOptions(timeout=timeout), ) def _execute_with_password(self, command: Optional[str], password, **params) -> CommandResult: - timeout = int(params["timeout"].rstrip("s")) if params.get("timeout") else None + if timeout := params.get("timeout"): + timeout = parse_time(timeout) + self.TIMEOUT_INACCURACY + return self.shell.exec( self._format_command(command, **params), CommandOptions( From 3966f65c95cbad9f5adc99d9c396178008409c37 Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Mon, 17 Mar 2025 16:24:36 +0300 Subject: [PATCH 351/363] [#364] Fixed hook order tests collection Signed-off-by: Ilyas Niyazov --- src/frostfs_testlib/hooks.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/frostfs_testlib/hooks.py b/src/frostfs_testlib/hooks.py index 1ada660..c56c75a 100644 --- a/src/frostfs_testlib/hooks.py +++ b/src/frostfs_testlib/hooks.py @@ -16,6 +16,9 @@ def pytest_add_frostfs_marker(items: list[pytest.Item]): # pytest hook. Do not rename @pytest.hookimpl(trylast=True) def pytest_collection_modifyitems(items: list[pytest.Item]): + # The order of running tests corresponded to the suites + items.sort(key=lambda item: item.nodeid) + # Change order of tests based on @pytest.mark.order() marker def order(item: pytest.Item) -> int: order_marker = item.get_closest_marker("order") From dcde9e15b104602f117e6ed352f30726601d8545 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Thu, 13 Mar 2025 16:53:42 +0300 Subject: [PATCH 352/363] [#365] Change type hint for `NetmapOperations.nodeinfo` Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/cli/netmap_parser.py | 2 +- .../storage/grpc_operations/implementations/netmap.py | 4 ++-- .../storage/grpc_operations/interfaces/netmap.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/frostfs_testlib/cli/netmap_parser.py b/src/frostfs_testlib/cli/netmap_parser.py index 2c97c3a..4b4a501 100644 --- a/src/frostfs_testlib/cli/netmap_parser.py +++ b/src/frostfs_testlib/cli/netmap_parser.py @@ -89,7 +89,7 @@ class NetmapParser: return snapshot @staticmethod - def node_info(output: dict) -> NodeNetmapInfo: + def node_info(output: dict) -> NodeInfo: data_dict = {"attributes": {}} for key, value in output.items(): diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py b/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py index 905171b..76ee69a 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/netmap.py @@ -4,7 +4,7 @@ from typing import List, Optional from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli from frostfs_testlib.cli.netmap_parser import NetmapParser from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT -from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo +from frostfs_testlib.storage.dataclasses.storage_object_info 
import NodeInfo, NodeNetInfo, NodeNetmapInfo from .. import interfaces @@ -86,7 +86,7 @@ class NetmapOperations(interfaces.NetmapInterface): trace: Optional[bool] = True, xhdr: Optional[dict] = None, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, - ) -> NodeNetmapInfo: + ) -> NodeInfo: """ Get target node info. """ diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py index 3f0a341..3fdc98a 100644 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/netmap.py @@ -1,7 +1,7 @@ from abc import ABC, abstractmethod from typing import List, Optional -from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo +from frostfs_testlib.storage.dataclasses.storage_object_info import NodeInfo, NodeNetInfo, NodeNetmapInfo class NetmapInterface(ABC): @@ -50,7 +50,7 @@ class NetmapInterface(ABC): ttl: Optional[int] = None, xhdr: Optional[dict] = None, timeout: Optional[str] = None, - ) -> NodeNetmapInfo: + ) -> NodeInfo: """ Get target node info. """ From 91a2706b06f2bb5d00f0ef60ef5bf1e2c55ece3a Mon Sep 17 00:00:00 2001 From: anurindm Date: Wed, 19 Mar 2025 11:43:21 +0300 Subject: [PATCH 353/363] [#366] Test order depends on location Signed-off-by: Dmitry Anurin --- src/frostfs_testlib/hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/hooks.py b/src/frostfs_testlib/hooks.py index c56c75a..d7e4cc8 100644 --- a/src/frostfs_testlib/hooks.py +++ b/src/frostfs_testlib/hooks.py @@ -17,7 +17,7 @@ def pytest_add_frostfs_marker(items: list[pytest.Item]): @pytest.hookimpl(trylast=True) def pytest_collection_modifyitems(items: list[pytest.Item]): # The order of running tests corresponded to the suites - items.sort(key=lambda item: item.nodeid) + items.sort(key=lambda item: item.location[0]) # Change order of tests based on @pytest.mark.order() marker def order(item: pytest.Item) -> int: From 8bedd9b3d6d57b493a93888f35177e58eb35fb0d Mon Sep 17 00:00:00 2001 From: "a.berezin" Date: Wed, 19 Mar 2025 14:33:25 +0300 Subject: [PATCH 354/363] [#367] Use full date during log Signed-off-by: a.berezin --- src/frostfs_testlib/shell/local_shell.py | 2 +- src/frostfs_testlib/shell/ssh_shell.py | 23 +++++++---------------- src/frostfs_testlib/utils/cli_utils.py | 2 +- 3 files changed, 9 insertions(+), 18 deletions(-) diff --git a/src/frostfs_testlib/shell/local_shell.py b/src/frostfs_testlib/shell/local_shell.py index 746070f..c0f3b06 100644 --- a/src/frostfs_testlib/shell/local_shell.py +++ b/src/frostfs_testlib/shell/local_shell.py @@ -141,6 +141,6 @@ class LocalShell(Shell): f"RETCODE: {result.return_code}\n\n" f"STDOUT:\n{result.stdout}\n" f"STDERR:\n{result.stderr}\n" - f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}" + f"Start / End / Elapsed\t {start_time} / {end_time} / {elapsed_time}" ) reporter.attach(command_attachment, "Command execution.txt") diff --git a/src/frostfs_testlib/shell/ssh_shell.py b/src/frostfs_testlib/shell/ssh_shell.py index e718b4d..3f13dca 100644 --- a/src/frostfs_testlib/shell/ssh_shell.py +++ b/src/frostfs_testlib/shell/ssh_shell.py @@ -68,8 +68,7 @@ class SshConnectionProvider: try: if creds.ssh_key_path: logger.info( - f"Trying to connect to host {host} as {creds.ssh_login} using SSH key " - f"{creds.ssh_key_path} (attempt {attempt})" + f"Trying to connect to host {host} as {creds.ssh_login} using SSH key " 
f"{creds.ssh_key_path} (attempt {attempt})" ) connection.connect( hostname=host, @@ -79,9 +78,7 @@ class SshConnectionProvider: timeout=self.CONNECTION_TIMEOUT, ) else: - logger.info( - f"Trying to connect to host {host} as {creds.ssh_login} using password " f"(attempt {attempt})" - ) + logger.info(f"Trying to connect to host {host} as {creds.ssh_login} using password " f"(attempt {attempt})") connection.connect( hostname=host, port=port, @@ -104,9 +101,7 @@ class SshConnectionProvider: connection.close() can_retry = attempt + 1 < self.SSH_CONNECTION_ATTEMPTS if can_retry: - logger.warn( - f"Can't connect to host {host}, will retry after {self.SSH_ATTEMPTS_INTERVAL}s. Error: {exc}" - ) + logger.warn(f"Can't connect to host {host}, will retry after {self.SSH_ATTEMPTS_INTERVAL}s. Error: {exc}") sleep(self.SSH_ATTEMPTS_INTERVAL) continue logger.exception(f"Can't connect to host {host}") @@ -139,7 +134,7 @@ def log_command(func): f"RC:\n {result.return_code}\n" f"STDOUT:\n{textwrap.indent(result.stdout, ' ')}\n" f"STDERR:\n{textwrap.indent(result.stderr, ' ')}\n" - f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}" + f"Start / End / Elapsed\t {start_time} / {end_time} / {elapsed_time}" ) if not options.no_log: @@ -185,13 +180,11 @@ class SSHShell(Shell): private_key_passphrase: Optional[str] = None, port: str = "22", command_inspectors: Optional[list[CommandInspector]] = None, - custom_environment: Optional[dict] = None + custom_environment: Optional[dict] = None, ) -> None: super().__init__() self.connection_provider = SshConnectionProvider() - self.connection_provider.store_creds( - host, SshCredentials(login, password, private_key_path, private_key_passphrase) - ) + self.connection_provider.store_creds(host, SshCredentials(login, password, private_key_path, private_key_passphrase)) self.host = host self.port = port @@ -220,9 +213,7 @@ class SSHShell(Shell): result = self._exec_non_interactive(command, options) if options.check and result.return_code != 0: - raise RuntimeError( - f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}\nStderr: {result.stderr}\n" - ) + raise RuntimeError(f"Command: {command}\nreturn code: {result.return_code}\nOutput: {result.stdout}\nStderr: {result.stderr}\n") return result @log_command diff --git a/src/frostfs_testlib/utils/cli_utils.py b/src/frostfs_testlib/utils/cli_utils.py index 0f9fef2..8787296 100644 --- a/src/frostfs_testlib/utils/cli_utils.py +++ b/src/frostfs_testlib/utils/cli_utils.py @@ -68,7 +68,7 @@ def _attach_allure_log(cmd: str, output: str, return_code: int, start_time: date f"COMMAND: '{cmd}'\n" f"OUTPUT:\n {output}\n" f"RC: {return_code}\n" - f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {end_time - start_time}" + f"Start / End / Elapsed\t {start_time} / {end_time} / {end_time - start_time}" ) with reporter.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'): reporter.attach(command_attachment, "Command execution") From 6bbc359ec9e653f74aa92346d0ee971e944af3cd Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Thu, 20 Mar 2025 09:05:50 +0300 Subject: [PATCH 355/363] [#368] Fixed function check metrics Signed-off-by: Ilyas Niyazov --- src/frostfs_testlib/steps/metrics.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/steps/metrics.py b/src/frostfs_testlib/steps/metrics.py index a9e545a..0d0950a 100644 --- a/src/frostfs_testlib/steps/metrics.py +++ b/src/frostfs_testlib/steps/metrics.py @@ -6,7 +6,7 @@ from 
frostfs_testlib.testing.test_control import wait_for_success @reporter.step("Check metrics result") -@wait_for_success(interval=10) +@wait_for_success(max_wait_time=300, interval=10) def check_metrics_counter( cluster_nodes: list[ClusterNode], operator: str = "==", @@ -19,7 +19,7 @@ def check_metrics_counter( counter_act += get_metrics_value(cluster_node, parse_from_command, **metrics_greps) assert eval( f"{counter_act} {operator} {counter_exp}" - ), f"Expected: {counter_exp} {operator} Actual: {counter_act} in nodes: {cluster_nodes}" + ), f"Actual: {counter_act} {operator} Expected: {counter_exp} in nodes: {cluster_nodes}" @reporter.step("Get metrics value from node: {node}") From c8eec119062001768568d1d0da3e93f7d761dfb8 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Thu, 20 Mar 2025 17:11:45 +0300 Subject: [PATCH 356/363] [#369] Set region in S3 STS client Signed-off-by: Yaroslava Lukoyanova --- src/frostfs_testlib/clients/s3/boto3_client.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/frostfs_testlib/clients/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py index 4157bd6..bceecdf 100644 --- a/src/frostfs_testlib/clients/s3/boto3_client.py +++ b/src/frostfs_testlib/clients/s3/boto3_client.py @@ -90,6 +90,7 @@ class Boto3ClientWrapper(S3ClientWrapper): aws_access_key_id=self.access_key_id, aws_secret_access_key=self.secret_access_key, endpoint_url=iam_endpoint, + region_name=self.region, verify=False, ) From c4ab14fce8acf26907132f91f0b3566edc853bf7 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Fri, 21 Mar 2025 20:03:06 +0300 Subject: [PATCH 357/363] [#370] Unify `delete_object_tagging` method in S3 clients Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/clients/s3/boto3_client.py | 2 +- src/frostfs_testlib/clients/s3/interfaces.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/clients/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py index bceecdf..dd13e6f 100644 --- a/src/frostfs_testlib/clients/s3/boto3_client.py +++ b/src/frostfs_testlib/clients/s3/boto3_client.py @@ -770,7 +770,7 @@ class Boto3ClientWrapper(S3ClientWrapper): return response.get("TagSet") @reporter.step("Delete object tagging") - def delete_object_tagging(self, bucket: str, key: str) -> None: + def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None: params = self._convert_to_s3_params(locals()) self._exec_request( self.boto3_client.delete_object_tagging, diff --git a/src/frostfs_testlib/clients/s3/interfaces.py b/src/frostfs_testlib/clients/s3/interfaces.py index d636182..b35d3bf 100644 --- a/src/frostfs_testlib/clients/s3/interfaces.py +++ b/src/frostfs_testlib/clients/s3/interfaces.py @@ -377,7 +377,7 @@ class S3ClientWrapper(HumanReadableABC): """Returns the tag-set of an object.""" @abstractmethod - def delete_object_tagging(self, bucket: str, key: str) -> None: + def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None: """Removes the entire tag set from the specified object.""" @abstractmethod From d38808a1f55e370d43e868e7551127dea6506998 Mon Sep 17 00:00:00 2001 From: Yaroslava Lukoyanova Date: Mon, 3 Feb 2025 12:44:21 +0300 Subject: [PATCH 358/363] [#354] Support of presigned url methods for S3 Signed-off-by: Yaroslava Lukoyanova --- .../clients/s3/aws_cli_client.py | 9 +++++++++ src/frostfs_testlib/clients/s3/boto3_client.py | 18 +++++++++++++++++- src/frostfs_testlib/clients/s3/interfaces.py | 4 ++++ 
 src/frostfs_testlib/steps/http_gate.py | 4 ++++
 4 files changed, 34 insertions(+), 1 deletion(-)

diff --git a/src/frostfs_testlib/clients/s3/aws_cli_client.py b/src/frostfs_testlib/clients/s3/aws_cli_client.py
index a2e3fc7..c1dd6b6 100644
--- a/src/frostfs_testlib/clients/s3/aws_cli_client.py
+++ b/src/frostfs_testlib/clients/s3/aws_cli_client.py
@@ -959,6 +959,15 @@ class AwsCliClient(S3ClientWrapper):
 
         return json_output
 
+    @reporter.step("Create presign url for the object")
+    def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str:
+        # AWS CLI does not support method definition and works only in 'get_object' state by default
+        cmd = f"aws {self.common_flags} s3 presign s3://{bucket}/{key} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}"
+        if expires_in:
+            cmd += f" --expires-in {expires_in}"
+        response = self.local_shell.exec(cmd).stdout
+        return response.strip()
+
     # IAM METHODS #
     # Some methods don't have checks because AWS is silent in some cases (delete, attach, etc.)
 
diff --git a/src/frostfs_testlib/clients/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py
index dd13e6f..0c4e8e4 100644
--- a/src/frostfs_testlib/clients/s3/boto3_client.py
+++ b/src/frostfs_testlib/clients/s3/boto3_client.py
@@ -48,7 +48,13 @@ class Boto3ClientWrapper(S3ClientWrapper):
         self.region = region
 
         self.session = boto3.Session()
-        self.config = Config(retries={"max_attempts": MAX_REQUEST_ATTEMPTS, "mode": RETRY_MODE})
+        self.config = Config(
+            signature_version="s3v4",
+            retries={
+                "max_attempts": MAX_REQUEST_ATTEMPTS,
+                "mode": RETRY_MODE,
+            },
+        )
 
         self.set_endpoint(s3gate_endpoint)
 
@@ -813,6 +819,16 @@ class Boto3ClientWrapper(S3ClientWrapper):
     ) -> dict:
         raise NotImplementedError("Cp is not supported for boto3 client")
 
+    @reporter.step("Create presign url for the object")
+    def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str:
+        response = self._exec_request(
+            method=self.boto3_client.generate_presigned_url,
+            params={"ClientMethod": method, "Params": {"Bucket": bucket, "Key": key}, "ExpiresIn": expires_in},
+            endpoint=self.s3gate_endpoint,
+            profile=self.profile,
+        )
+        return response
+
     # END OBJECT METHODS #
 
     # IAM METHODS #
diff --git a/src/frostfs_testlib/clients/s3/interfaces.py b/src/frostfs_testlib/clients/s3/interfaces.py
index b35d3bf..0d03a28 100644
--- a/src/frostfs_testlib/clients/s3/interfaces.py
+++ b/src/frostfs_testlib/clients/s3/interfaces.py
@@ -425,6 +425,10 @@ class S3ClientWrapper(HumanReadableABC):
     ) -> dict:
         """cp directory TODO: Add proper description"""
 
+    @abstractmethod
+    def create_presign_url(self, method: str, bucket: str, key: str, expires_in: Optional[int] = 3600) -> str:
+        """Creates presign URL"""
+
     # END OF OBJECT METHODS #
 
     # IAM METHODS #
diff --git a/src/frostfs_testlib/steps/http_gate.py b/src/frostfs_testlib/steps/http_gate.py
index 51b0301..aa4abf2 100644
--- a/src/frostfs_testlib/steps/http_gate.py
+++ b/src/frostfs_testlib/steps/http_gate.py
@@ -33,6 +33,7 @@ def get_via_http_gate(
     oid: str,
     node: ClusterNode,
     request_path: Optional[str] = None,
+    presigned_url: Optional[str] = None,
    timeout: Optional[int] = 300,
 ):
     """
@@ -47,6 +48,9 @@ def get_via_http_gate(
     if request_path:
         request = f"{node.http_gate.get_endpoint()}{request_path}"
 
+    if presigned_url:
+        request = presigned_url
+
     response = requests.get(request, stream=True, timeout=timeout, verify=False)
 
     if not response.ok:

From 80226ee0a8c2e309394bc7de13f0dba794e4fad6 Mon Sep 
17 00:00:00 2001 From: Kirill Sosnovskikh Date: Thu, 27 Mar 2025 15:25:24 +0300 Subject: [PATCH 359/363] [#371] Add IAM and STS clients to boto3-stubs Signed-off-by: Kirill Sosnovskikh --- pyproject.toml | 2 +- requirements.txt | 4 ++-- src/frostfs_testlib/clients/s3/boto3_client.py | 6 ++++-- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 2778f8a..d62f04b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,7 +28,7 @@ dependencies = [ "pytest==7.1.2", "tenacity==8.0.1", "boto3==1.35.30", - "boto3-stubs[essential]==1.35.30", + "boto3-stubs[s3,iam,sts]==1.35.30", ] requires-python = ">=3.10" diff --git a/requirements.txt b/requirements.txt index a0bcc11..56d9b83 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,7 +9,7 @@ testrail-api==1.12.0 tenacity==8.0.1 pytest==7.1.2 boto3==1.35.30 -boto3-stubs[essential]==1.35.30 +boto3-stubs[s3,iam,sts]==1.35.30 pydantic==2.10.6 # Dev dependencies @@ -22,4 +22,4 @@ pylint==2.17.4 # Packaging dependencies build==0.8.0 setuptools==65.3.0 -twine==4.0.1 +twine==4.0.1 \ No newline at end of file diff --git a/src/frostfs_testlib/clients/s3/boto3_client.py b/src/frostfs_testlib/clients/s3/boto3_client.py index 0c4e8e4..ac4d55b 100644 --- a/src/frostfs_testlib/clients/s3/boto3_client.py +++ b/src/frostfs_testlib/clients/s3/boto3_client.py @@ -10,7 +10,9 @@ import boto3 import urllib3 from botocore.config import Config from botocore.exceptions import ClientError +from mypy_boto3_iam import IAMClient from mypy_boto3_s3 import S3Client +from mypy_boto3_sts import STSClient from frostfs_testlib import reporter from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict @@ -39,8 +41,8 @@ class Boto3ClientWrapper(S3ClientWrapper): self.boto3_client: S3Client = None self.iam_endpoint: str = "" - self.boto3_iam_client: S3Client = None - self.boto3_sts_client: S3Client = None + self.boto3_iam_client: IAMClient = None + self.boto3_sts_client: STSClient = None self.access_key_id = access_key_id self.secret_access_key = secret_access_key From aab4d4f657590dcb1be0231b477862192d51c33c Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Tue, 15 Apr 2025 12:26:35 +0300 Subject: [PATCH 360/363] [#373] Add step to httpClient for log write Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/clients/http/http_client.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/frostfs_testlib/clients/http/http_client.py b/src/frostfs_testlib/clients/http/http_client.py index aebd5ef..16d7707 100644 --- a/src/frostfs_testlib/clients/http/http_client.py +++ b/src/frostfs_testlib/clients/http/http_client.py @@ -15,14 +15,14 @@ LOGGING_CONFIG = { "handlers": {"default": {"class": "logging.StreamHandler", "formatter": "http", "stream": "ext://sys.stderr"}}, "formatters": { "http": { - "format": "%(levelname)s [%(asctime)s] %(name)s - %(message)s", + "format": "%(asctime)s [%(levelname)s] %(name)s - %(message)s", "datefmt": "%Y-%m-%d %H:%M:%S", } }, "loggers": { "httpx": { "handlers": ["default"], - "level": "DEBUG", + "level": "ERROR", }, "httpcore": { "handlers": ["default"], @@ -43,7 +43,7 @@ class HttpClient: response = client.request(method, url, **kwargs) self._attach_response(response, **kwargs) - logger.info(f"Response: {response.status_code} => {response.text}") + # logger.info(f"Response: {response.status_code} => {response.text}") if expected_status_code: assert ( @@ -131,6 +131,7 @@ class HttpClient: reporter.attach(report, "Requests 
Info") reporter.attach(curl_request, "CURL") + cls._write_log(curl_request, response_body, response.status_code) @classmethod def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str, files: dict) -> str: @@ -143,3 +144,9 @@ class HttpClient: # Option -k means no verify SSL return f"curl {url} -X {method} {headers}{data} -k" + + @classmethod + def _write_log(cls, curl: str, res_body: str, res_code: int) -> None: + if res_body: + curl += f"\nResponse: {res_code}\n{res_body}" + logger.info(f"{curl}") From 9ad620121e3871f9eab4e5afd3495197541a90a9 Mon Sep 17 00:00:00 2001 From: Ilyas Niyazov Date: Wed, 9 Apr 2025 16:15:46 +0300 Subject: [PATCH 361/363] [#372] Added decorator wait until stabilization metric values Signed-off-by: Ilyas Niyazov --- .../storage/dataclasses/metrics.py | 48 ++++++++++++++++++- 1 file changed, 46 insertions(+), 2 deletions(-) diff --git a/src/frostfs_testlib/storage/dataclasses/metrics.py b/src/frostfs_testlib/storage/dataclasses/metrics.py index 81e757c..8969015 100644 --- a/src/frostfs_testlib/storage/dataclasses/metrics.py +++ b/src/frostfs_testlib/storage/dataclasses/metrics.py @@ -1,3 +1,9 @@ +import time +from functools import wraps +from typing import Callable + +import pytest + from frostfs_testlib.hosting import Host from frostfs_testlib.shell.interfaces import CommandResult @@ -7,11 +13,11 @@ class Metrics: self.storage = StorageMetrics(host, metrics_endpoint) - class StorageMetrics: """ Class represents storage metrics in a cluster """ + def __init__(self, host: Host, metrics_endpoint: str) -> None: self.host = host self.metrics_endpoint = metrics_endpoint @@ -29,8 +35,46 @@ class StorageMetrics: additional_greps = " |grep ".join([grep_command for grep_command in greps.values()]) result = shell.exec(f"curl -s {self.metrics_endpoint} | grep {additional_greps}") return result - + def get_all_metrics(self) -> CommandResult: shell = self.host.get_shell() result = shell.exec(f"curl -s {self.metrics_endpoint}") return result + + +def wait_until_metric_result_is_stable( + relative_deviation: float = None, absolute_deviation: int = None, max_attempts: int = 10, sleep_interval: int = 30 +): + """ + A decorator function that repeatedly calls the decorated function until its result stabilizes + within a specified relative tolerance or until the maximum number of attempts is reached. + + This decorator is useful for scenarios where a function returns a metric or value that may fluctuate + over time, and you want to ensure that the result has stabilized before proceeding. 
+ """ + + def decorator(func: Callable): + @wraps(func) + def wrapper(*args, **kwargs): + last_result = None + for _ in range(max_attempts): + # first function call + first_result = func(*args, **kwargs) + + # waiting before the second call + time.sleep(sleep_interval) + + # second function call + last_result = func(*args, **kwargs) + + # checking value stability + if first_result == pytest.approx(last_result, rel=relative_deviation, abs=absolute_deviation): + return last_result + + # if stability is not achieved, return the last value + if last_result is not None: + return last_result + + return wrapper + + return decorator From 517a7b932261a142b2a86b2687843d8fc9651ce0 Mon Sep 17 00:00:00 2001 From: Kirill Sosnovskikh Date: Mon, 28 Apr 2025 18:43:44 +0300 Subject: [PATCH 362/363] [#377] Update text for "subject not found" error Signed-off-by: Kirill Sosnovskikh --- src/frostfs_testlib/resources/error_patterns.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frostfs_testlib/resources/error_patterns.py b/src/frostfs_testlib/resources/error_patterns.py index 6c0cb14..15e2977 100644 --- a/src/frostfs_testlib/resources/error_patterns.py +++ b/src/frostfs_testlib/resources/error_patterns.py @@ -1,6 +1,6 @@ # Regex patterns of status codes of Container service CONTAINER_NOT_FOUND = "code = 3072.*message = container not found" -SUBJECT_NOT_FOUND = "code = 1024.*message = frostfs error: chain/client.*subject not found.*" +SUBJECT_NOT_FOUND = "code = 1024.*message =.*chain/client.*subject not found.*" # Regex patterns of status codes of Object service MALFORMED_REQUEST = "code = 1024.*message = malformed request" From c27fb5747bd010fcb0cce82b3d3daf5466838743 Mon Sep 17 00:00:00 2001 From: Dmitriy Zayakin Date: Tue, 29 Apr 2025 10:43:02 +0300 Subject: [PATCH 363/363] [#376] Added ape manager group command to grpc client Signed-off-by: Dmitriy Zayakin Signed-off-by: Dmitriy Zayakin --- src/frostfs_testlib/cli/frostfs_cli/cli.py | 1 + .../grpc_operations/client_wrappers.py | 1 + .../implementations/__init__.py | 1 + .../implementations/ape_manager.py | 79 +++++++++++++++++++ .../implementations/container.py | 11 +++ .../grpc_operations/interfaces/__init__.py | 1 + .../grpc_operations/interfaces/ape_manager.py | 48 +++++++++++ .../grpc_operations/interfaces/container.py | 4 + .../grpc_operations/interfaces_wrapper.py | 4 + 9 files changed, 150 insertions(+) create mode 100644 src/frostfs_testlib/storage/grpc_operations/implementations/ape_manager.py create mode 100644 src/frostfs_testlib/storage/grpc_operations/interfaces/ape_manager.py diff --git a/src/frostfs_testlib/cli/frostfs_cli/cli.py b/src/frostfs_testlib/cli/frostfs_cli/cli.py index d83b7ae..7874f18 100644 --- a/src/frostfs_testlib/cli/frostfs_cli/cli.py +++ b/src/frostfs_testlib/cli/frostfs_cli/cli.py @@ -29,6 +29,7 @@ class FrostfsCli: util: FrostfsCliUtil version: FrostfsCliVersion control: FrostfsCliControl + ape_manager: FrostfsCliApeManager def __init__(self, shell: Shell, frostfs_cli_exec_path: str, config_file: Optional[str] = None): self.accounting = FrostfsCliAccounting(shell, frostfs_cli_exec_path, config=config_file) diff --git a/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py b/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py index c1e3a31..d9f94b2 100644 --- a/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py +++ b/src/frostfs_testlib/storage/grpc_operations/client_wrappers.py @@ -8,6 +8,7 @@ class CliClientWrapper(interfaces_wrapper.GrpcClientWrapper): 
self.object: interfaces.ObjectInterface = implementations.ObjectOperations(self.cli) self.container: interfaces.ContainerInterface = implementations.ContainerOperations(self.cli) self.netmap: interfaces.NetmapInterface = implementations.NetmapOperations(self.cli) + self.ape_manager: interfaces.ApeManagerInterface = implementations.ApeManagerOperations(self.cli) class RpcClientWrapper(interfaces_wrapper.GrpcClientWrapper): diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py b/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py index 18e8ae5..df820fa 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/__init__.py @@ -1,3 +1,4 @@ +from .ape_manager import ApeManagerOperations from .chunks import ChunksOperations from .container import ContainerOperations from .netmap import NetmapOperations diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/ape_manager.py b/src/frostfs_testlib/storage/grpc_operations/implementations/ape_manager.py new file mode 100644 index 0000000..070d8a6 --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/implementations/ape_manager.py @@ -0,0 +1,79 @@ +from typing import Optional + +from frostfs_testlib import reporter +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli +from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT + + +class ApeManagerOperations: + def __init__(self, cli: FrostfsCli): + self.cli = cli + + @reporter.step("Add ape rule") + def add( + self, + rpc_endpoint: str, + chain_id: Optional[str] = None, + chain_id_hex: Optional[str] = None, + path: Optional[str] = None, + rule: Optional[str] | Optional[list[str]] = None, + target_name: Optional[str] = None, + target_type: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ): + return self.cli.ape_manager.add( + rpc_endpoint=rpc_endpoint, + chain_id=chain_id, + chain_id_hex=chain_id_hex, + path=path, + rule=rule, + target_name=target_name, + target_type=target_type, + wallet=wallet, + address=address, + timeout=timeout, + ) + + @reporter.step("Get list APE rules") + def list( + self, + rpc_endpoint: str, + target_name: Optional[str] = None, + target_type: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ): + return self.cli.ape_manager.list( + rpc_endpoint=rpc_endpoint, + target_name=target_name, + target_type=target_type, + wallet=wallet, + address=address, + timeout=timeout, + ) + + @reporter.step("Remove APE rule") + def remove( + self, + rpc_endpoint: str, + chain_id: Optional[str] = None, + chain_id_hex: Optional[str] = None, + target_name: Optional[str] = None, + target_type: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = CLI_DEFAULT_TIMEOUT, + ): + return self.cli.ape_manager.remove( + rpc_endpoint=rpc_endpoint, + chain_id=chain_id, + chain_id_hex=chain_id_hex, + target_name=target_name, + target_type=target_type, + wallet=wallet, + address=address, + timeout=timeout, + ) diff --git a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py index 75af00c..afdf6cb 100644 --- a/src/frostfs_testlib/storage/grpc_operations/implementations/container.py +++ 
b/src/frostfs_testlib/storage/grpc_operations/implementations/container.py @@ -1,6 +1,7 @@ import json import logging import re +from time import sleep from typing import List, Optional, Union from frostfs_testlib import reporter @@ -301,6 +302,16 @@ class ContainerOperations(interfaces.ContainerInterface): resolver: BucketContainerResolver = resolver_cls() return resolver.resolve(node, name) + @reporter.step("Wait create container, with list") + def wait_creation(self, cid: str, endpoint: str, attempts: int = 15, sleep_interval: int = 1): + for _ in range(attempts): + containers = self.list(endpoint) + if cid in containers: + return + logger.info(f"There is no {cid} in {containers} yet; sleep {sleep_interval} and continue") + sleep(sleep_interval) + raise RuntimeError(f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting") + def _parse_cid(self, output: str) -> str: """ Parses container ID from a given CLI output. The input string we expect: diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py index 17b3e9c..379bbe0 100644 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/__init__.py @@ -1,3 +1,4 @@ +from .ape_manager import ApeManagerInterface from .chunks import ChunksInterface from .container import ContainerInterface from .netmap import NetmapInterface diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/ape_manager.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/ape_manager.py new file mode 100644 index 0000000..5b198bc --- /dev/null +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/ape_manager.py @@ -0,0 +1,48 @@ +from abc import ABC, abstractmethod +from typing import Optional + +from frostfs_testlib.shell.interfaces import CommandResult + + +class ApeManagerInterface(ABC): + @abstractmethod + def add( + self, + rpc_endpoint: str, + chain_id: Optional[str] = None, + chain_id_hex: Optional[str] = None, + path: Optional[str] = None, + rule: Optional[str] | Optional[list[str]] = None, + target_name: Optional[str] = None, + target_type: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + pass + + @abstractmethod + def list( + self, + rpc_endpoint: str, + target_name: Optional[str] = None, + target_type: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + pass + + @abstractmethod + def remove( + self, + rpc_endpoint: str, + chain_id: Optional[str] = None, + chain_id_hex: Optional[str] = None, + target_name: Optional[str] = None, + target_type: Optional[str] = None, + wallet: Optional[str] = None, + address: Optional[str] = None, + timeout: Optional[str] = None, + ) -> CommandResult: + pass diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py b/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py index d5e3eeb..397f7b2 100644 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces/container.py @@ -123,3 +123,7 @@ class ContainerInterface(ABC): ) -> List[ClusterNode]: """Show the nodes participating in the container in the current epoch.""" raise NotImplementedError("No implemethed method nodes") + + @abstractmethod + def 
wait_creation(self, cid: str, endpoint: str, attempts: Optional[str], sleep_interval: Optional[int]) -> None: + raise NotImplementedError("No implemented method wait_creation") diff --git a/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py b/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py index 6574012..5edc99f 100644 --- a/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py +++ b/src/frostfs_testlib/storage/grpc_operations/interfaces_wrapper.py @@ -1,10 +1,14 @@ from abc import ABC +from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli + from . import interfaces class GrpcClientWrapper(ABC): def __init__(self) -> None: + self.cli: FrostfsCli self.object: interfaces.ObjectInterface self.container: interfaces.ContainerInterface self.netmap: interfaces.NetmapInterface + self.ape_manager: interfaces.ApeManagerInterface
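To close the series, a hedged usage sketch of the APE manager surface and the container wait helper added by the last patch. The method names and parameters come from the interfaces above; `grpc_client` is assumed to be a `CliClientWrapper` built elsewhere in a test fixture, and the endpoint, container id, chain id and rule text are illustrative placeholders, not values from this repository:

    # Sketch under assumptions: grpc_client: CliClientWrapper is provided by a
    # fixture; RPC_ENDPOINT and CID are placeholders chosen for illustration.
    RPC_ENDPOINT = "s01.frostfs.devenv:8080"
    CID = "HnA2vbxiPFEuqv4itsWTXfnYJiSHJVfSM4XnIR7C53w"

    # Block until the freshly created container shows up in the listing.
    grpc_client.container.wait_creation(CID, endpoint=RPC_ENDPOINT, attempts=15, sleep_interval=1)

    # Attach an APE rule chain to the container, inspect it, then clean up.
    grpc_client.ape_manager.add(
        rpc_endpoint=RPC_ENDPOINT,
        chain_id="demoChain",
        rule="allow Object.Get *",  # rule text is an assumption, shown only as an example
        target_name=CID,
        target_type="container",
    )
    grpc_client.ape_manager.list(rpc_endpoint=RPC_ENDPOINT, target_name=CID, target_type="container")
    grpc_client.ape_manager.remove(
        rpc_endpoint=RPC_ENDPOINT,
        chain_id="demoChain",
        target_name=CID,
        target_type="container",
    )

Because `ape_manager` sits next to `object`, `container` and `netmap` on `GrpcClientWrapper`, tests can drive APE rules through the same client object they already use for storage operations instead of constructing a separate `FrostfsCli` by hand.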