commit dadfd90dcd6387f71fb477532372a4b6dba5789d Author: alexvanin Date: Fri Jul 10 17:17:51 2020 +0300 Initial commit Initial public review release v0.10.0 diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..ea5f32935 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,8 @@ +.idea +.vscode +.git +docker-compose.yml +Dockerfile +temp +.dockerignore +docker \ No newline at end of file diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 000000000..b002a5dde --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +/**/*.pb.go -diff binary diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..690f5d9ef --- /dev/null +++ b/.gitignore @@ -0,0 +1,8 @@ +bin +temp +cmd/test +/plugins/ +/vendor/ + +testfile +.neofs-cli.yml diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 000000000..7fc9abf2c --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,136 @@ +# https://habr.com/company/roistat/blog/413175/ +# https://github.com/golangci/golangci-lint +linters-settings: + govet: + check-shadowing: false + golint: + # minimal confidence for issues, default is 0.8 + min-confidence: 0.8 + gofmt: + # simplify code: gofmt with `-s` option, true by default + simplify: true + gocyclo: + min-complexity: 30 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 2 + gosimple: + gocritic: + # Which checks should be enabled; can't be combined with 'disabled-checks'; + # See https://go-critic.github.io/overview#checks-overview + # To check which checks are enabled run `GL_DEBUG=gocritic golangci-lint run` + # By default list of stable checks is used. +# enabled-checks: +# - rangeValCopy + # Which checks should be disabled; can't be combined with 'enabled-checks'; default is empty + disabled-checks: + - regexpMust + # Enable multiple checks by tags, run `GL_DEBUG=gocritic golangci-lint` run to see all tags and checks. + # Empty list by default. See https://github.com/go-critic/go-critic#usage -> section "Tags". + enabled-tags: + - performance + + settings: # settings passed to gocritic + captLocal: # must be valid enabled check name + paramsOnly: true + rangeValCopy: + sizeThreshold: 32 +# depguard: +# list-type: blacklist +# include-go-root: false +# packages: +# - github.com/davecgh/go-spew/spew + lll: + # max line length, lines longer will be reported. Default is 120. + # '\t' is counted as 1 character by default, and can be changed with the tab-width option + line-length: 120 + # tab width in spaces. Default to 1. + tab-width: 1 + unused: + # treat code as a program (not a library) and report unused exported identifiers; default is false. + # XXX: if you enable this setting, unused will report a lot of false-positives in text editors: + # if it's called for subdir of a project it can't find funcs usages. All text editor integrations + # with golangci-lint call it on a directory with the changed file. + check-exported: false + unparam: + # Inspect exported functions, default is false. Set to true if no external program/library imports your code. + # XXX: if you enable this setting, unparam will report a lot of false-positives in text editors: + # if it's called for subdir of a project it can't find external interfaces. All text editor integrations + # with golangci-lint call it on a directory with the changed file. 
+ check-exported: false + nakedret: + # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 + max-func-lines: 30 + + +linters: + enable-all: true + fast: false + disable: + - gochecknoglobals +# - maligned +# - prealloc +# disable-all: false +# presets: +# - bugs +# - unused + +# options for analysis running +run: + # default concurrency is a available CPU number +# concurrency: 8 + + # timeout for analysis, e.g. 30s, 5m, default is 1m +# deadline: 1m + + # exit code when at least one issue was found, default is 1 +# issues-exit-code: 1 + + # include test files or not, default is true +# tests: true + + # list of build tags, all linters use it. Default is empty list. +# build-tags: +# - mytag + + # which dirs to skip: they won't be analyzed; + # can use regexp here: generated.*, regexp is applied on full path; + # default value is empty list, but next dirs are always skipped independently + # from this option's value: + # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ +# skip-dirs: +# - src/external_libs +# - autogenerated_by_my_lib + + # which files to skip: they will be analyzed, but issues from them + # won't be reported. Default value is empty list, but there is + # no need to include all autogenerated files, we confidently recognize + # autogenerated files. If it's not please let us know. +# skip-files: +# - ".*\\.my\\.go$" +# - lib/bad.go + + # by default isn't set. If set we pass it to "go list -mod={option}". From "go help modules": + # If invoked with -mod=readonly, the go command is disallowed from the implicit + # automatic updating of go.mod described above. Instead, it fails when any changes + # to go.mod are needed. This setting is most useful to check that go.mod does + # not need updates, such as in a continuous integration and testing system. + # If invoked with -mod=vendor, the go command assumes that the vendor + # directory holds the correct copies of dependencies and ignores + # the dependency descriptions in go.mod. +# modules-download-mode: readonly|release|vendor + +# output configuration options +output: + # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" + format: tab + + # print lines of code with issue, default is true + print-issued-lines: true + + # print linter name in the end of issue text, default is true + print-linter-name: true \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 000000000..7e4a00da0 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,6 @@ +# Changelog +Changelog for NeoFS Node + +## [0.10.0] - 2020-07-10 + +First public review release. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..b11fe2a49 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,3 @@ +# Contributing + +We do not accept any contributions. As yet. diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 000000000..82e568138 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,21 @@ +FROM golang:1.14-alpine as builder + +ARG BUILD=now +ARG VERSION=dev +ARG REPO=repository + +WORKDIR /src + +COPY . 
/src + +RUN apk add --update make bash +RUN make bin/neofs-node + +# Executable image +FROM scratch AS neofs-node + +WORKDIR / + +COPY --from=builder /src/bin/neofs-node /bin/neofs-node + +CMD ["neofs-node"] diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..f288702d2 --- /dev/null +++ b/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. 
+States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..48dcf385e --- /dev/null +++ b/Makefile @@ -0,0 +1,99 @@ +#!/usr/bin/make -f +SHELL = bash + +REPO ?= $(shell go list -m) +VERSION ?= "$(shell git describe --tags --dirty --always)" + +HUB_IMAGE ?= nspccdev/neofs +HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')" + +BIN = bin +DIRS= $(BIN) + +# List of binaries to build. May be automated. 
+CMDS = neofs-node +CMS = $(addprefix $(BIN)/, $(CMDS)) +BINS = $(addprefix $(BIN)/, $(CMDS)) + +.PHONY: help dep clean fmt + +# To build a specific binary, use its name prefixed with bin/ as a target +# For example `make bin/neofs-node` will build only the Storage node binary +# Just `make` will +# Build all possible binaries +all: $(DIRS) $(BINS) + +$(BINS): $(DIRS) dep + @echo "⇒ Build $@" + GOGC=off \ + CGO_ENABLED=0 \ + go build -v -mod=vendor -trimpath \ + -ldflags "-X ${REPO}/misc.Version=$(VERSION) -X ${REPO}/misc.Build=${BUILD}" \ + -o $@ ./cmd/$(notdir $@) + +$(DIRS): + @echo "⇒ Ensure dir: $@" + @mkdir -p $@ + +# Pull go dependencies +dep: + @printf "⇒ Ensure vendor: " + @go mod tidy -v && echo OK || (echo fail && exit 2) + @printf "⇒ Download requirements: " + @go mod download && echo OK || (echo fail && exit 2) + @printf "⇒ Store vendor locally: " + @go mod vendor && echo OK || (echo fail && exit 2) + +# Regenerate proto files: +protoc: + @GOPRIVATE=github.com/nspcc-dev go mod tidy -v + @GOPRIVATE=github.com/nspcc-dev go mod vendor + # Install specific version for gogo-proto + @go list -f '{{.Path}}/...@{{.Version}}' -m github.com/gogo/protobuf | xargs go get -v + # Install specific version for protobuf lib + @go list -f '{{.Path}}/...@{{.Version}}' -m github.com/golang/protobuf | xargs go get -v + # Protoc generate + @for f in `find . -type f -name '*.proto' -not -path './vendor/*'`; do \ + echo "⇒ Processing $$f "; \ + protoc \ + --proto_path=.:./vendor:./vendor/github.com/nspcc-dev/neofs-api-go:/usr/local/include \ + --gofast_out=plugins=grpc,paths=source_relative:. $$f; \ + done + +# Build NeoFS Storage Node docker image +image-storage: + @echo "⇒ Build NeoFS Storage Node docker image " + @docker build \ + --build-arg REPO=$(REPO) \ + --build-arg VERSION=$(VERSION) \ + -f Dockerfile \ + -t $(HUB_IMAGE)-storage:$(HUB_TAG) . + +# Build all Docker images +images: image-storage + +# Reformat code +fmt: + @[ ! -z `which goimports` ] || (echo "Install goimports" && exit 2) + @for f in `find . 
-type f -name '*.go' -not -path './vendor/*' -not -name '*.pb.go' -prune`; do \ + echo "⇒ Processing $$f"; \ + goimports -w $$f; \ + done + +# Print version +version: + @echo $(VERSION) + +# Show this help prompt +help: + @echo ' Usage:' + @echo '' + @echo ' make ' + @echo '' + @echo ' Targets:' + @echo '' + @awk '/^#/{ comment = substr($$0,3) } comment && /^[a-zA-Z][a-zA-Z0-9_-]+ ?:/{ print " ", $$1, comment }' $(MAKEFILE_LIST) | column -t -s ':' | grep -v 'IGNORE' | sort | uniq + +clean: + rm -rf vendor + rm -rf $(BIN) diff --git a/cmd/neofs-node/defaults.go b/cmd/neofs-node/defaults.go new file mode 100644 index 000000000..4fe24b50e --- /dev/null +++ b/cmd/neofs-node/defaults.go @@ -0,0 +1,346 @@ +package main + +import ( + "time" + + "github.com/nspcc-dev/neo-go/pkg/config/netmode" + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/nspcc-dev/neofs-node/modules/morph" + "github.com/spf13/viper" +) + +func setDefaults(v *viper.Viper) { + // Logger section + { + v.SetDefault("logger.level", "debug") + v.SetDefault("logger.format", "console") + v.SetDefault("logger.trace_level", "fatal") + v.SetDefault("logger.no_disclaimer", false) // to disable app_name and app_version + + v.SetDefault("logger.sampling.initial", 1000) // todo: add description + v.SetDefault("logger.sampling.thereafter", 1000) // todo: add description + } + + // Transport section + { + v.SetDefault("transport.attempts_count", 5) + v.SetDefault("transport.attempts_ttl", "30s") + } + + // Peers section + { + v.SetDefault("peers.metrics_timeout", "5s") + v.SetDefault("peers.connections_ttl", "30s") + v.SetDefault("peers.connections_idle", "30s") + v.SetDefault("peers.keep_alive.ttl", "30s") + v.SetDefault("peers.keep_alive.ping", "100ms") + } + + // Muxer session + { + v.SetDefault("muxer.http.read_buffer_size", 0) + v.SetDefault("muxer.http.write_buffer_size", 0) + v.SetDefault("muxer.http.read_timeout", 0) + v.SetDefault("muxer.http.write_timeout", 0) + } + + // Node section + { + v.SetDefault("node.proto", "tcp") // tcp or udp + v.SetDefault("node.address", ":8080") + v.SetDefault("node.shutdown_ttl", "30s") + v.SetDefault("node.private_key", "keys/node_00.key") + + v.SetDefault("node.grpc.logging", true) + v.SetDefault("node.grpc.metrics", true) + v.SetDefault("node.grpc.billing", true) + + // Contains public keys, which can send requests to state.DumpConfig + // for now, in the future, should be replaced with ACL or something else. 
+ v.SetDefault("node.rpc.owners", []string{ + // By default we add user.key + // TODO should be removed before public release: + // or add into default Dockerfile `NEOFS_NODE_RPC_OWNERS_0=` + "031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a", + }) + } + + // Storage section + { + storageTypes := []string{ + core.BlobStore.String(), + core.MetaStore.String(), + core.SpaceMetricsStore.String(), + } + + for i := range storageTypes { + v.SetDefault("storage."+storageTypes[i]+".bucket", "boltdb") + v.SetDefault("storage."+storageTypes[i]+".path", "./temp/storage/"+storageTypes[i]) + v.SetDefault("storage."+storageTypes[i]+".perm", 0777) + // v.SetDefault("storage."+storageTypes[i]+".no_grow_sync", false) + // v.SetDefault("storage."+storageTypes[i]+".lock_timeout", "30s") + } + } + + // Object section + { + v.SetDefault("object.max_processing_size", 100) // size in MB, use 0 to remove restriction + v.SetDefault("object.workers_count", 5) + v.SetDefault("object.assembly", true) + v.SetDefault("object.window_size", 3) + + v.SetDefault("object.transformers.payload_limiter.max_payload_size", 5000) // size in KB + + // algorithm used for salt applying in range hash, for now only xor is available + v.SetDefault("object.salitor", "xor") + + // set true to check container ACL rules + v.SetDefault("object.check_acl", true) + + v.SetDefault("object.dial_timeout", "500ms") + rpcs := []string{"put", "get", "delete", "head", "search", "range", "range_hash"} + for i := range rpcs { + v.SetDefault("object."+rpcs[i]+".timeout", "5s") + v.SetDefault("object."+rpcs[i]+".log_errs", false) + } + } + + // Replication section + { + v.SetDefault("replication.manager.pool_size", 100) + v.SetDefault("replication.manager.pool_expansion_rate", 0.1) + v.SetDefault("replication.manager.read_pool_interval", "500ms") + v.SetDefault("replication.manager.push_task_timeout", "1s") + v.SetDefault("replication.manager.placement_honorer_enabled", true) + v.SetDefault("replication.manager.capacities.replicate", 1) + v.SetDefault("replication.manager.capacities.restore", 1) + v.SetDefault("replication.manager.capacities.garbage", 1) + + v.SetDefault("replication.placement_honorer.chan_capacity", 1) + v.SetDefault("replication.placement_honorer.result_timeout", "1s") + v.SetDefault("replication.placement_honorer.timeouts.put", "5s") + v.SetDefault("replication.placement_honorer.timeouts.get", "5s") + + v.SetDefault("replication.location_detector.chan_capacity", 1) + v.SetDefault("replication.location_detector.result_timeout", "1s") + v.SetDefault("replication.location_detector.timeouts.search", "5s") + + v.SetDefault("replication.storage_validator.chan_capacity", 1) + v.SetDefault("replication.storage_validator.result_timeout", "1s") + v.SetDefault("replication.storage_validator.salt_size", 64) // size in bytes + v.SetDefault("replication.storage_validator.max_payload_range_size", 64) // size in bytes + v.SetDefault("replication.storage_validator.payload_range_count", 3) + v.SetDefault("replication.storage_validator.salitor", "xor") + v.SetDefault("replication.storage_validator.timeouts.get", "5s") + v.SetDefault("replication.storage_validator.timeouts.head", "5s") + v.SetDefault("replication.storage_validator.timeouts.range_hash", "5s") + + v.SetDefault("replication.replicator.chan_capacity", 1) + v.SetDefault("replication.replicator.result_timeout", "1s") + v.SetDefault("replication.replicator.timeouts.put", "5s") + + v.SetDefault("replication.restorer.chan_capacity", 1) + 
v.SetDefault("replication.restorer.result_timeout", "1s") + v.SetDefault("replication.restorer.timeouts.get", "5s") + v.SetDefault("replication.restorer.timeouts.head", "5s") + } + + // PPROF section + { + v.SetDefault("pprof.enabled", true) + v.SetDefault("pprof.address", ":6060") + v.SetDefault("pprof.shutdown_ttl", "10s") + // v.SetDefault("pprof.read_timeout", "10s") + // v.SetDefault("pprof.read_header_timeout", "10s") + // v.SetDefault("pprof.write_timeout", "10s") + // v.SetDefault("pprof.idle_timeout", "10s") + // v.SetDefault("pprof.max_header_bytes", 1024) + } + + // Metrics section + { + v.SetDefault("metrics.enabled", true) + v.SetDefault("metrics.address", ":8090") + v.SetDefault("metrics.shutdown_ttl", "10s") + // v.SetDefault("metrics.read_header_timeout", "10s") + // v.SetDefault("metrics.write_timeout", "10s") + // v.SetDefault("metrics.idle_timeout", "10s") + // v.SetDefault("metrics.max_header_bytes", 1024) + } + + // Workers section + { + workers := []string{ + "peers", + "boot", + "replicator", + "metrics", + "event_listener", + } + + for i := range workers { + v.SetDefault("workers."+workers[i]+".immediately", true) + v.SetDefault("workers."+workers[i]+".disabled", false) + // v.SetDefault("workers."+workers[i]+".timer", "5s") // run worker every 5sec and reset timer after job + // v.SetDefault("workers."+workers[i]+".ticker", "5s") // run worker every 5sec + } + } + + // Morph section + { + + // Endpoint + v.SetDefault( + morph.EndpointOptPath(), + "http://morph_chain.localtest.nspcc.ru:30333", + ) + + // Dial timeout + v.SetDefault( + morph.DialTimeoutOptPath(), + 5*time.Second, + ) + + v.SetDefault( + morph.MagicNumberOptPath(), + uint32(netmode.PrivNet), + ) + + { // Event listener + // Endpoint + v.SetDefault( + morph.ListenerEndpointOptPath(), + "ws://morph_chain.localtest.nspcc.ru:30333/ws", + ) + + // Dial timeout + v.SetDefault( + morph.ListenerDialTimeoutOptPath(), + 5*time.Second, + ) + } + + { // Common parameters + for _, name := range morph.ContractNames { + // Script hash + v.SetDefault( + morph.ScriptHashOptPath(name), + "c77ecae9773ad0c619ad59f7f2dd6f585ddc2e70", // LE + ) + + // Invocation fee + v.SetDefault( + morph.InvocationFeeOptPath(name), + 0, + ) + } + } + + { // Container + // Set EACL method name + v.SetDefault( + morph.ContainerContractSetEACLOptPath(), + "SetEACL", + ) + + // Get EACL method name + v.SetDefault( + morph.ContainerContractEACLOptPath(), + "EACL", + ) + + // Put method name + v.SetDefault( + morph.ContainerContractPutOptPath(), + "Put", + ) + + // Get method name + v.SetDefault( + morph.ContainerContractGetOptPath(), + "Get", + ) + + // Delete method name + v.SetDefault( + morph.ContainerContractDelOptPath(), + "Delete", + ) + + // List method name + v.SetDefault( + morph.ContainerContractListOptPath(), + "List", + ) + } + + { // Reputation + // Put method name + v.SetDefault( + morph.ReputationContractPutOptPath(), + "Put", + ) + + // List method name + v.SetDefault( + morph.ReputationContractListOptPath(), + "List", + ) + } + + { // Netmap + // AddPeer method name + v.SetDefault( + morph.NetmapContractAddPeerOptPath(), + "AddPeer", + ) + + // New epoch method name + v.SetDefault( + morph.NetmapContractNewEpochOptPath(), + "NewEpoch", + ) + + // Netmap method name + v.SetDefault( + morph.NetmapContractNetmapOptPath(), + "Netmap", + ) + + // Update state method name + v.SetDefault( + morph.NetmapContractUpdateStateOptPath(), + "UpdateState", + ) + + // IR list method name + v.SetDefault( + morph.NetmapContractIRListOptPath(), 
+ "InnerRingList", + ) + + // New epoch event type + v.SetDefault( + morph.ContractEventOptPath( + morph.NetmapContractName, + morph.NewEpochEventType, + ), + "NewEpoch", + ) + } + + { // Balance + // balanceOf method name + v.SetDefault( + morph.BalanceContractBalanceOfOptPath(), + "balanceOf", + ) + + // decimals method name + v.SetDefault( + morph.BalanceContractDecimalsOfOptPath(), + "decimals", + ) + } + } +} diff --git a/cmd/neofs-node/main.go b/cmd/neofs-node/main.go new file mode 100644 index 000000000..851f800c6 --- /dev/null +++ b/cmd/neofs-node/main.go @@ -0,0 +1,146 @@ +package main + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "flag" + "os" + "time" + + "github.com/nspcc-dev/neofs-api-go/service" + state2 "github.com/nspcc-dev/neofs-api-go/state" + crypto "github.com/nspcc-dev/neofs-crypto" + "github.com/nspcc-dev/neofs-node/lib/fix" + "github.com/nspcc-dev/neofs-node/lib/fix/config" + "github.com/nspcc-dev/neofs-node/lib/fix/web" + "github.com/nspcc-dev/neofs-node/lib/fix/worker" + "github.com/nspcc-dev/neofs-node/lib/muxer" + "github.com/nspcc-dev/neofs-node/misc" + "github.com/nspcc-dev/neofs-node/modules/node" + "github.com/nspcc-dev/neofs-node/services/public/state" + "github.com/pkg/errors" + "github.com/spf13/viper" + "go.uber.org/dig" + "go.uber.org/zap" + "google.golang.org/grpc" +) + +type params struct { + dig.In + + Debug web.Profiler `optional:"true"` + Metric web.Metrics `optional:"true"` + Worker worker.Workers `optional:"true"` + Muxer muxer.Mux + Logger *zap.Logger +} + +var ( + healthCheck bool + configFile string +) + +func runner(ctx context.Context, p params) error { + // create combined service, that would start/stop all + svc := fix.NewServices(p.Debug, p.Metric, p.Muxer, p.Worker) + + p.Logger.Info("start services") + svc.Start(ctx) + + <-ctx.Done() + + p.Logger.Info("stop services") + svc.Stop() + + return nil +} + +func check(err error) { + if err != nil { + panic(err) + } +} + +// FIXME: this is a copypaste from node settings constructor +func keyFromCfg(v *viper.Viper) (*ecdsa.PrivateKey, error) { + switch key := v.GetString("node.private_key"); key { + case "": + return nil, errors.New("`node.private_key` could not be empty") + case "generated": + return ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + default: + return crypto.LoadPrivateKey(key) + } +} + +func runHealthCheck() { + if !healthCheck { + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + cfg, err := config.NewConfig(config.Params{ + File: configFile, + Prefix: misc.Prefix, + Name: misc.NodeName, + Version: misc.Version, + + AppDefaults: setDefaults, + }) + check(err) + + addr := cfg.GetString("node.address") + + key, err := keyFromCfg(cfg) + if err != nil { + check(err) + } + + con, err := grpc.DialContext(ctx, addr, + // TODO: we must provide grpc.WithInsecure() or set credentials + grpc.WithInsecure()) + check(err) + + req := new(state.HealthRequest) + req.SetTTL(service.NonForwardingTTL) + if err := service.SignRequestData(key, req); err != nil { + check(err) + } + + res, err := state2.NewStatusClient(con). 
+ HealthCheck(ctx, req) + check(errors.Wrapf(err, "address: %q", addr)) + + var exitCode int + + if !res.Healthy { + exitCode = 2 + } + _, _ = os.Stdout.Write([]byte(res.Status + "\n")) + os.Exit(exitCode) +} + +func main() { + flag.BoolVar(&healthCheck, "health", healthCheck, "run health-check") + + // todo: if configFile is empty, we can check './config.yml' manually + flag.StringVar(&configFile, "config", configFile, "use config.yml file") + flag.Parse() + + runHealthCheck() + + fix.New(&fix.Settings{ + File: configFile, + Name: misc.NodeName, + Prefix: misc.Prefix, + Runner: runner, + Build: misc.Build, + Version: misc.Version, + + AppDefaults: setDefaults, + }, node.Module).RunAndCatch() +} diff --git a/go.mod b/go.mod new file mode 100644 index 000000000..be64ab5ed --- /dev/null +++ b/go.mod @@ -0,0 +1,48 @@ +module github.com/nspcc-dev/neofs-node + +go 1.14 + +require ( + bou.ke/monkey v1.0.2 + github.com/cenk/backoff v2.2.1+incompatible // indirect + github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a // indirect + github.com/fasthttp/router v1.0.2 + github.com/gogo/protobuf v1.3.1 + github.com/golang/protobuf v1.4.2 + github.com/google/uuid v1.1.1 + github.com/grpc-ecosystem/go-grpc-middleware v1.2.0 + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 + github.com/mr-tron/base58 v1.1.3 + github.com/multiformats/go-multiaddr v0.2.0 + github.com/multiformats/go-multiaddr-net v0.1.2 // v0.1.1 => v0.1.2 + github.com/multiformats/go-multihash v0.0.13 + github.com/nspcc-dev/hrw v1.0.9 + github.com/nspcc-dev/neo-go v0.90.0-pre.0.20200708064050-cf1e5243b90b + github.com/nspcc-dev/neofs-api-go v1.2.0 + github.com/nspcc-dev/neofs-crypto v0.3.0 + github.com/nspcc-dev/netmap v1.7.0 + github.com/panjf2000/ants/v2 v2.3.0 + github.com/peterbourgon/g2s v0.0.0-20170223122336-d4e7ad98afea // indirect + github.com/pkg/errors v0.9.1 + github.com/prometheus/client_golang v1.6.0 + github.com/rubyist/circuitbreaker v2.2.1+incompatible + github.com/soheilhy/cmux v0.1.4 + github.com/spaolacci/murmur3 v1.1.0 + github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/viper v1.7.0 + github.com/stretchr/testify v1.5.1 + github.com/valyala/fasthttp v1.9.0 + go.etcd.io/bbolt v1.3.4 + go.uber.org/atomic v1.5.1 + go.uber.org/dig v1.8.0 + go.uber.org/multierr v1.4.0 // indirect + go.uber.org/zap v1.13.0 + golang.org/x/crypto v0.0.0-20200117160349-530e935923ad // indirect + golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f // indirect + golang.org/x/tools v0.0.0-20200123022218-593de606220b // indirect + google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a + google.golang.org/grpc v1.29.1 +) + +// Used for debug reasons +// replace github.com/nspcc-dev/neofs-api-go => ../neofs-api-go diff --git a/go.sum b/go.sum new file mode 100644 index 000000000..a397a2427 --- /dev/null +++ b/go.sum @@ -0,0 +1,684 @@ +bou.ke/monkey v1.0.2 h1:kWcnsrCNUatbxncxR/ThdYqbytgOIArtYWqcQLQzKLI= +bou.ke/monkey v1.0.2/go.mod h1:OqickVX3tNx6t33n1xvtTtu85YN5s6cKwVug+oHMaIA= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod 
h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/CityOfZion/neo-go v0.62.1-pre.0.20191114145240-e740fbe708f8 h1:OY7gLA7/h7fTjiAgmwpZoHdn6Wvfy64jeumBTVytXYc= +github.com/CityOfZion/neo-go v0.62.1-pre.0.20191114145240-e740fbe708f8/go.mod h1:MJCkWUBhi9pn/CrYO1Q3P687y2KeahrOPS9BD9LDGb0= +github.com/CityOfZion/neo-go v0.70.1-pre.0.20191209120015-fccb0085941e/go.mod h1:0enZl0az8xA6PVkwzEOwPWVJGqlt/GO4hA4kmQ5Xzig= +github.com/CityOfZion/neo-go v0.70.1-pre.0.20191212173117-32ac01130d4c h1:5larjPLQVyPPEqCV0GOf4hzbCYaBZSwXH4alfiluJqE= +github.com/CityOfZion/neo-go v0.70.1-pre.0.20191212173117-32ac01130d4c/go.mod h1:JtlHfeqLywZLswKIKFnAp+yzezY4Dji9qlfQKB2OD/I= +github.com/CityOfZion/neo-go v0.71.1-pre.0.20200129171427-f773ec69fb84 h1:gcTXk9aO+PhHudJNPFJ9H4RmKjdzz40Tvv2NE1BwRZ0= +github.com/CityOfZion/neo-go v0.71.1-pre.0.20200129171427-f773ec69fb84/go.mod h1:FLI526IrRWHmcsO+mHsCbj64pJZhwQFTLJZu+A4PGOA= +github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= +github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/Workiva/go-datastructures v1.0.50 h1:slDmfW6KCHcC7U+LP3DDBbm4fqTwZGn1beOFPfGaLvo= +github.com/Workiva/go-datastructures v1.0.50/go.mod h1:Z+F2Rca0qCsVYDS8z7bAGm8f3UkzuWYS/oBZz5a7VVA= +github.com/abiosoft/ishell v2.0.0+incompatible/go.mod h1:HQR9AqF2R3P4XXpMpI0NAzgHf/aS6+zVXRj14cVk9qg= +github.com/abiosoft/readline v0.0.0-20180607040430-155bce2042db h1:CjPUSXOiYptLbTdr1RceuZgSFDQ7U15ITERUGrUORx8= +github.com/abiosoft/readline v0.0.0-20180607040430-155bce2042db/go.mod h1:rB3B4rKii8V21ydCbIzH5hZiCQE7f5E9SzUb/ZZx530= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4gioE62mJzwPIB8+Tee4RNCL9ulrY= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 h1:45bxf7AZMwWcqkLzDAQugVEwedisr5nRJ1r+7LYnv0U= +github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= +github.com/alicebob/miniredis v2.5.0+incompatible 
h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI= +github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/awalterschulze/gographviz v0.0.0-20181013152038-b2885df04310 h1:t+qxRrRtwNiUYA+Xh2jSXhoG2grnMCMKX4Fg6lx9X1U= +github.com/awalterschulze/gographviz v0.0.0-20181013152038-b2885df04310/go.mod h1:GEV5wmg4YquNw7v1kkyoX9etIk8yVmXj+AkDHuuETHs= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/cenk/backoff v2.2.1+incompatible h1:djdFT7f4gF2ttuzRKPbMOWgZajgesItGLwG5FTQKmmE= +github.com/cenk/backoff v2.2.1+incompatible/go.mod h1:7FtoeaSnHoZnmZzz47cM35Y9nSW7tNyaidugnHTaFDE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.0 h1:yTUvW7Vhb89inJ+8irsUqiWjh8iT6sQPZiQzI6ReGkA= +github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= +github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= 
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgraph-io/badger/v2 v2.0.3 h1:inzdf6VF/NZ+tJ8RwwYMjJMvsOALTHYdozn0qSl6XJI= +github.com/dgraph-io/badger/v2 v2.0.3/go.mod h1:3KY8+bsP8wI0OEnQJAKpd4wIJW/Mm32yw2j/9FUVnIM= +github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3 h1:MQLRM35Pp0yAyBYksjbj1nZI/w6eyRY/mWoM1sFf4kU= +github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM= +github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= +github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= +github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= +github.com/fasthttp/router v1.0.2 h1:rdYdcAmwOLqWuFgc4afa409SYmuw4t0A66K5Ib+GT3I= +github.com/fasthttp/router v1.0.2/go.mod h1:Myk/ofrwtfiLSCIfbE44+e+PyP3mR6JhZg3AYzqwJI0= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BMXYYRWTLOJKlh+lOBt6nUQgXAfB7oVIQt5cNreqSLI= +github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:rZfgFAXFS/z/lEd6LJmf9HVZ1LkgYiHx5pHhV5DR16M= +github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod 
h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-redis/redis v6.10.2+incompatible h1:SLbqrO/Ik1nhXA5/cbEs1P5MUBo1Qq4ihlNfGnnipPw= +github.com/go-redis/redis v6.10.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-yaml/yaml v2.1.0+incompatible h1:RYi2hDdss1u4YE7GwixGzWwVo47T8UQwnTLB6vQiq+o= +github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.0 h1:G8O7TerXerS4F6sx9OV7/nRfJdnXgHZu/S/7F2SN+UE= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod 
h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0= +github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0 h1:0IKlLyQ3Hs9nDaiK5cSHAGmcQEIC8l2Ts1u6x5Dfrqg= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= 
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.8.2 h1:Bx0qjetmNjdFXASH02NSAREKpiaDwkO1DRZ3dV2KCcs= +github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w= +github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9 h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771 h1:MHkK1uRtFbVqvAgvWxafZe54+5uBxLluGylDiKgdhwo= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= 
+github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mr-tron/base58 v1.1.2 h1:ZEw4I2EgPKDJ2iEw0cNmLB3ROrEmkOtXIkaG7wZg+78= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= +github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-multiaddr v0.2.0 h1:lR52sFwcTCuQb6bTfnXF6zA2XfyYvyd+5a9qECv/J90= +github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= +github.com/multiformats/go-multiaddr-net v0.1.2 h1:P7zcBH9FRETdPkDrylcXVjQLQ2t1JQtNItZULWNWgeg= +github.com/multiformats/go-multiaddr-net v0.1.2/go.mod h1:QsWt3XK/3hwvNxZJp92iMQKME1qHfpYmyIjFVsSOY6Y= +github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-varint v0.0.1 h1:TR/0rdQtnNxuN2IhiB639xC3tWM4IUi7DkTBVTdGW/M= +github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nspcc-dev/dbft v0.0.0-20191205084618-dacb1a30c254/go.mod h1:w1Ln2aT+dBlPhLnuZhBV+DfPEdS2CHWWLp5JTScY3bw= +github.com/nspcc-dev/dbft v0.0.0-20191209120240-0d6b7568d9ae/go.mod h1:3FjXOoHmA51EGfb5GS/HOv7VdmngNRTssSeQ729dvGY= +github.com/nspcc-dev/dbft v0.0.0-20200117124306-478e5cfbf03a h1:ajvxgEe9qY4vvoSmrADqdDx7hReodKTnT2IXN++qZG8= +github.com/nspcc-dev/dbft v0.0.0-20200117124306-478e5cfbf03a/go.mod h1:/YFK+XOxxg0Bfm6P92lY5eDSLYfp06XOdL8KAVgXjVk= +github.com/nspcc-dev/dbft v0.0.0-20200219114139-199d286ed6c1 h1:yEx9WznS+rjE0jl0dLujCxuZSIb+UTjF+005TJu/nNI= +github.com/nspcc-dev/dbft v0.0.0-20200219114139-199d286ed6c1/go.mod h1:O0qtn62prQSqizzoagHmuuKoz8QMkU3SzBoKdEvm3aQ= +github.com/nspcc-dev/dbft v0.0.0-20200623100921-5a182c20965e h1:QOT9slflIkEKb5wY0ZUC0dCmCgoqGlhOAh9+xWMIxfg= +github.com/nspcc-dev/dbft v0.0.0-20200623100921-5a182c20965e/go.mod h1:1FYQXSbb6/9HQIkoF8XO7W/S8N7AZRkBsgwbcXRvk0E= +github.com/nspcc-dev/hrw v1.0.8 h1:vwRuJXZXgkMvf473vFzeWGCfY1WBVeSHAEHvR4u3/Cg= +github.com/nspcc-dev/hrw v1.0.8/go.mod h1:l/W2vx83vMQo6aStyx2AuZrJ+07lGv2JQGlVkPG06MU= +github.com/nspcc-dev/hrw v1.0.9 h1:17VcAuTtrstmFppBjfRiia4K2wA/ukXZhLFS8Y8rz5Y= 
+github.com/nspcc-dev/hrw v1.0.9/go.mod h1:l/W2vx83vMQo6aStyx2AuZrJ+07lGv2JQGlVkPG06MU= +github.com/nspcc-dev/neo-go v0.73.1-pre.0.20200303142215-f5a1b928ce09/go.mod h1:pPYwPZ2ks+uMnlRLUyXOpLieaDQSEaf4NM3zHVbRjmg= +github.com/nspcc-dev/neo-go v0.90.0-pre.0.20200708064050-cf1e5243b90b h1:MIEMqbYh/jI4RYxfGFtFSjtEmGqrpzhv8Qcz6uGbSFY= +github.com/nspcc-dev/neo-go v0.90.0-pre.0.20200708064050-cf1e5243b90b/go.mod h1:Y27fkOIYUVt2yAoYkb833F45/q6pdLRdeAZKawHcpfE= +github.com/nspcc-dev/neofs-api-go v1.2.0 h1:8vovd8hvnoWS4qkSa6rhyMFLFvjLtNKar5vYRodf+y4= +github.com/nspcc-dev/neofs-api-go v1.2.0/go.mod h1:2tf31g2Ns/Z2ev5d8LZ/9f1VHIeY5LHpDbq4EsDhYM0= +github.com/nspcc-dev/neofs-crypto v0.2.0/go.mod h1:F/96fUzPM3wR+UGsPi3faVNmFlA9KAEAUQR7dMxZmNA= +github.com/nspcc-dev/neofs-crypto v0.2.3 h1:aca3X2aly92ENRbFK+kH6Hd+J9EQ4Eu6XMVoITSIKtc= +github.com/nspcc-dev/neofs-crypto v0.2.3/go.mod h1:8w16GEJbH6791ktVqHN9YRNH3s9BEEKYxGhlFnp0cDw= +github.com/nspcc-dev/neofs-crypto v0.3.0 h1:zlr3pgoxuzrmGCxc5W8dGVfA9Rro8diFvVnBg0L4ifM= +github.com/nspcc-dev/neofs-crypto v0.3.0/go.mod h1:8w16GEJbH6791ktVqHN9YRNH3s9BEEKYxGhlFnp0cDw= +github.com/nspcc-dev/netmap v1.7.0 h1:ak64xn/gPdgYw4tsqSSF7kAGQGbEpeuJEF3XwBX4L9Y= +github.com/nspcc-dev/netmap v1.7.0/go.mod h1:mhV3UOg9ljQmu0teQShD6+JYX09XY5gu2I4hIByCH9M= +github.com/nspcc-dev/rfc6979 v0.1.0 h1:Lwg7esRRoyK1Up/IN1vAef1EmvrBeMHeeEkek2fAJ6c= +github.com/nspcc-dev/rfc6979 v0.1.0/go.mod h1:exhIh1PdpDC5vQmyEsGvc4YDM/lyQp/452QxGq/UEso= +github.com/nspcc-dev/rfc6979 v0.2.0 h1:3e1WNxrN60/6N0DW7+UYisLeZJyfqZTNOjeV/toYvOE= +github.com/nspcc-dev/rfc6979 v0.2.0/go.mod h1:exhIh1PdpDC5vQmyEsGvc4YDM/lyQp/452QxGq/UEso= +github.com/nspcc-dev/tzhash v1.4.0 h1:RVIR+mxOBHl58CE99+DXtE31ylD5PEkZSoWqoj4fVjg= +github.com/nspcc-dev/tzhash v1.4.0/go.mod h1:Z8gp/VZbyWgPhaMp/KTmeoW5UTynp/N60g0jTtSzBws= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3 h1:OoxbjfXVZyod1fmWYhI7SEyaD8B00ynP3T+D5GiyHOY= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/panjf2000/ants/v2 v2.3.0 h1:+l+41IiyMmpcK+YhWs2dn3tgL6cg4cvJzup1mBGmU00= +github.com/panjf2000/ants/v2 v2.3.0/go.mod h1:LtwNaBX6OeF5qRtQlaeGndalVwJlS2ueur7uwoAHbPA= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/peterbourgon/g2s v0.0.0-20170223122336-d4e7ad98afea h1:sKwxy1H95npauwu8vtF95vG/syrL0p8fSZo/XlDg5gk= +github.com/peterbourgon/g2s v0.0.0-20170223122336-d4e7ad98afea/go.mod h1:1VcHEd3ro4QMoHfiNl/j7Jkln9+KQuorp0PItHMJYNg= +github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3 h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.2.1 h1:JnMpQc6ppsNgw9QPAGF6Dod479itz7lvlsMzzNayLOI= +github.com/prometheus/client_golang v1.2.1 h1:JnMpQc6ppsNgw9QPAGF6Dod479itz7lvlsMzzNayLOI= +github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= +github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= +github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= +github.com/prometheus/client_golang v1.6.0 h1:YVPodQOcK15POxhgARIvnDRVpLcuK8mglnMrWfyrw6A= +github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0 h1:7etb9YClo3a6HjLzfl6rIQaU+FDfi0VSX39io3aQ+DM= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= +github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084 
h1:sofwID9zm4tzrgykg80hfFph1mryUeLRsUfoocVVmRY= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8= +github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI= +github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rubyist/circuitbreaker v2.2.1+incompatible h1:KUKd/pV8Geg77+8LNDwdow6rVCAYOp8+kHUyFvL6Mhk= +github.com/rubyist/circuitbreaker v2.2.1+incompatible/go.mod h1:Ycs3JgJADPuzJDwffe12k6BZT8hxVi6lFK+gWYJLN4A= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/savsgio/gotils v0.0.0-20200319105752-a9cc718f6a3f h1:XfUnevLK4O22at3R77FlyQHKwlQs75LELdsH2wRX2KQ= +github.com/savsgio/gotils v0.0.0-20200319105752-a9cc718f6a3f/go.mod h1:lHhJedqxCoHN+zMtwGNTXWmF0u9Jt363FYRhV6g0CdY= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= 
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/syndtr/goleveldb v0.0.0-20180307113352-169b1b37be73 h1:I2drr5K0tykBofr74ZEGliE/Hf6fNkEbcPyFvsy7wZk= +github.com/syndtr/goleveldb v0.0.0-20180307113352-169b1b37be73/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.9.0 h1:hNpmUdy/+ZXYpGy0OBfm7K0UQTzb73W0T0U4iJIVrMw= +github.com/valyala/fasthttp v1.9.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/gopher-lua v0.0.0-20190514113301-1cd887cd7036/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ= 
+github.com/yuin/gopher-lua v0.0.0-20191128022950-c6266f4fe8d7 h1:Y17pEjKgx2X0A69WQPGa8hx/Myzu+4NdUxlkZpbAYio= +github.com/yuin/gopher-lua v0.0.0-20191128022950-c6266f4fe8d7/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg= +go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.5.1 h1:rsqfU5vBkVknbhUGbAUwQKR2H4ItV8tjJ+6kJX4cxHM= +go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/dig v1.8.0 h1:1rR6hnL/bu1EVcjnRDN5kx1vbIjEJDTGhSQ2B3ddpcI= +go.uber.org/dig v1.8.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0 h1:sFPn2GLc3poCkfrpIXGhBD2X0CMIo4Q/zSULXrj/+uc= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E= +go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= +golang.org/x/crypto 
v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200117160349-530e935923ad h1:Jh8cai0fqIK+f6nG0UgPW5wFk8wmiMhM3AyciDBdtQg= +golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181228144115-9a3f9b0469bb/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY= +golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f h1:gWF768j/LaZugp8dyS4UwsslYCYz9XgFxvlgsn0n9H8= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180318012157-96caea41033d/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191030062658-86caa796c7ab h1:tpc/nJ4vD66vAk/2KN0sw/DvQIz2sKmCpWvyKtPmfMQ= +golang.org/x/tools v0.0.0-20191030062658-86caa796c7ab/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200123022218-593de606220b h1:ztSlcncMErSAUzXwnVO1iTPxHwtvOHBB26SGiyYXIEE= +golang.org/x/tools v0.0.0-20200123022218-593de606220b/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod 
h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a h1:Ob5/580gVHBJZgXnff1cZDbG+xLtMVE5mDRTe+nIsX4= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +gopkg.in/abiosoft/ishell.v2 v2.0.0 h1:/J5yh3nWYSSGFjALcitTI9CLE0Tu27vBYHX0srotqOc= +gopkg.in/abiosoft/ishell.v2 v2.0.0/go.mod h1:sFp+cGtH6o4s1FtpVPTMcHq2yue+c4DGOVohJCPUzwY= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/internal/error.go b/internal/error.go new file mode 100644 index 000000000..7df160339 --- /dev/null +++ b/internal/error.go @@ -0,0 +1,7 @@ +package internal + +// Error is a custom error. +type Error string + +// Error is an implementation of error interface. 
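// Editor's sketch (illustrative, not part of this commit): because Error is a
// plain string type, packages can declare sentinel errors as constants and
// compare them with ==. The constant and helper below are hypothetical.
const errExampleNotFound = Error("example: not found")

func isExampleNotFound(err error) bool {
	return err == errExampleNotFound
}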
+func (e Error) Error() string { return string(e) } diff --git a/lib/.gitkeep b/lib/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/lib/acl/action.go b/lib/acl/action.go new file mode 100644 index 000000000..a173d92e9 --- /dev/null +++ b/lib/acl/action.go @@ -0,0 +1,94 @@ +package acl + +import ( + "bytes" + + "github.com/nspcc-dev/neofs-api-go/acl" +) + +// RequestInfo is an interface of request information needed for extended ACL check. +type RequestInfo interface { + TypedHeaderSource + + // Must return the binary representation of request initiator's key. + Key() []byte + + // Must return true if request corresponds to operation type. + TypeOf(acl.OperationType) bool + + // Must return true if request has passed target. + TargetOf(acl.Target) bool +} + +// ExtendedACLChecker is an interface of extended ACL checking tool. +type ExtendedACLChecker interface { + // Must return an action according to the results of applying the ACL table rules to request. + // + // Must return ActionUndefined if it is unable to explicitly calculate the action. + Action(acl.ExtendedACLTable, RequestInfo) acl.ExtendedACLAction +} + +type extendedACLChecker struct{} + +// NewExtendedACLChecker creates a new extended ACL checking tool and returns ExtendedACLChecker interface. +func NewExtendedACLChecker() ExtendedACLChecker { + return new(extendedACLChecker) +} + +// Action returns an action for passed request based on information about it and ACL table. +// +// Returns action of the first suitable table record, or ActionUndefined in the absence thereof. +// +// If passed ExtendedACLTable is nil, ActionUndefined returns. +// If passed RequestInfo is nil, ActionUndefined returns. +func (s extendedACLChecker) Action(table acl.ExtendedACLTable, req RequestInfo) acl.ExtendedACLAction { + if table == nil { + return acl.ActionUndefined + } else if req == nil { + return acl.ActionUndefined + } + + for _, record := range table.Records() { + // check type of operation + if !req.TypeOf(record.OperationType()) { + continue + } + + // check target + if !targetMatches(req, record.TargetList()) { + continue + } + + // check headers + switch MatchFilters(req, record.HeaderFilters()) { + case mResUndefined: + // headers of some type could not be composed => allow + return acl.ActionAllow + case mResMatch: + return record.Action() + } + } + + return acl.ActionAllow +} + +// returns true if one of ExtendedACLTarget has suitable target OR suitable public key. 
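// Editor's sketch (illustrative, not part of this commit): minimal use of the
// checker defined above, assuming the caller already has an acl.ExtendedACLTable
// and a RequestInfo implementation. The function name is hypothetical.
func isRequestAllowed(table acl.ExtendedACLTable, req RequestInfo) bool {
	checker := NewExtendedACLChecker()

	// ActionUndefined is returned only for nil arguments; otherwise the result
	// comes from the first matching record, or ActionAllow if nothing matched.
	return checker.Action(table, req) == acl.ActionAllow
}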
+func targetMatches(req RequestInfo, list []acl.ExtendedACLTarget) bool { + rKey := req.Key() + + for _, target := range list { + // check public key match + for _, key := range target.KeyList() { + if bytes.Equal(key, rKey) { + return true + } + } + + // check target group match + if req.TargetOf(target.Target()) { + return true + } + } + + return false +} diff --git a/lib/acl/action_test.go b/lib/acl/action_test.go new file mode 100644 index 000000000..49e30eea8 --- /dev/null +++ b/lib/acl/action_test.go @@ -0,0 +1,163 @@ +package acl + +import ( + "testing" + + "github.com/nspcc-dev/neofs-api-go/acl" + "github.com/stretchr/testify/require" +) + +type testExtendedACLTable struct { + records []acl.ExtendedACLRecord +} + +type testRequestInfo struct { + headers []acl.TypedHeader + key []byte + opType acl.OperationType + target acl.Target +} + +type testEACLRecord struct { + opType acl.OperationType + filters []acl.HeaderFilter + targets []acl.ExtendedACLTarget + action acl.ExtendedACLAction +} + +type testEACLTarget struct { + target acl.Target + keys [][]byte +} + +func (s testEACLTarget) Target() acl.Target { + return s.target +} + +func (s testEACLTarget) KeyList() [][]byte { + return s.keys +} + +func (s testEACLRecord) OperationType() acl.OperationType { + return s.opType +} + +func (s testEACLRecord) HeaderFilters() []acl.HeaderFilter { + return s.filters +} + +func (s testEACLRecord) TargetList() []acl.ExtendedACLTarget { + return s.targets +} + +func (s testEACLRecord) Action() acl.ExtendedACLAction { + return s.action +} + +func (s testRequestInfo) HeadersOfType(typ acl.HeaderType) ([]acl.Header, bool) { + res := make([]acl.Header, 0, len(s.headers)) + + for i := range s.headers { + if s.headers[i].HeaderType() == typ { + res = append(res, s.headers[i]) + } + } + + return res, true +} + +func (s testRequestInfo) Key() []byte { + return s.key +} + +func (s testRequestInfo) TypeOf(t acl.OperationType) bool { + return s.opType == t +} + +func (s testRequestInfo) TargetOf(t acl.Target) bool { + return s.target == t +} + +func (s testExtendedACLTable) Records() []acl.ExtendedACLRecord { + return s.records +} + +func TestExtendedACLChecker_Action(t *testing.T) { + s := NewExtendedACLChecker() + + // nil ExtendedACLTable + require.Equal(t, acl.ActionUndefined, s.Action(nil, nil)) + + // create test ExtendedACLTable + table := new(testExtendedACLTable) + + // nil RequestInfo + require.Equal(t, acl.ActionUndefined, s.Action(table, nil)) + + // create test RequestInfo + req := new(testRequestInfo) + + // create test ExtendedACLRecord + record := new(testEACLRecord) + table.records = append(table.records, record) + + // set different OperationType + record.opType = acl.OperationType(3) + req.opType = record.opType + 1 + + require.Equal(t, acl.ActionAllow, s.Action(table, req)) + + // set equal OperationType + req.opType = record.opType + + // create test ExtendedACLTarget through group + target := new(testEACLTarget) + record.targets = append(record.targets, target) + + // set not matching ExtendedACLTarget + target.target = acl.Target(5) + req.target = target.target + 1 + + require.Equal(t, acl.ActionAllow, s.Action(table, req)) + + // set matching ExtendedACLTarget + req.target = target.target + + // create test HeaderFilter + fHeader := new(testTypedHeader) + hFilter := &testHeaderFilter{ + TypedHeader: fHeader, + } + record.filters = append(record.filters, hFilter) + + // create test TypedHeader + header := new(testTypedHeader) + req.headers = append(req.headers, header) + + // set not 
matching values + header.t = hFilter.HeaderType() + 1 + + require.Equal(t, acl.ActionAllow, s.Action(table, req)) + + // set matching values + header.k = "key" + header.v = "value" + + fHeader.t = header.HeaderType() + fHeader.k = header.Name() + fHeader.v = header.Value() + + hFilter.t = acl.StringEqual + + // set ExtendedACLAction + record.action = acl.ExtendedACLAction(7) + + require.Equal(t, record.action, s.Action(table, req)) + + // set matching ExtendedACLTarget through key + target.target = req.target + 1 + req.key = []byte{1, 2, 3} + target.keys = append(target.keys, req.key) + + require.Equal(t, record.action, s.Action(table, req)) +} diff --git a/lib/acl/basic.go b/lib/acl/basic.go new file mode 100644 index 000000000..eae2d7fa9 --- /dev/null +++ b/lib/acl/basic.go @@ -0,0 +1,179 @@ +package acl + +import ( + "github.com/nspcc-dev/neofs-api-go/acl" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-node/internal" +) + +type ( + // BasicChecker is an interface of the basic ACL control tool. + BasicChecker interface { + // Action returns true if request is allowed for this target. + Action(uint32, object.RequestType, acl.Target) (bool, error) + + // Bearer returns true if bearer token is allowed for this request. + Bearer(uint32, object.RequestType) (bool, error) + + // Extended returns true if extended ACL is allowed for this. + Extended(uint32) bool + + // Sticky returns true if sticky bit is set. + Sticky(uint32) bool + } + + // BasicACLChecker performs basic ACL check. + BasicACLChecker struct{} + + // MaskedBasicACLChecker performs all basic ACL checks, but applying + // mask on ACL first. It is useful, when some bits must be always + // set or unset. + MaskedBasicACLChecker struct { + BasicACLChecker + + andMask uint32 + orMask uint32 + } + + nibble struct { + value uint32 + } +) + +const ( + errUnknownRequest = internal.Error("unknown request type") + errUnknownTarget = internal.Error("unknown target type") +) + +const ( + aclFinalBit = 0x10000000 // 29th bit + aclStickyBit = 0x20000000 // 30th bit + + nibbleBBit = 0x1 + nibbleOBit = 0x2 + nibbleSBit = 0x4 + nibbleUBit = 0x8 + + // DefaultAndFilter is a default AND mask of basic ACL value of container. + DefaultAndFilter = 0xFFFFFFFF +) + +var ( + nibbleOffset = map[object.RequestType]uint32{ + object.RequestGet: 0, + object.RequestHead: 1 * 4, + object.RequestPut: 2 * 4, + object.RequestDelete: 3 * 4, + object.RequestSearch: 4 * 4, + object.RequestRange: 5 * 4, + object.RequestRangeHash: 6 * 4, + } +) + +// Action returns true if request is allowed for target. +func (c *BasicACLChecker) Action(rule uint32, req object.RequestType, t acl.Target) (bool, error) { + n, err := fetchNibble(rule, req) + if err != nil { + return false, err + } + + switch t { + case acl.Target_User: + return n.U(), nil + case acl.Target_System: + return n.S(), nil + case acl.Target_Others: + return n.O(), nil + default: + return false, errUnknownTarget + } +} + +// Bearer returns true if bearer token is allowed to use for this request +// as source of extended ACL. +func (c *BasicACLChecker) Bearer(rule uint32, req object.RequestType) (bool, error) { + n, err := fetchNibble(rule, req) + if err != nil { + return false, err + } + + return n.B(), nil +} + +// Extended returns true if extended ACL stored in the container are allowed +// to use. 
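// Editor's sketch (illustrative, not part of this commit): a worked example of
// the nibble layout used above. Each request type occupies a 4-bit group
// (B|O|S|U from the lowest bit), so a rule whose Get nibble is 0xA (U and O bits)
// allows Get for the user and others, but not for system nodes and not via a
// bearer token. The rule value below is hypothetical.
func exampleBasicACL() {
	checker := new(BasicACLChecker)

	const rule = uint32(0x0000000A) // only the Get nibble is set: U|O

	userOK, _ := checker.Action(rule, object.RequestGet, acl.Target_User)     // true
	sysOK, _ := checker.Action(rule, object.RequestGet, acl.Target_System)    // false
	othersOK, _ := checker.Action(rule, object.RequestGet, acl.Target_Others) // true
	bearerOK, _ := checker.Bearer(rule, object.RequestGet)                    // false

	_ = userOK && !sysOK && othersOK && !bearerOK
}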
+func (c *BasicACLChecker) Extended(rule uint32) bool { + return rule&aclFinalBit != aclFinalBit +} + +// Sticky returns true if container is not allowed to store objects with +// owners different from request owner. +func (c *BasicACLChecker) Sticky(rule uint32) bool { + return rule&aclStickyBit == aclStickyBit +} + +func fetchNibble(rule uint32, req object.RequestType) (*nibble, error) { + offset, ok := nibbleOffset[req] + if !ok { + return nil, errUnknownRequest + } + + return &nibble{value: (rule >> offset) & 0xf}, nil +} + +// B returns true if `Bearer` bit set in the nibble. +func (n *nibble) B() bool { return n.value&nibbleBBit == nibbleBBit } + +// O returns true if `Others` bit set in the nibble. +func (n *nibble) O() bool { return n.value&nibbleOBit == nibbleOBit } + +// S returns true if `System` bit set in the nibble. +func (n *nibble) S() bool { return n.value&nibbleSBit == nibbleSBit } + +// U returns true if `User` bit set in the nibble. +func (n *nibble) U() bool { return n.value&nibbleUBit == nibbleUBit } + +// NewMaskedBasicACLChecker returns BasicChecker that applies predefined +// bit mask on basic ACL value. +func NewMaskedBasicACLChecker(or, and uint32) BasicChecker { + return MaskedBasicACLChecker{ + BasicACLChecker: BasicACLChecker{}, + andMask: and, + orMask: or, + } +} + +// Action returns true if request is allowed for target. +func (c MaskedBasicACLChecker) Action(rule uint32, req object.RequestType, t acl.Target) (bool, error) { + rule |= c.orMask + rule &= c.andMask + + return c.BasicACLChecker.Action(rule, req, t) +} + +// Bearer returns true if bearer token is allowed to use for this request +// as source of extended ACL. +func (c MaskedBasicACLChecker) Bearer(rule uint32, req object.RequestType) (bool, error) { + rule |= c.orMask + rule &= c.andMask + + return c.BasicACLChecker.Bearer(rule, req) +} + +// Extended returns true if extended ACL stored in the container are allowed +// to use. +func (c MaskedBasicACLChecker) Extended(rule uint32) bool { + rule |= c.orMask + rule &= c.andMask + + return c.BasicACLChecker.Extended(rule) +} + +// Sticky returns true if container is not allowed to store objects with +// owners different from request owner. 
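// Editor's sketch (illustrative, not part of this commit): the OR mask is applied
// before the usual nibble check, so bits that must always be granted can be forced
// on. With the hypothetical mask below the System bit of the Get nibble is always
// set, and Get is allowed for system nodes even when the container rule is 0.
func exampleMaskedBasicACL() {
	checker := NewMaskedBasicACLChecker(0x00000004, DefaultAndFilter) // force S bit of the Get nibble

	ok, _ := checker.Action(0, object.RequestGet, acl.Target_System)

	_ = ok // true: 0|0x4 has the System bit of the Get nibble set
}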
+func (c MaskedBasicACLChecker) Sticky(rule uint32) bool { + rule |= c.orMask + rule &= c.andMask + + return c.BasicACLChecker.Sticky(rule) +} diff --git a/lib/acl/basic_test.go b/lib/acl/basic_test.go new file mode 100644 index 000000000..b379751f6 --- /dev/null +++ b/lib/acl/basic_test.go @@ -0,0 +1,116 @@ +package acl + +import ( + "math/bits" + "testing" + + "github.com/nspcc-dev/neofs-api-go/acl" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/stretchr/testify/require" +) + +func TestBasicACLChecker(t *testing.T) { + reqs := []object.RequestType{ + object.RequestGet, + object.RequestHead, + object.RequestPut, + object.RequestDelete, + object.RequestSearch, + object.RequestRange, + object.RequestRangeHash, + } + + targets := []acl.Target{ + acl.Target_Others, + acl.Target_System, + acl.Target_User, + } + + checker := new(BasicACLChecker) + + t.Run("verb permissions", func(t *testing.T) { + mask := uint32(1) + + for i := range reqs { + res, err := checker.Bearer(mask, reqs[i]) + require.NoError(t, err) + require.True(t, res) + + mask = bits.Reverse32(mask) + res, err = checker.Bearer(mask, reqs[i]) + require.NoError(t, err) + require.False(t, res) + + mask = bits.Reverse32(mask) + + for j := range targets { + mask <<= 1 + res, err = checker.Action(mask, reqs[i], targets[j]) + require.NoError(t, err) + require.True(t, res) + + mask = bits.Reverse32(mask) + res, err = checker.Action(mask, reqs[i], targets[j]) + require.NoError(t, err) + require.False(t, res) + + mask = bits.Reverse32(mask) + } + mask <<= 1 + } + }) + + t.Run("unknown verb", func(t *testing.T) { + mask := uint32(1) + _, err := checker.Bearer(mask, -1) + require.Error(t, err) + + mask = 2 + _, err = checker.Action(mask, -1, acl.Target_Others) + require.Error(t, err) + }) + + t.Run("unknown action", func(t *testing.T) { + mask := uint32(2) + _, err := checker.Action(mask, object.RequestGet, -1) + require.Error(t, err) + }) + + t.Run("extended acl permission", func(t *testing.T) { + // set F-bit + mask := uint32(0) | aclFinalBit + require.False(t, checker.Extended(mask)) + + // unset F-bit + mask = bits.Reverse32(mask) + require.True(t, checker.Extended(mask)) + }) + + t.Run("sticky bit permission", func(t *testing.T) { + mask := uint32(0x20000000) + require.True(t, checker.Sticky(mask)) + + mask = bits.Reverse32(mask) + require.False(t, checker.Sticky(mask)) + }) +} + +// todo: add tests like in basic acl checker +func TestNeoFSMaskedBasicACLChecker(t *testing.T) { + const orFilter = 0x04040444 // this OR filter will be used in neofs-node + checker := NewMaskedBasicACLChecker(orFilter, DefaultAndFilter) + + reqs := []object.RequestType{ + object.RequestGet, + object.RequestHead, + object.RequestPut, + object.RequestSearch, + object.RequestRangeHash, + } + + for i := range reqs { + res, err := checker.Action(0, reqs[i], acl.Target_System) + require.NoError(t, err) + require.True(t, res) + } +} diff --git a/lib/acl/binary.go b/lib/acl/binary.go new file mode 100644 index 000000000..a1cf6e50b --- /dev/null +++ b/lib/acl/binary.go @@ -0,0 +1,129 @@ +package acl + +import ( + "context" + "encoding/binary" + "io" + + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-node/internal" +) + +// BinaryEACLKey is a binary EACL storage key. +type BinaryEACLKey struct { + cid refs.CID +} + +// BinaryEACLValue is a binary EACL storage value. +type BinaryEACLValue struct { + eacl []byte + + sig []byte +} + +// BinaryExtendedACLSource is an interface of storage of binary extended ACL tables with read access. 
+type BinaryExtendedACLSource interface { + // Must return binary extended ACL table by key. + GetBinaryEACL(context.Context, BinaryEACLKey) (BinaryEACLValue, error) +} + +// BinaryExtendedACLStore is an interface of storage of binary extended ACL tables. +type BinaryExtendedACLStore interface { + BinaryExtendedACLSource + + // Must store binary extended ACL table for key. + PutBinaryEACL(context.Context, BinaryEACLKey, BinaryEACLValue) error +} + +// ErrNilBinaryExtendedACLStore is returned by function that expect a non-nil +// BinaryExtendedACLStore, but received nil. +const ErrNilBinaryExtendedACLStore = internal.Error("binary extended ACL store is nil") + +const sliceLenSize = 4 + +var eaclEndianness = binary.BigEndian + +// CID is a container ID getter. +func (s BinaryEACLKey) CID() refs.CID { + return s.cid +} + +// SetCID is a container ID setter. +func (s *BinaryEACLKey) SetCID(v refs.CID) { + s.cid = v +} + +// EACL is a binary extended ACL table getter. +func (s BinaryEACLValue) EACL() []byte { + return s.eacl +} + +// SetEACL is a binary extended ACL table setter. +func (s *BinaryEACLValue) SetEACL(v []byte) { + s.eacl = v +} + +// Signature is an EACL signature getter. +func (s BinaryEACLValue) Signature() []byte { + return s.sig +} + +// SetSignature is an EACL signature setter. +func (s *BinaryEACLValue) SetSignature(v []byte) { + s.sig = v +} + +// MarshalBinary returns a binary representation of BinaryEACLValue. +func (s BinaryEACLValue) MarshalBinary() ([]byte, error) { + data := make([]byte, sliceLenSize+len(s.eacl)+sliceLenSize+len(s.sig)) + + off := 0 + + eaclEndianness.PutUint32(data[off:], uint32(len(s.eacl))) + off += sliceLenSize + + off += copy(data[off:], s.eacl) + + eaclEndianness.PutUint32(data[off:], uint32(len(s.sig))) + off += sliceLenSize + + copy(data[off:], s.sig) + + return data, nil +} + +// UnmarshalBinary unmarshals BinaryEACLValue from bytes. +func (s *BinaryEACLValue) UnmarshalBinary(data []byte) (err error) { + err = io.ErrUnexpectedEOF + off := 0 + + if len(data[off:]) < sliceLenSize { + return + } + + aclLn := eaclEndianness.Uint32(data[off:]) + off += 4 + + if uint32(len(data[off:])) < aclLn { + return + } + + s.eacl = make([]byte, aclLn) + off += copy(s.eacl, data[off:]) + + if len(data[off:]) < sliceLenSize { + return + } + + sigLn := eaclEndianness.Uint32(data[off:]) + off += 4 + + if uint32(len(data[off:])) < sigLn { + return + } + + s.sig = make([]byte, sigLn) + copy(s.sig, data[off:]) + + return nil +} diff --git a/lib/acl/binary_test.go b/lib/acl/binary_test.go new file mode 100644 index 000000000..eefb59ab5 --- /dev/null +++ b/lib/acl/binary_test.go @@ -0,0 +1,27 @@ +package acl + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestBinaryEACLValue(t *testing.T) { + s := BinaryEACLValue{} + + eacl := []byte{1, 2, 3} + s.SetEACL(eacl) + require.Equal(t, eacl, s.EACL()) + + sig := []byte{4, 5, 6} + s.SetSignature(sig) + require.Equal(t, sig, s.Signature()) + + data, err := s.MarshalBinary() + require.NoError(t, err) + + s2 := BinaryEACLValue{} + require.NoError(t, s2.UnmarshalBinary(data)) + + require.Equal(t, s, s2) +} diff --git a/lib/acl/extended.go b/lib/acl/extended.go new file mode 100644 index 000000000..20695bc6e --- /dev/null +++ b/lib/acl/extended.go @@ -0,0 +1,29 @@ +package acl + +import ( + "context" + + "github.com/nspcc-dev/neofs-api-go/acl" + "github.com/nspcc-dev/neofs-api-go/refs" +) + +// TypedHeaderSource is a various types of header set interface. 
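// Editor's sketch (illustrative, not part of this commit): the wire format
// produced by BinaryEACLValue.MarshalBinary above is two length-prefixed byte
// slices, each prefix being a 4-byte big-endian length. The values are hypothetical.
func exampleBinaryEACLLayout() {
	v := BinaryEACLValue{}
	v.SetEACL([]byte{1, 2, 3})
	v.SetSignature([]byte{4, 5, 6})

	data, _ := v.MarshalBinary()
	// data == []byte{
	//	0, 0, 0, 3, 1, 2, 3, // len(eacl) || eacl
	//	0, 0, 0, 3, 4, 5, 6, // len(sig)  || sig
	// }
	_ = data
}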
+type TypedHeaderSource interface { + // Must return list of Header of particular type. + // Must return false if there is no ability to compose header list. + HeadersOfType(acl.HeaderType) ([]acl.Header, bool) +} + +// ExtendedACLSource is an interface of storage of extended ACL tables with read access. +type ExtendedACLSource interface { + // Must return extended ACL table by container ID key. + GetExtendedACLTable(context.Context, refs.CID) (acl.ExtendedACLTable, error) +} + +// ExtendedACLStore is an interface of storage of extended ACL tables. +type ExtendedACLStore interface { + ExtendedACLSource + + // Must store extended ACL table for container ID key. + PutExtendedACLTable(context.Context, refs.CID, acl.ExtendedACLTable) error +} diff --git a/lib/acl/header.go b/lib/acl/header.go new file mode 100644 index 000000000..8c779b3b6 --- /dev/null +++ b/lib/acl/header.go @@ -0,0 +1,234 @@ +package acl + +import ( + "strconv" + + "github.com/nspcc-dev/neofs-api-go/acl" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/service" +) + +type objectHeaderSource struct { + obj *object.Object +} + +type typedHeader struct { + n string + v string + t acl.HeaderType +} + +type extendedHeadersWrapper struct { + hdrSrc service.ExtendedHeadersSource +} + +type typedExtendedHeader struct { + hdr service.ExtendedHeader +} + +func newTypedObjSysHdr(name, value string) acl.TypedHeader { + return &typedHeader{ + n: name, + v: value, + t: acl.HdrTypeObjSys, + } +} + +// Name is a name field getter. +func (s typedHeader) Name() string { + return s.n +} + +// Value is a value field getter. +func (s typedHeader) Value() string { + return s.v +} + +// HeaderType is a type field getter. +func (s typedHeader) HeaderType() acl.HeaderType { + return s.t +} + +// TypedHeaderSourceFromObject wraps passed object and returns TypedHeaderSource interface. +func TypedHeaderSourceFromObject(obj *object.Object) TypedHeaderSource { + return &objectHeaderSource{ + obj: obj, + } +} + +// HeaderOfType gathers object headers of passed type and returns Header list. +// +// If value of some header can not be calculated (e.g. nil extended header), it does not appear in list. +// +// Always returns true. 
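// Editor's sketch (illustrative, not part of this commit): typical use of the
// object wrapper below when extended ACL filters have to be matched against
// object headers. The function name is hypothetical.
func exampleObjectHeaders(obj *object.Object) {
	src := TypedHeaderSourceFromObject(obj)

	// system headers (ID, CID, OwnerID, Version, PayloadLength, CreatedAt, ...)
	sysHdrs, _ := src.HeadersOfType(acl.HdrTypeObjSys)

	// user-defined headers attached to the object
	usrHdrs, _ := src.HeadersOfType(acl.HdrTypeObjUsr)

	_, _ = sysHdrs, usrHdrs
}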
+func (s objectHeaderSource) HeadersOfType(typ acl.HeaderType) ([]acl.Header, bool) { + if s.obj == nil { + return nil, true + } + + var res []acl.Header + + switch typ { + case acl.HdrTypeObjUsr: + objHeaders := s.obj.GetHeaders() + + res = make([]acl.Header, 0, len(objHeaders)) // 7 system header fields + + for i := range objHeaders { + if h := newTypedObjectExtendedHeader(objHeaders[i]); h != nil { + res = append(res, h) + } + } + case acl.HdrTypeObjSys: + res = make([]acl.Header, 0, 7) + + sysHdr := s.obj.GetSystemHeader() + + created := sysHdr.GetCreatedAt() + + res = append(res, + // ID + newTypedObjSysHdr( + acl.HdrObjSysNameID, + sysHdr.ID.String(), + ), + + // CID + newTypedObjSysHdr( + acl.HdrObjSysNameCID, + sysHdr.CID.String(), + ), + + // OwnerID + newTypedObjSysHdr( + acl.HdrObjSysNameOwnerID, + sysHdr.OwnerID.String(), + ), + + // Version + newTypedObjSysHdr( + acl.HdrObjSysNameVersion, + strconv.FormatUint(sysHdr.GetVersion(), 10), + ), + + // PayloadLength + newTypedObjSysHdr( + acl.HdrObjSysNamePayloadLength, + strconv.FormatUint(sysHdr.GetPayloadLength(), 10), + ), + + // CreatedAt.UnitTime + newTypedObjSysHdr( + acl.HdrObjSysNameCreatedUnix, + strconv.FormatUint(uint64(created.GetUnixTime()), 10), + ), + + // CreatedAt.Epoch + newTypedObjSysHdr( + acl.HdrObjSysNameCreatedEpoch, + strconv.FormatUint(created.GetEpoch(), 10), + ), + ) + } + + return res, true +} + +func newTypedObjectExtendedHeader(h object.Header) acl.TypedHeader { + val := h.GetValue() + if val == nil { + return nil + } + + res := new(typedHeader) + res.t = acl.HdrTypeObjSys + + switch hdr := val.(type) { + case *object.Header_UserHeader: + if hdr.UserHeader == nil { + return nil + } + + res.t = acl.HdrTypeObjUsr + res.n = hdr.UserHeader.GetKey() + res.v = hdr.UserHeader.GetValue() + case *object.Header_Link: + if hdr.Link == nil { + return nil + } + + switch hdr.Link.GetType() { + case object.Link_Previous: + res.n = acl.HdrObjSysLinkPrev + case object.Link_Next: + res.n = acl.HdrObjSysLinkNext + case object.Link_Child: + res.n = acl.HdrObjSysLinkChild + case object.Link_Parent: + res.n = acl.HdrObjSysLinkPar + case object.Link_StorageGroup: + res.n = acl.HdrObjSysLinkSG + default: + return nil + } + + res.v = hdr.Link.ID.String() + default: + return nil + } + + return res +} + +// TypedHeaderSourceFromExtendedHeaders wraps passed ExtendedHeadersSource and returns TypedHeaderSource interface. +func TypedHeaderSourceFromExtendedHeaders(hdrSrc service.ExtendedHeadersSource) TypedHeaderSource { + return &extendedHeadersWrapper{ + hdrSrc: hdrSrc, + } +} + +// Name returns the result of Key method. +func (s typedExtendedHeader) Name() string { + return s.hdr.Key() +} + +// Value returns the result of Value method. +func (s typedExtendedHeader) Value() string { + return s.hdr.Value() +} + +// HeaderType always returns HdrTypeRequest. +func (s typedExtendedHeader) HeaderType() acl.HeaderType { + return acl.HdrTypeRequest +} + +// TypedHeaders gathers extended request headers and returns TypedHeader list. +// +// Nil headers are ignored. +// +// Always returns true. 
+func (s extendedHeadersWrapper) HeadersOfType(typ acl.HeaderType) ([]acl.Header, bool) { + if s.hdrSrc == nil { + return nil, true + } + + var res []acl.Header + + if typ == acl.HdrTypeRequest { + hs := s.hdrSrc.ExtendedHeaders() + + res = make([]acl.Header, 0, len(hs)) + + for i := range hs { + if hs[i] == nil { + continue + } + + res = append(res, &typedExtendedHeader{ + hdr: hs[i], + }) + } + } + + return res, true +} diff --git a/lib/acl/headers_test.go b/lib/acl/headers_test.go new file mode 100644 index 000000000..236e084d2 --- /dev/null +++ b/lib/acl/headers_test.go @@ -0,0 +1,60 @@ +package acl + +import ( + "testing" + + "github.com/nspcc-dev/neofs-api-go/acl" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/stretchr/testify/require" +) + +func TestNewTypedObjectExtendedHeader(t *testing.T) { + var res acl.TypedHeader + + hdr := object.Header{} + + // nil value + require.Nil(t, newTypedObjectExtendedHeader(hdr)) + + // UserHeader + { + key := "key" + val := "val" + hdr.Value = &object.Header_UserHeader{ + UserHeader: &object.UserHeader{ + Key: key, + Value: val, + }, + } + + res = newTypedObjectExtendedHeader(hdr) + require.Equal(t, acl.HdrTypeObjUsr, res.HeaderType()) + require.Equal(t, key, res.Name()) + require.Equal(t, val, res.Value()) + } + + { // Link + link := new(object.Link) + link.ID = object.ID{1, 2, 3} + + hdr.Value = &object.Header_Link{ + Link: link, + } + + check := func(lt object.Link_Type, name string) { + link.Type = lt + + res = newTypedObjectExtendedHeader(hdr) + + require.Equal(t, acl.HdrTypeObjSys, res.HeaderType()) + require.Equal(t, name, res.Name()) + require.Equal(t, link.ID.String(), res.Value()) + } + + check(object.Link_Previous, acl.HdrObjSysLinkPrev) + check(object.Link_Next, acl.HdrObjSysLinkNext) + check(object.Link_Parent, acl.HdrObjSysLinkPar) + check(object.Link_Child, acl.HdrObjSysLinkChild) + check(object.Link_StorageGroup, acl.HdrObjSysLinkSG) + } +} diff --git a/lib/acl/match.go b/lib/acl/match.go new file mode 100644 index 000000000..7d4289cb4 --- /dev/null +++ b/lib/acl/match.go @@ -0,0 +1,94 @@ +package acl + +import ( + "github.com/nspcc-dev/neofs-api-go/acl" +) + +// Maps MatchType to corresponding function. +// 1st argument of function - header value, 2nd - header filter. +var mMatchFns = map[acl.MatchType]func(acl.Header, acl.Header) bool{ + acl.StringEqual: stringEqual, + + acl.StringNotEqual: stringNotEqual, +} + +const ( + mResUndefined = iota + mResMatch + mResMismatch +) + +// MatchFilters checks if passed source carry at least one header that satisfies passed filters. +// +// Nil header does not satisfy any filter. Any header does not satisfy nil filter. +// +// Returns mResMismatch if passed TypedHeaderSource is nil. +// Returns mResMatch if passed filters are empty. +// +// If headers for some of the HeaderType could not be composed, mResUndefined returns. 
+func MatchFilters(src TypedHeaderSource, filters []acl.HeaderFilter) int { + if src == nil { + return mResMismatch + } else if len(filters) == 0 { + return mResMatch + } + + matched := 0 + + for _, filter := range filters { + // prevent NPE + if filter == nil { + continue + } + + headers, ok := src.HeadersOfType(filter.HeaderType()) + if !ok { + return mResUndefined + } + + // get headers of filtering type + for _, header := range headers { + // prevent NPE + if header == nil { + continue + } + + // check header name + if header.Name() != filter.Name() { + continue + } + + // get match function + matchFn, ok := mMatchFns[filter.MatchType()] + if !ok { + continue + } + + // check match + if !matchFn(header, filter) { + continue + } + + // increment match counter + matched++ + + break + } + } + + res := mResMismatch + + if matched >= len(filters) { + res = mResMatch + } + + return res +} + +func stringEqual(header, filter acl.Header) bool { + return header.Value() == filter.Value() +} + +func stringNotEqual(header, filter acl.Header) bool { + return header.Value() != filter.Value() +} diff --git a/lib/acl/match_test.go b/lib/acl/match_test.go new file mode 100644 index 000000000..123f852bc --- /dev/null +++ b/lib/acl/match_test.go @@ -0,0 +1,192 @@ +package acl + +import ( + "testing" + + "github.com/nspcc-dev/neofs-api-go/acl" + "github.com/stretchr/testify/require" +) + +type testTypedHeader struct { + t acl.HeaderType + k string + v string +} + +type testHeaderSrc struct { + hs []acl.TypedHeader +} + +type testHeaderFilter struct { + acl.TypedHeader + t acl.MatchType +} + +func (s testHeaderFilter) MatchType() acl.MatchType { + return s.t +} + +func (s testHeaderSrc) HeadersOfType(typ acl.HeaderType) ([]acl.Header, bool) { + res := make([]acl.Header, 0, len(s.hs)) + + for i := range s.hs { + if s.hs[i].HeaderType() == typ { + res = append(res, s.hs[i]) + } + } + + return res, true +} + +func (s testTypedHeader) Name() string { + return s.k +} + +func (s testTypedHeader) Value() string { + return s.v +} + +func (s testTypedHeader) HeaderType() acl.HeaderType { + return s.t +} + +func TestMatchFilters(t *testing.T) { + // nil TypedHeaderSource + require.Equal(t, mResMismatch, MatchFilters(nil, nil)) + + // empty HeaderFilter list + require.Equal(t, mResMatch, MatchFilters(new(testHeaderSrc), nil)) + + k := "key" + v := "value" + ht := acl.HeaderType(1) + + items := []struct { + // list of Key-Value-HeaderType for headers construction + hs []interface{} + // list of Key-Value-HeaderType-MatchType for filters construction + fs []interface{} + exp int + }{ + { // different HeaderType + hs: []interface{}{ + k, v, ht, + }, + fs: []interface{}{ + k, v, ht + 1, acl.StringEqual, + }, + exp: mResMismatch, + }, + { // different keys + hs: []interface{}{ + k, v, ht, + }, + fs: []interface{}{ + k + "1", v, ht, acl.StringEqual, + }, + exp: mResMismatch, + }, + { // equal values, StringEqual + hs: []interface{}{ + k, v, ht, + }, + fs: []interface{}{ + k, v, ht, acl.StringEqual, + }, + exp: mResMatch, + }, + { // equal values, StringNotEqual + hs: []interface{}{ + k, v, ht, + }, + fs: []interface{}{ + k, v, ht, acl.StringNotEqual, + }, + exp: mResMismatch, + }, + { // not equal values, StringEqual + hs: []interface{}{ + k, v, ht, + }, + fs: []interface{}{ + k, v + "1", ht, acl.StringEqual, + }, + exp: mResMismatch, + }, + { // not equal values, StringNotEqual + hs: []interface{}{ + k, v, ht, + }, + fs: []interface{}{ + k, v + "1", ht, acl.StringNotEqual, + }, + exp: mResMatch, + }, + { // one header, two 
filters + hs: []interface{}{ + k, v, ht, + }, + fs: []interface{}{ + k, v + "1", ht, acl.StringNotEqual, + k, v, ht, acl.StringEqual, + }, + exp: mResMatch, + }, + { // two headers, one filter + hs: []interface{}{ + k, v + "1", ht, + k, v, ht, + }, + fs: []interface{}{ + k, v, ht, acl.StringEqual, + }, + exp: mResMatch, + }, + { + hs: []interface{}{ + k, v + "1", acl.HdrTypeRequest, + k, v, acl.HdrTypeObjUsr, + }, + fs: []interface{}{ + k, v, acl.HdrTypeRequest, acl.StringNotEqual, + k, v, acl.HdrTypeObjUsr, acl.StringEqual, + }, + exp: mResMatch, + }, + } + + for _, item := range items { + headers := make([]acl.TypedHeader, 0) + + for i := 0; i < len(item.hs); i += 3 { + headers = append(headers, &testTypedHeader{ + t: item.hs[i+2].(acl.HeaderType), + k: item.hs[i].(string), + v: item.hs[i+1].(string), + }) + } + + filters := make([]acl.HeaderFilter, 0) + + for i := 0; i < len(item.fs); i += 4 { + filters = append(filters, &testHeaderFilter{ + TypedHeader: &testTypedHeader{ + t: item.fs[i+2].(acl.HeaderType), + k: item.fs[i].(string), + v: item.fs[i+1].(string), + }, + t: item.fs[i+3].(acl.MatchType), + }) + } + + require.Equal(t, + item.exp, + MatchFilters( + &testHeaderSrc{ + hs: headers, + }, + filters, + ), + ) + } +} diff --git a/lib/blockchain/event/event.go b/lib/blockchain/event/event.go new file mode 100644 index 000000000..d614844ce --- /dev/null +++ b/lib/blockchain/event/event.go @@ -0,0 +1,31 @@ +package event + +// Type is a notification event enumeration type. +type Type string + +// Event is an interface that is +// provided by Neo:Morph event structures. +type Event interface { + MorphEvent() +} + +// Equal compares two Type values and +// returns true if they are equal. +func (t Type) Equal(t2 Type) bool { + return string(t) == string(t2) +} + +// String returns casted to string Type. +func (t Type) String() string { + return string(t) +} + +// TypeFromBytes converts bytes slice to Type. +func TypeFromBytes(data []byte) Type { + return Type(data) +} + +// TypeFromString converts string to Type. +func TypeFromString(str string) Type { + return Type(str) +} diff --git a/lib/blockchain/event/handler.go b/lib/blockchain/event/handler.go new file mode 100644 index 000000000..2d9c5b774 --- /dev/null +++ b/lib/blockchain/event/handler.go @@ -0,0 +1,22 @@ +package event + +// Handler is an Event processing function. +type Handler func(Event) + +// HandlerInfo is a structure that groups +// the parameters of the handler of particular +// contract event. +type HandlerInfo struct { + scriptHashWithType + + h Handler +} + +// SetHandler is an event handler setter. +func (s *HandlerInfo) SetHandler(v Handler) { + s.h = v +} + +func (s HandlerInfo) handler() Handler { + return s.h +} diff --git a/lib/blockchain/event/listener.go b/lib/blockchain/event/listener.go new file mode 100644 index 000000000..2dcfceb3c --- /dev/null +++ b/lib/blockchain/event/listener.go @@ -0,0 +1,309 @@ +package event + +import ( + "context" + "sync" + + "github.com/nspcc-dev/neo-go/pkg/rpc/response/result" + "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/blockchain/goclient" + "github.com/nspcc-dev/neofs-node/lib/blockchain/subscriber" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +// Listener is an interface of smart contract notification event listener. +type Listener interface { + // Must start the event listener. + // + // Must listen to events with the parser installed. 
+ // + // Must return an error if event listening could not be started. + Listen(context.Context) + + // Must set the parser of particular contract event. + // + // Parser of each event must be set once. All parsers must be set before Listen call. + // + // Must ignore nil parsers and all calls after listener has been started. + SetParser(ParserInfo) + + // Must register the event handler for particular notification event of contract. + // + // The specified handler must be called after each capture and parsing of the event + // + // Must ignore nil handlers. + RegisterHandler(HandlerInfo) +} + +// ListenerParams is a group of parameters +// for Listener constructor. +type ListenerParams struct { + Logger *zap.Logger + + Subscriber subscriber.Subscriber +} + +type listener struct { + mtx *sync.RWMutex + + once *sync.Once + + started bool + + parsers map[scriptHashWithType]Parser + + handlers map[scriptHashWithType][]Handler + + log *zap.Logger + + subscriber subscriber.Subscriber +} + +const ( + newListenerFailMsg = "could not instantiate Listener" + + errNilLogger = internal.Error("nil logger") + + errNilSubscriber = internal.Error("nil event subscriber") +) + +// Listen starts the listening for events with registered handlers. +// +// Executes once, all subsequent calls do nothing. +// +// Returns an error if listener was already started. +func (s listener) Listen(ctx context.Context) { + s.once.Do(func() { + if err := s.listen(ctx); err != nil { + s.log.Error("could not start listen to events", + zap.String("error", err.Error()), + ) + } + }) +} + +func (s listener) listen(ctx context.Context) error { + // create the list of listening contract hashes + hashes := make([]util.Uint160, 0) + + // fill the list with the contracts with set event parsers. + s.mtx.RLock() + for hashType := range s.parsers { + scHash := hashType.scriptHash() + + // prevent repetitions + for _, hash := range hashes { + if hash.Equals(scHash) { + continue + } + } + + hashes = append(hashes, hashType.scriptHash()) + } + + // mark listener as started + s.started = true + + s.mtx.RUnlock() + + chEvent, err := s.subscriber.SubscribeForNotification(hashes...) 
+ if err != nil { + return err + } + + s.listenLoop(ctx, chEvent) + + return nil +} + +func (s listener) listenLoop(ctx context.Context, chEvent <-chan *result.NotificationEvent) { +loop: + for { + select { + case <-ctx.Done(): + s.log.Warn("stop event listener by context", + zap.String("error", ctx.Err().Error()), + ) + break loop + case notifyEvent, ok := <-chEvent: + if !ok { + s.log.Warn("stop event listener by channel") + break loop + } else if notifyEvent == nil { + s.log.Warn("nil notification event was caught") + continue loop + } + + s.parseAndHandle(notifyEvent) + } + } +} + +func (s listener) parseAndHandle(notifyEvent *result.NotificationEvent) { + log := s.log.With( + zap.String("script hash LE", notifyEvent.Contract.StringLE()), + ) + + // stack item must be an array of items + arr, err := goclient.ArrayFromStackParameter(notifyEvent.Item) + if err != nil { + log.Warn("stack item is not an array type", + zap.String("error", err.Error()), + ) + + return + } else if len(arr) == 0 { + log.Warn("stack item array is empty") + return + } + + // first item must be a byte array + typBytes, err := goclient.BytesFromStackParameter(arr[0]) + if err != nil { + log.Warn("first array item is not a byte array", + zap.String("error", err.Error()), + ) + + return + } + + // calculate event type from bytes + typEvent := TypeFromBytes(typBytes) + + log = log.With( + zap.Stringer("event type", typEvent), + ) + + // get the event parser + keyEvent := scriptHashWithType{} + keyEvent.SetScriptHash(notifyEvent.Contract) + keyEvent.SetType(typEvent) + + s.mtx.RLock() + parser, ok := s.parsers[keyEvent] + s.mtx.RUnlock() + + if !ok { + log.Warn("event parser not set") + + return + } + + // parse the notification event + event, err := parser(arr[1:]) + if err != nil { + log.Warn("could not parse notification event", + zap.String("error", err.Error()), + ) + + return + } + + // handler the event + s.mtx.RLock() + handlers := s.handlers[keyEvent] + s.mtx.RUnlock() + + if len(handlers) == 0 { + log.Info("handlers for parsed notification event were not registered", + zap.Any("event", event), + ) + + return + } + + for _, handler := range handlers { + handler(event) + } +} + +// SetParser sets the parser of particular contract event. +// +// Ignores nil and already set parsers. +// Ignores the parser if listener is started. +func (s listener) SetParser(p ParserInfo) { + log := s.log.With( + zap.String("script hash LE", p.scriptHash().StringLE()), + zap.Stringer("event type", p.getType()), + ) + + parser := p.parser() + if parser == nil { + log.Info("ignore nil event parser") + return + } + + s.mtx.Lock() + defer s.mtx.Unlock() + + // check if the listener was started + if s.started { + log.Warn("listener has been already started, ignore parser") + return + } + + // add event parser + if _, ok := s.parsers[p.scriptHashWithType]; !ok { + s.parsers[p.scriptHashWithType] = p.parser() + } + + log.Info("registered new event parser") +} + +// RegisterHandler registers the handler for particular notification event of contract. +// +// Ignores nil handlers. +// Ignores handlers of event without parser. 
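// Editor's sketch (illustrative, not part of this commit): typical wiring of the
// listener. The contract hash, event type string and parser body are hypothetical,
// and the snippet assumes the neo-go smartcontract package is imported. A parser
// must be set before the matching handler is registered and before Listen is called.
func exampleListenerSetup(ctx context.Context, l Listener, contract util.Uint160) {
	typ := TypeFromString("NewEpoch")

	p := ParserInfo{}
	p.SetScriptHash(contract)
	p.SetType(typ)
	p.SetParser(func(prms []smartcontract.Parameter) (Event, error) {
		// real code would convert the stack items into a concrete Event here
		return nil, nil
	})
	l.SetParser(p)

	h := HandlerInfo{}
	h.SetScriptHash(contract)
	h.SetType(typ)
	h.SetHandler(func(e Event) {
		// react to the parsed event
	})
	l.RegisterHandler(h)

	l.Listen(ctx)
}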
+func (s listener) RegisterHandler(p HandlerInfo) { + log := s.log.With( + zap.String("script hash LE", p.scriptHash().StringLE()), + zap.Stringer("event type", p.getType()), + ) + + handler := p.handler() + if handler == nil { + log.Warn("ignore nil event handler") + return + } + + // check if parser was set + s.mtx.RLock() + _, ok := s.parsers[p.scriptHashWithType] + s.mtx.RUnlock() + + if !ok { + log.Warn("ignore handler of event w/o parser") + return + } + + // add event handler + s.mtx.Lock() + s.handlers[p.scriptHashWithType] = append( + s.handlers[p.scriptHashWithType], + p.handler(), + ) + s.mtx.Unlock() + + log.Info("registered new event handler") +} + +// NewListener create the notification event listener instance and returns Listener interface. +func NewListener(p ListenerParams) (Listener, error) { + switch { + case p.Logger == nil: + return nil, errors.Wrap(errNilLogger, newListenerFailMsg) + case p.Subscriber == nil: + return nil, errors.Wrap(errNilSubscriber, newListenerFailMsg) + } + + return &listener{ + mtx: new(sync.RWMutex), + once: new(sync.Once), + parsers: make(map[scriptHashWithType]Parser), + handlers: make(map[scriptHashWithType][]Handler), + log: p.Logger, + subscriber: p.Subscriber, + }, nil +} diff --git a/lib/blockchain/event/netmap/epoch.go b/lib/blockchain/event/netmap/epoch.go new file mode 100644 index 000000000..2445b85a1 --- /dev/null +++ b/lib/blockchain/event/netmap/epoch.go @@ -0,0 +1,39 @@ +package netmap + +import ( + "github.com/nspcc-dev/neo-go/pkg/smartcontract" + "github.com/nspcc-dev/neofs-node/lib/blockchain/event" + "github.com/nspcc-dev/neofs-node/lib/blockchain/goclient" + "github.com/pkg/errors" +) + +// NewEpoch is a new epoch Neo:Morph event. +type NewEpoch struct { + num uint64 +} + +// MorphEvent implements Neo:Morph Event interface. +func (NewEpoch) MorphEvent() {} + +// EpochNumber returns new epoch number. +func (s NewEpoch) EpochNumber() uint64 { + return s.num +} + +// ParseNewEpoch is a parser of new epoch notification event. +// +// Result is type of NewEpoch. 
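// Editor's sketch (illustrative, not part of this commit): the parser below
// expects exactly one integer stack parameter and wraps it into a NewEpoch event.
// The epoch value is hypothetical.
func exampleParseNewEpoch() {
	ev, err := ParseNewEpoch([]smartcontract.Parameter{
		{
			Type:  smartcontract.IntegerType,
			Value: int64(42),
		},
	})
	if err != nil {
		return
	}

	epoch := ev.(NewEpoch).EpochNumber() // 42
	_ = epoch
}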
+func ParseNewEpoch(prms []smartcontract.Parameter) (event.Event, error) { + if ln := len(prms); ln != 1 { + return nil, event.WrongNumberOfParameters(1, ln) + } + + prmEpochNum, err := goclient.IntFromStackParameter(prms[0]) + if err != nil { + return nil, errors.Wrap(err, "could not get integer epoch number") + } + + return NewEpoch{ + num: uint64(prmEpochNum), + }, nil +} diff --git a/lib/blockchain/event/netmap/epoch_test.go b/lib/blockchain/event/netmap/epoch_test.go new file mode 100644 index 000000000..48342697b --- /dev/null +++ b/lib/blockchain/event/netmap/epoch_test.go @@ -0,0 +1,47 @@ +package netmap + +import ( + "testing" + + "github.com/nspcc-dev/neo-go/pkg/smartcontract" + "github.com/nspcc-dev/neofs-node/lib/blockchain/event" + "github.com/stretchr/testify/require" +) + +func TestParseNewEpoch(t *testing.T) { + t.Run("wrong number of parameters", func(t *testing.T) { + prms := []smartcontract.Parameter{ + {}, + {}, + } + + _, err := ParseNewEpoch(prms) + require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error()) + }) + + t.Run("wrong first parameter type", func(t *testing.T) { + _, err := ParseNewEpoch([]smartcontract.Parameter{ + { + Type: smartcontract.ByteArrayType, + }, + }) + + require.Error(t, err) + }) + + t.Run("correct behavior", func(t *testing.T) { + epochNum := uint64(100) + + ev, err := ParseNewEpoch([]smartcontract.Parameter{ + { + Type: smartcontract.IntegerType, + Value: int64(epochNum), + }, + }) + + require.NoError(t, err) + require.Equal(t, NewEpoch{ + num: epochNum, + }, ev) + }) +} diff --git a/lib/blockchain/event/parser.go b/lib/blockchain/event/parser.go new file mode 100644 index 000000000..f0fdbc093 --- /dev/null +++ b/lib/blockchain/event/parser.go @@ -0,0 +1,53 @@ +package event + +import ( + "github.com/nspcc-dev/neo-go/pkg/smartcontract" + "github.com/pkg/errors" +) + +// Parser is a function that constructs Event +// from the StackItem list. +type Parser func([]smartcontract.Parameter) (Event, error) + +// ParserInfo is a structure that groups +// the parameters of particular contract +// notification event parser. +type ParserInfo struct { + scriptHashWithType + + p Parser +} + +type wrongPrmNumber struct { + exp, act int +} + +// WrongNumberOfParameters returns an error about wrong number of smart contract parameters. +func WrongNumberOfParameters(exp, act int) error { + return &wrongPrmNumber{ + exp: exp, + act: act, + } +} + +func (s wrongPrmNumber) Error() string { + return errors.Errorf("wrong parameter count: expected %d, has %d", s.exp, s.act).Error() +} + +// SetParser is an event parser setter. +func (s *ParserInfo) SetParser(v Parser) { + s.p = v +} + +func (s ParserInfo) parser() Parser { + return s.p +} + +// SetType is an event type setter. +func (s *ParserInfo) SetType(v Type) { + s.typ = v +} + +func (s ParserInfo) getType() Type { + return s.typ +} diff --git a/lib/blockchain/event/utils.go b/lib/blockchain/event/utils.go new file mode 100644 index 000000000..66ef187d0 --- /dev/null +++ b/lib/blockchain/event/utils.go @@ -0,0 +1,34 @@ +package event + +import "github.com/nspcc-dev/neo-go/pkg/util" + +type scriptHashValue struct { + hash util.Uint160 +} + +type typeValue struct { + typ Type +} + +type scriptHashWithType struct { + scriptHashValue + typeValue +} + +// SetScriptHash is a script hash setter. +func (s *scriptHashValue) SetScriptHash(v util.Uint160) { + s.hash = v +} + +func (s scriptHashValue) scriptHash() util.Uint160 { + return s.hash +} + +// SetType is an event type setter. 
+func (s *typeValue) SetType(v Type) { + s.typ = v +} + +func (s typeValue) getType() Type { + return s.typ +} diff --git a/lib/blockchain/goclient/client.go b/lib/blockchain/goclient/client.go new file mode 100644 index 000000000..977c9b800 --- /dev/null +++ b/lib/blockchain/goclient/client.go @@ -0,0 +1,190 @@ +package goclient + +import ( + "context" + "crypto/ecdsa" + "encoding/hex" + "time" + + "github.com/nspcc-dev/neo-go/pkg/config/netmode" + "github.com/nspcc-dev/neo-go/pkg/core/transaction" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/nspcc-dev/neo-go/pkg/rpc/client" + sc "github.com/nspcc-dev/neo-go/pkg/smartcontract" + "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/nspcc-dev/neo-go/pkg/wallet" + crypto "github.com/nspcc-dev/neofs-crypto" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +type ( + // Params is a group of Client's constructor parameters. + Params struct { + Log *zap.Logger + Key *ecdsa.PrivateKey + Endpoint string + Magic netmode.Magic + DialTimeout time.Duration + } + + // Client is a neo-go wrapper that provides smart-contract invocation interface. + Client struct { + log *zap.Logger + cli *client.Client + acc *wallet.Account + } +) + +// ErrNilClient is returned by functions that expect +// a non-nil Client, but received nil. +const ErrNilClient = internal.Error("go client is nil") + +// HaltState returned if TestInvoke function processed without panic. +const HaltState = "HALT" + +// ErrMissingFee is returned by functions that expect +// a positive invocation fee, but received non-positive. +const ErrMissingFee = internal.Error("invocation fee must be positive") + +var ( + errNilParams = errors.New("chain/client: config was not provided to the constructor") + + errNilLogger = errors.New("chain/client: logger was not provided to the constructor") + + errNilKey = errors.New("chain/client: private key was not provided to the constructor") +) + +// Invoke invokes contract method by sending transaction into blockchain. +// Supported args types: int64, string, util.Uint160, []byte and bool. +// +// If passed fee is non-positive, ErrMissingFee returns. +func (c *Client) Invoke(contract util.Uint160, fee util.Fixed8, method string, args ...interface{}) error { + var params []sc.Parameter + for i := range args { + param, err := toStackParameter(args[i]) + if err != nil { + return err + } + + params = append(params, param) + } + + cosigner := []transaction.Cosigner{ + { + Account: c.acc.PrivateKey().PublicKey().GetScriptHash(), + Scopes: transaction.Global, + }, + } + + resp, err := c.cli.InvokeFunction(contract, method, params, cosigner) + if err != nil { + return err + } + + if len(resp.Script) == 0 { + return errors.New("chain/client: got empty invocation script from neo node") + } + + script, err := hex.DecodeString(resp.Script) + if err != nil { + return errors.New("chain/client: can't decode invocation script from neo node") + } + + txHash, err := c.cli.SignAndPushInvocationTx(script, c.acc, 0, fee, cosigner) + if err != nil { + return err + } + + c.log.Debug("neo client invoke", + zap.String("method", method), + zap.Stringer("tx_hash", txHash)) + + return nil +} + +// TestInvoke invokes contract method locally in neo-go node. This method should +// be used to read data from smart-contract. 
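// Editor's sketch (illustrative, not part of this commit): constructing a client
// and invoking a contract method. The endpoint, network magic, fee, method name
// and arguments are hypothetical; neo-go helpers such as util.Fixed8FromInt64 and
// netmode.PrivNet are assumed to be available in the vendored version.
func exampleInvoke(ctx context.Context, key *ecdsa.PrivateKey, contract util.Uint160) error {
	c, err := New(ctx, &Params{
		Log:         zap.L(),
		Key:         key,
		Endpoint:    "http://localhost:30333",
		Magic:       netmode.PrivNet,
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		return err
	}

	// one GAS fee, a method that takes a byte slice and a number
	return c.Invoke(contract, util.Fixed8FromInt64(1), "exampleMethod", []byte{1, 2, 3}, int64(7))
}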
+func (c *Client) TestInvoke(contract util.Uint160, method string, args ...interface{}) ([]sc.Parameter, error) { + var params = make([]sc.Parameter, 0, len(args)) + + for i := range args { + p, err := toStackParameter(args[i]) + if err != nil { + return nil, err + } + + params = append(params, p) + } + + cosigner := []transaction.Cosigner{ + { + Account: c.acc.PrivateKey().PublicKey().GetScriptHash(), + Scopes: transaction.Global, + }, + } + + val, err := c.cli.InvokeFunction(contract, method, params, cosigner) + if err != nil { + return nil, err + } + + if val.State != HaltState { + return nil, errors.Errorf("chain/client: contract execution finished with state %s", val.State) + } + + return val.Stack, nil +} + +// New is a Client constructor. +func New(ctx context.Context, p *Params) (*Client, error) { + switch { + case p == nil: + return nil, errNilParams + case p.Log == nil: + return nil, errNilLogger + case p.Key == nil: + return nil, errNilKey + } + + privKeyBytes := crypto.MarshalPrivateKey(p.Key) + + wif, err := keys.WIFEncode(privKeyBytes, keys.WIFVersion, true) + if err != nil { + return nil, err + } + + account, err := wallet.NewAccountFromWIF(wif) + if err != nil { + return nil, err + } + + cli, err := client.New(ctx, p.Endpoint, client.Options{ + DialTimeout: p.DialTimeout, + Network: p.Magic, + }) + if err != nil { + return nil, err + } + + return &Client{log: p.Log, cli: cli, acc: account}, nil +} + +func toStackParameter(value interface{}) (sc.Parameter, error) { + var result = sc.Parameter{ + Value: value, + } + + // todo: add more types + switch value.(type) { + case []byte: + result.Type = sc.ByteArrayType + case int64: // TODO: add other numerical types + result.Type = sc.IntegerType + default: + return result, errors.Errorf("chain/client: unsupported parameter %v", value) + } + + return result, nil +} diff --git a/lib/blockchain/goclient/client_test.go b/lib/blockchain/goclient/client_test.go new file mode 100644 index 000000000..90d2c271a --- /dev/null +++ b/lib/blockchain/goclient/client_test.go @@ -0,0 +1,33 @@ +package goclient + +import ( + "testing" + + sc "github.com/nspcc-dev/neo-go/pkg/smartcontract" + "github.com/stretchr/testify/require" +) + +func TestToStackParameter(t *testing.T) { + items := []struct { + value interface{} + expType sc.ParamType + }{ + { + value: []byte{1, 2, 3}, + expType: sc.ByteArrayType, + }, + { + value: int64(100), + expType: sc.IntegerType, + }, + } + + for _, item := range items { + t.Run(item.expType.String()+" to stack parameter", func(t *testing.T) { + res, err := toStackParameter(item.value) + require.NoError(t, err) + require.Equal(t, item.expType, res.Type) + require.Equal(t, item.value, res.Value) + }) + } +} diff --git a/lib/blockchain/goclient/util.go b/lib/blockchain/goclient/util.go new file mode 100644 index 000000000..82e30f49b --- /dev/null +++ b/lib/blockchain/goclient/util.go @@ -0,0 +1,131 @@ +package goclient + +import ( + "encoding/binary" + + sc "github.com/nspcc-dev/neo-go/pkg/smartcontract" + "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/pkg/errors" +) + +/* + Use these function to parse stack parameters obtained from `TestInvoke` + function to native go types. You should know upfront return types of invoked + method. +*/ + +// BoolFromStackParameter receives boolean value from the value of a smart contract parameter. 
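+//
+// For example (sketch; prms is assumed to be a stack returned by TestInvoke):
+//
+//   enabled, err := BoolFromStackParameter(prms[0])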
+func BoolFromStackParameter(param sc.Parameter) (bool, error) { + switch param.Type { + case sc.BoolType: + val, ok := param.Value.(bool) + if !ok { + return false, errors.Errorf("chain/client: can't convert %T to boolean", param.Value) + } + + return val, nil + case sc.IntegerType: + val, ok := param.Value.(int64) + if !ok { + return false, errors.Errorf("chain/client: can't convert %T to boolean", param.Value) + } + + return val > 0, nil + case sc.ByteArrayType: + val, ok := param.Value.([]byte) + if !ok { + return false, errors.Errorf("chain/client: can't convert %T to boolean", param.Value) + } + + return len(val) != 0, nil + default: + return false, errors.Errorf("chain/client: %s is not a bool type", param.Type) + } +} + +// IntFromStackParameter receives numerical value from the value of a smart contract parameter. +func IntFromStackParameter(param sc.Parameter) (int64, error) { + switch param.Type { + case sc.IntegerType: + val, ok := param.Value.(int64) + if !ok { + return 0, errors.Errorf("chain/client: can't convert %T to integer", param.Value) + } + + return val, nil + case sc.ByteArrayType: + val, ok := param.Value.([]byte) + if !ok || len(val) > 8 { + return 0, errors.Errorf("chain/client: can't convert %T to integer", param.Value) + } + + res := make([]byte, 8) + copy(res[:len(val)], val) + + return int64(binary.LittleEndian.Uint64(res)), nil + default: + return 0, errors.Errorf("chain/client: %s is not an integer type", param.Type) + } +} + +// BytesFromStackParameter receives binary value from the value of a smart contract parameter. +func BytesFromStackParameter(param sc.Parameter) ([]byte, error) { + if param.Type != sc.ByteArrayType { + return nil, errors.Errorf("chain/client: %s is not a byte array type", param.Type) + } + + val, ok := param.Value.([]byte) + if !ok { + return nil, errors.Errorf("chain/client: can't convert %T to byte slice", param.Value) + } + + return val, nil +} + +// ArrayFromStackParameter returns the slice contract parameters from passed parameter. +// +// If passed parameter carries boolean false value, (nil, nil) returns. +func ArrayFromStackParameter(param sc.Parameter) ([]sc.Parameter, error) { + if param.Type == sc.BoolType && !param.Value.(bool) { + return nil, nil + } + + if param.Type != sc.ArrayType { + return nil, errors.Errorf("chain/client: %s is not an array type", param.Type) + } + + val, ok := param.Value.([]sc.Parameter) + if !ok { + return nil, errors.Errorf("chain/client: can't convert %T to parameter slice", param.Value) + } + + return val, nil +} + +// StringFromStackParameter receives string value from the value of a smart contract parameter. +func StringFromStackParameter(param sc.Parameter) (string, error) { + switch param.Type { + case sc.StringType: + val, ok := param.Value.(string) + if !ok { + return "", errors.Errorf("chain/client: can't convert %T to string", param.Value) + } + + return val, nil + case sc.ByteArrayType: + val, ok := param.Value.([]byte) + if !ok { + return "", errors.Errorf("chain/client: can't convert %T to string", param.Value) + } + + return string(val), nil + default: + return "", errors.Errorf("chain/client: %s is not a string type", param.Type) + } +} + +// ReadStorage of the contract directly. Use it for debug, try to obtain +// smart-contract data from contract method with TestInvoke function. 
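+//
+// Usage sketch (the contract hash and storage key are placeholders):
+//
+//   raw, err := ReadStorage(c, contractHash, []byte("someKey"))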
+func ReadStorage(c *Client, contract util.Uint160, key []byte) ([]byte, error) { + return c.cli.GetStorageByHash(contract, key) +} diff --git a/lib/blockchain/goclient/util_test.go b/lib/blockchain/goclient/util_test.go new file mode 100644 index 000000000..5752e2dda --- /dev/null +++ b/lib/blockchain/goclient/util_test.go @@ -0,0 +1,145 @@ +package goclient + +import ( + "testing" + + sc "github.com/nspcc-dev/neo-go/pkg/smartcontract" + "github.com/stretchr/testify/require" +) + +var ( + stringParam = sc.Parameter{ + Type: sc.StringType, + Value: "Hello World", + } + + intParam = sc.Parameter{ + Type: sc.IntegerType, + Value: int64(1), + } + + byteWithIntParam = sc.Parameter{ + Type: sc.ByteArrayType, + Value: []byte{0x0a}, + } + + byteArrayParam = sc.Parameter{ + Type: sc.ByteArrayType, + Value: []byte("Hello World"), + } + + emptyByteArrayParam = sc.Parameter{ + Type: sc.ByteArrayType, + Value: []byte{}, + } + + trueBoolParam = sc.Parameter{ + Type: sc.BoolType, + Value: true, + } + + falseBoolParam = sc.Parameter{ + Type: sc.BoolType, + Value: false, + } + + arrayParam = sc.Parameter{ + Type: sc.ArrayType, + Value: []sc.Parameter{intParam, byteArrayParam}, + } +) + +func TestBoolFromStackParameter(t *testing.T) { + t.Run("true assert", func(t *testing.T) { + val, err := BoolFromStackParameter(trueBoolParam) + require.NoError(t, err) + require.True(t, val) + + val, err = BoolFromStackParameter(intParam) + require.NoError(t, err) + require.True(t, val) + }) + + t.Run("false assert", func(t *testing.T) { + val, err := BoolFromStackParameter(falseBoolParam) + require.NoError(t, err) + require.False(t, val) + + val, err = BoolFromStackParameter(emptyByteArrayParam) + require.NoError(t, err) + require.False(t, val) + }) + + t.Run("incorrect assert", func(t *testing.T) { + _, err := BoolFromStackParameter(stringParam) + require.Error(t, err) + }) +} + +func TestArrayFromStackParameter(t *testing.T) { + t.Run("correct assert", func(t *testing.T) { + val, err := ArrayFromStackParameter(arrayParam) + require.NoError(t, err) + require.Len(t, val, len(arrayParam.Value.([]sc.Parameter))) + }) + t.Run("incorrect assert", func(t *testing.T) { + _, err := ArrayFromStackParameter(byteArrayParam) + require.Error(t, err) + }) + t.Run("boolean false case", func(t *testing.T) { + val, err := ArrayFromStackParameter(falseBoolParam) + require.NoError(t, err) + require.Nil(t, val) + }) +} + +func TestBytesFromStackParameter(t *testing.T) { + t.Run("correct assert", func(t *testing.T) { + val, err := BytesFromStackParameter(byteArrayParam) + require.NoError(t, err) + require.Equal(t, byteArrayParam.Value.([]byte), val) + }) + + t.Run("incorrect assert", func(t *testing.T) { + _, err := BytesFromStackParameter(stringParam) + require.Error(t, err) + }) +} + +func TestIntFromStackParameter(t *testing.T) { + t.Run("correct assert", func(t *testing.T) { + val, err := IntFromStackParameter(intParam) + require.NoError(t, err) + require.Equal(t, intParam.Value.(int64), val) + + val, err = IntFromStackParameter(byteWithIntParam) + require.NoError(t, err) + require.Equal(t, int64(0x0a), val) + + val, err = IntFromStackParameter(emptyByteArrayParam) + require.NoError(t, err) + require.Equal(t, int64(0), val) + }) + + t.Run("incorrect assert", func(t *testing.T) { + _, err := IntFromStackParameter(byteArrayParam) + require.Error(t, err) + }) +} + +func TestStringFromStackParameter(t *testing.T) { + t.Run("correct assert", func(t *testing.T) { + val, err := StringFromStackParameter(stringParam) + require.NoError(t, err) + 
require.Equal(t, stringParam.Value.(string), val) + + val, err = StringFromStackParameter(byteArrayParam) + require.NoError(t, err) + require.Equal(t, string(byteArrayParam.Value.([]byte)), val) + }) + + t.Run("incorrect assert", func(t *testing.T) { + _, err := StringFromStackParameter(intParam) + require.Error(t, err) + }) +} diff --git a/lib/blockchain/subscriber/subscriber.go b/lib/blockchain/subscriber/subscriber.go new file mode 100644 index 000000000..5d2528e97 --- /dev/null +++ b/lib/blockchain/subscriber/subscriber.go @@ -0,0 +1,151 @@ +package subscriber + +import ( + "context" + "errors" + "sync" + "time" + + "github.com/nspcc-dev/neo-go/pkg/rpc/client" + "github.com/nspcc-dev/neo-go/pkg/rpc/response" + "github.com/nspcc-dev/neo-go/pkg/rpc/response/result" + "github.com/nspcc-dev/neo-go/pkg/util" + "go.uber.org/zap" +) + +type ( + // Subscriber is an interface of the NotificationEvent listener. + Subscriber interface { + SubscribeForNotification(...util.Uint160) (<-chan *result.NotificationEvent, error) + UnsubscribeForNotification() + } + + subscriber struct { + *sync.RWMutex + log *zap.Logger + client *client.WSClient + + notify chan *result.NotificationEvent + notifyIDs map[util.Uint160]string + } + + // Params is a group of Subscriber constructor parameters. + Params struct { + Log *zap.Logger + Endpoint string + DialTimeout time.Duration + } +) + +var ( + errNilParams = errors.New("chain/subscriber: config was not provided to the constructor") + + errNilLogger = errors.New("chain/subscriber: logger was not provided to the constructor") +) + +func (s *subscriber) SubscribeForNotification(contracts ...util.Uint160) (<-chan *result.NotificationEvent, error) { + s.Lock() + defer s.Unlock() + + notifyIDs := make(map[util.Uint160]string, len(contracts)) + + for i := range contracts { + // do not subscribe to already subscribed contracts + if _, ok := s.notifyIDs[contracts[i]]; ok { + continue + } + + // subscribe to contract notifications + id, err := s.client.SubscribeForExecutionNotifications(&contracts[i]) + if err != nil { + // if there is some error, undo all subscriptions and return error + for _, id := range notifyIDs { + _ = s.client.Unsubscribe(id) + } + + return nil, err + } + + // save notification id + notifyIDs[contracts[i]] = id + } + + // update global map of subscribed contracts + for contract, id := range notifyIDs { + s.notifyIDs[contract] = id + } + + return s.notify, nil +} + +func (s *subscriber) UnsubscribeForNotification() { + s.Lock() + defer s.Unlock() + + for i := range s.notifyIDs { + err := s.client.Unsubscribe(s.notifyIDs[i]) + if err != nil { + s.log.Error("unsubscribe for notification", + zap.String("event", s.notifyIDs[i]), + zap.Error(err)) + } + + delete(s.notifyIDs, i) + } +} + +func (s *subscriber) routeNotifications(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case notification := <-s.client.Notifications: + switch notification.Type { + case response.NotificationEventID: + notification, ok := notification.Value.(*result.NotificationEvent) + if !ok { + s.log.Error("can't cast notify event to the notify struct") + continue + } + + s.notify <- notification + default: + s.log.Debug("unsupported notification from the chain", + zap.Uint8("type", uint8(notification.Type)), + ) + } + } + } +} + +// New is a constructs Neo:Morph event listener and returns Subscriber interface. 
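+//
+// A rough usage sketch (endpoint value and contract hash are placeholders):
+//
+//   sub, err := New(ctx, &Params{
+//       Log:         logger,
+//       Endpoint:    "ws://127.0.0.1:30333/ws",
+//       DialTimeout: 5 * time.Second,
+//   })
+//   if err != nil {
+//       return err
+//   }
+//   ch, err := sub.SubscribeForNotification(contractHash)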
+func New(ctx context.Context, p *Params) (Subscriber, error) { + switch { + case p == nil: + return nil, errNilParams + case p.Log == nil: + return nil, errNilLogger + } + + wsClient, err := client.NewWS(ctx, p.Endpoint, client.Options{ + DialTimeout: p.DialTimeout, + }) + if err != nil { + return nil, err + } + + sub := &subscriber{ + RWMutex: new(sync.RWMutex), + log: p.Log, + client: wsClient, + notify: make(chan *result.NotificationEvent), + notifyIDs: make(map[util.Uint160]string), + } + + // Worker listens all events from neo-go websocket and puts them + // into corresponding channel. It may be notifications, transactions, + // new blocks. For now only notifications. + go sub.routeNotifications(ctx) + + return sub, nil +} diff --git a/lib/boot/bootstrap_test.go b/lib/boot/bootstrap_test.go new file mode 100644 index 000000000..206e2562e --- /dev/null +++ b/lib/boot/bootstrap_test.go @@ -0,0 +1,24 @@ +package boot + +import ( + "testing" + + "github.com/nspcc-dev/neofs-api-go/bootstrap" + "github.com/stretchr/testify/require" +) + +func TestBootstrapPeerParams(t *testing.T) { + s := BootstrapPeerParams{} + + nodeInfo := &bootstrap.NodeInfo{ + Address: "address", + PubKey: []byte{1, 2, 3}, + Options: []string{ + "opt1", + "opt2", + }, + } + s.SetNodeInfo(nodeInfo) + + require.Equal(t, nodeInfo, s.NodeInfo()) +} diff --git a/lib/boot/bootstrapper.go b/lib/boot/bootstrapper.go new file mode 100644 index 000000000..f97e6a789 --- /dev/null +++ b/lib/boot/bootstrapper.go @@ -0,0 +1,31 @@ +package boot + +import ( + "github.com/nspcc-dev/neofs-api-go/bootstrap" + "github.com/nspcc-dev/neofs-node/internal" +) + +// BootstrapPeerParams is a group of parameters +// for storage node bootstrap. +type BootstrapPeerParams struct { + info *bootstrap.NodeInfo +} + +// PeerBootstrapper is an interface of the NeoFS node bootstrap tool. +type PeerBootstrapper interface { + AddPeer(BootstrapPeerParams) error +} + +// ErrNilPeerBootstrapper is returned by functions that expect +// a non-nil PeerBootstrapper, but received nil. +const ErrNilPeerBootstrapper = internal.Error("peer bootstrapper is nil") + +// SetNodeInfo is a node info setter. +func (s *BootstrapPeerParams) SetNodeInfo(v *bootstrap.NodeInfo) { + s.info = v +} + +// NodeInfo is a node info getter. +func (s BootstrapPeerParams) NodeInfo() *bootstrap.NodeInfo { + return s.info +} diff --git a/lib/boot/storage.go b/lib/boot/storage.go new file mode 100644 index 000000000..9043576ce --- /dev/null +++ b/lib/boot/storage.go @@ -0,0 +1,46 @@ +package boot + +import ( + "context" + + "go.uber.org/zap" +) + +// StorageBootParams is a group of parameters +// for storage node bootstrap operation. +type StorageBootParams struct { + BootstrapPeerParams +} + +// StorageBootController is an entity that performs +// registration of a storage node in NeoFS network. +type StorageBootController struct { + peerBoot PeerBootstrapper + + bootPrm StorageBootParams + + log *zap.Logger +} + +// SetPeerBootstrapper is a PeerBootstrapper setter. +func (s *StorageBootController) SetPeerBootstrapper(v PeerBootstrapper) { + s.peerBoot = v +} + +// SetBootParams is a storage node bootstrap parameters setter. +func (s *StorageBootController) SetBootParams(v StorageBootParams) { + s.bootPrm = v +} + +// SetLogger is a logging component setter. +func (s *StorageBootController) SetLogger(v *zap.Logger) { + s.log = v +} + +// Bootstrap registers storage node in NeoFS system. 
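+//
+// Typical wiring is expected to look like this (sketch; the PeerBootstrapper
+// implementation and the parameters come from the application setup):
+//
+//   var ctrl StorageBootController
+//   ctrl.SetPeerBootstrapper(peerBoot)
+//   ctrl.SetBootParams(bootPrm)
+//   ctrl.SetLogger(log)
+//   ctrl.Bootstrap(ctx)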
+func (s StorageBootController) Bootstrap(context.Context) { + // register peer in NeoFS network + if err := s.peerBoot.AddPeer(s.bootPrm.BootstrapPeerParams); err != nil && s.log != nil { + s.log.Error("could not register storage node in network") + } +} diff --git a/lib/buckets/boltdb/boltdb.go b/lib/buckets/boltdb/boltdb.go new file mode 100644 index 000000000..4310151b1 --- /dev/null +++ b/lib/buckets/boltdb/boltdb.go @@ -0,0 +1,109 @@ +package boltdb + +import ( + "io/ioutil" + "log" + "os" + "path" + + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/pkg/errors" + "github.com/spf13/viper" + "go.etcd.io/bbolt" +) + +type ( + bucket struct { + db *bbolt.DB + name []byte + } + + // Options groups the BoltDB bucket's options. + Options struct { + bbolt.Options + Name []byte + Path string + Perm os.FileMode + } +) + +const ( + defaultFilePermission = 0777 + + errEmptyPath = internal.Error("database empty path") +) + +var _ core.Bucket = (*bucket)(nil) + +func makeCopy(val []byte) []byte { + tmp := make([]byte, len(val)) + copy(tmp, val) + + return tmp +} + +// NewOptions prepares options for badger instance. +func NewOptions(name core.BucketType, v *viper.Viper) (opts Options, err error) { + key := string(name) + opts = Options{ + Options: bbolt.Options{ + // set defaults: + Timeout: bbolt.DefaultOptions.Timeout, + FreelistType: bbolt.DefaultOptions.FreelistType, + + // set config options: + NoSync: v.GetBool(key + ".no_sync"), + ReadOnly: v.GetBool(key + ".read_only"), + NoGrowSync: v.GetBool(key + ".no_grow_sync"), + NoFreelistSync: v.GetBool(key + ".no_freelist_sync"), + + PageSize: v.GetInt(key + ".page_size"), + MmapFlags: v.GetInt(key + ".mmap_flags"), + InitialMmapSize: v.GetInt(key + ".initial_mmap_size"), + }, + + Name: []byte(name), + Perm: defaultFilePermission, + Path: v.GetString(key + ".path"), + } + + if opts.Path == "" { + return opts, errEmptyPath + } + + if tmp := v.GetDuration(key + ".lock_timeout"); tmp > 0 { + opts.Timeout = tmp + } + + if perm := v.GetUint32(key + ".perm"); perm != 0 { + opts.Perm = os.FileMode(perm) + } + + base := path.Dir(opts.Path) + if err := os.MkdirAll(base, opts.Perm); err != nil { + return opts, errors.Wrapf(err, "could not use `%s` dir", base) + } + + return opts, nil +} + +// NewBucket creates badger-bucket instance. +func NewBucket(opts *Options) (core.Bucket, error) { + log.SetOutput(ioutil.Discard) // disable default logger + + db, err := bbolt.Open(opts.Path, opts.Perm, &opts.Options) + if err != nil { + return nil, err + } + + err = db.Update(func(tx *bbolt.Tx) error { + _, err := tx.CreateBucketIfNotExists(opts.Name) + return err + }) + if err != nil { + return nil, err + } + + return &bucket{db: db, name: opts.Name}, nil +} diff --git a/lib/buckets/boltdb/methods.go b/lib/buckets/boltdb/methods.go new file mode 100644 index 000000000..b302a7dbd --- /dev/null +++ b/lib/buckets/boltdb/methods.go @@ -0,0 +1,94 @@ +package boltdb + +import ( + "os" + + "github.com/mr-tron/base58" + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/pkg/errors" + "go.etcd.io/bbolt" +) + +// Get value by key or return error. +func (b *bucket) Get(key []byte) (data []byte, err error) { + err = b.db.View(func(txn *bbolt.Tx) error { + txn.Bucket(b.name).Cursor().Seek(key) + val := txn.Bucket(b.name).Get(key) + if val == nil { + return errors.Wrapf(core.ErrNotFound, "key=%s", base58.Encode(key)) + } + + data = makeCopy(val) + return nil + }) + + return +} + +// Set value for key. 
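+//
+// A round trip through the bucket is simply (sketch, b being the bucket
+// returned by NewBucket):
+//
+//   if err := b.Set([]byte("key"), []byte("value")); err != nil { /* ... */ }
+//   val, err := b.Get([]byte("key")) // val == []byte("value")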
+func (b *bucket) Set(key, value []byte) error { + return b.db.Update(func(txn *bbolt.Tx) error { + k, v := makeCopy(key), makeCopy(value) + return txn.Bucket(b.name).Put(k, v) + }) +} + +// Del removes item from bucket by key. +func (b *bucket) Del(key []byte) error { + return b.db.Update(func(txn *bbolt.Tx) error { + return txn.Bucket(b.name).Delete(key) + }) +} + +// Has checks key exists. +func (b *bucket) Has(key []byte) bool { + _, err := b.Get(key) + return !errors.Is(errors.Cause(err), core.ErrNotFound) +} + +// Size returns size of database. +func (b *bucket) Size() int64 { + info, err := os.Stat(b.db.Path()) + if err != nil { + return 0 + } + + return info.Size() +} + +// List all items in bucket. +func (b *bucket) List() ([][]byte, error) { + var items [][]byte + + if err := b.db.View(func(txn *bbolt.Tx) error { + return txn.Bucket(b.name).ForEach(func(k, _ []byte) error { + items = append(items, makeCopy(k)) + return nil + }) + }); err != nil { + return nil, err + } + + return items, nil +} + +// Filter elements by filter closure. +func (b *bucket) Iterate(handler core.FilterHandler) error { + if handler == nil { + return core.ErrNilFilterHandler + } + + return b.db.View(func(txn *bbolt.Tx) error { + return txn.Bucket(b.name).ForEach(func(k, v []byte) error { + if !handler(makeCopy(k), makeCopy(v)) { + return core.ErrIteratingAborted + } + return nil + }) + }) +} + +// Close bucket database. +func (b *bucket) Close() error { + return b.db.Close() +} diff --git a/lib/buckets/boltdb/methods_test.go b/lib/buckets/boltdb/methods_test.go new file mode 100644 index 000000000..dc9517d73 --- /dev/null +++ b/lib/buckets/boltdb/methods_test.go @@ -0,0 +1,95 @@ +package boltdb + +import ( + "encoding/binary" + "io/ioutil" + "os" + "strings" + "testing" + "time" + + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/pkg/errors" + "github.com/spf13/viper" + "github.com/stretchr/testify/require" +) + +var config = strings.NewReader(` +storage: + test_bucket: + bucket: boltdb + path: ./temp/storage/test_bucket + perm: 0777 +`) + +func TestBucket(t *testing.T) { + file, err := ioutil.TempFile("", "test_bolt_db") + require.NoError(t, err) + require.NoError(t, file.Close()) + + v := viper.New() + require.NoError(t, v.ReadConfig(config)) + + // -- // + _, err = NewOptions("storage.test_bucket", v) + require.EqualError(t, err, errEmptyPath.Error()) + + v.SetDefault("storage.test_bucket.path", file.Name()) + v.SetDefault("storage.test_bucket.timeout", time.Millisecond*100) + // -- // + + opts, err := NewOptions("storage.test_bucket", v) + require.NoError(t, err) + + db, err := NewBucket(&opts) + require.NoError(t, err) + + require.NotPanics(t, func() { db.Size() }) + + var ( + count = uint64(10) + expected = []byte("test") + ) + + for i := uint64(0); i < count; i++ { + key := make([]byte, 8) + binary.BigEndian.PutUint64(key, i) + + require.False(t, db.Has(key)) + + val, err := db.Get(key) + require.EqualError(t, errors.Cause(err), core.ErrNotFound.Error()) + require.Empty(t, val) + + require.NoError(t, db.Set(key, expected)) + + require.True(t, db.Has(key)) + + val, err = db.Get(key) + require.NoError(t, err) + require.Equal(t, expected, val) + + keys, err := db.List() + require.NoError(t, err) + require.Len(t, keys, 1) + require.Equal(t, key, keys[0]) + + require.EqualError(t, db.Iterate(nil), core.ErrNilFilterHandler.Error()) + + items, err := core.ListBucketItems(db, func(_, _ []byte) bool { return true }) + require.NoError(t, err) + require.Len(t, items, 1) + require.Equal(t, key, 
items[0].Key) + require.Equal(t, val, items[0].Val) + + require.NoError(t, db.Del(key)) + require.False(t, db.Has(key)) + + val, err = db.Get(key) + require.EqualError(t, errors.Cause(err), core.ErrNotFound.Error()) + require.Empty(t, val) + } + + require.NoError(t, db.Close()) + require.NoError(t, os.RemoveAll(file.Name())) +} diff --git a/lib/buckets/boltdb/plugin/main.go b/lib/buckets/boltdb/plugin/main.go new file mode 100644 index 000000000..04a8f9f22 --- /dev/null +++ b/lib/buckets/boltdb/plugin/main.go @@ -0,0 +1,25 @@ +package main + +import ( + "github.com/nspcc-dev/neofs-node/lib/buckets/boltdb" + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/pkg/errors" + "github.com/spf13/viper" +) + +var _ = PrepareBucket + +// PrepareBucket is interface method for bucket. +func PrepareBucket(name core.BucketType, v *viper.Viper) (db core.Bucket, err error) { + var opts boltdb.Options + + if opts, err = boltdb.NewOptions("storage."+name, v); err != nil { + err = errors.Wrapf(err, "%q: could not prepare options", name) + return + } else if db, err = boltdb.NewBucket(&opts); err != nil { + err = errors.Wrapf(err, "%q: could not prepare bucket", name) + return + } + + return +} diff --git a/lib/buckets/fsbucket/bucket.go b/lib/buckets/fsbucket/bucket.go new file mode 100644 index 000000000..029d509c9 --- /dev/null +++ b/lib/buckets/fsbucket/bucket.go @@ -0,0 +1,101 @@ +package fsbucket + +import ( + "os" + + "github.com/mr-tron/base58" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/pkg/errors" + "github.com/spf13/viper" + "go.uber.org/atomic" +) + +type ( + bucket struct { + dir string + perm os.FileMode + } + + treeBucket struct { + dir string + perm os.FileMode + + depth int + prefixLength int + sz *atomic.Int64 + } +) + +const ( + defaultDirectory = "fsbucket" + defaultPermissions = 0755 + defaultDepth = 2 + defaultPrefixLen = 2 +) + +const errShortKey = internal.Error("key is too short for tree fs bucket") + +var _ core.Bucket = (*bucket)(nil) + +func stringifyKey(key []byte) string { + return base58.Encode(key) +} + +func decodeKey(key string) []byte { + k, err := base58.Decode(key) + if err != nil { + panic(err) // it can fail only for not base58 strings + } + + return k +} + +// NewBucket creates new in-memory bucket instance. +func NewBucket(name core.BucketType, v *viper.Viper) (core.Bucket, error) { + var ( + key = "storage." 
+ string(name) + dir string + perm os.FileMode + + prefixLen int + depth int + ) + + if dir = v.GetString(key + ".directory"); dir == "" { + dir = defaultDirectory + } + + if perm = os.FileMode(v.GetInt(key + ".permissions")); perm == 0 { + perm = defaultPermissions + } + + if depth = v.GetInt(key + ".depth"); depth <= 0 { + depth = defaultDepth + } + + if prefixLen = v.GetInt(key + ".prefix_len"); prefixLen <= 0 { + prefixLen = defaultPrefixLen + } + + if err := os.MkdirAll(dir, perm); err != nil { + return nil, errors.Wrapf(err, "could not create bucket %s", string(name)) + } + + if v.GetBool(key + ".tree_enabled") { + b := &treeBucket{ + dir: dir, + perm: perm, + depth: depth, + prefixLength: prefixLen, + } + b.sz = atomic.NewInt64(b.size()) + + return b, nil + } + + return &bucket{ + dir: dir, + perm: perm, + }, nil +} diff --git a/lib/buckets/fsbucket/methods.go b/lib/buckets/fsbucket/methods.go new file mode 100644 index 000000000..9aeaf45f2 --- /dev/null +++ b/lib/buckets/fsbucket/methods.go @@ -0,0 +1,107 @@ +package fsbucket + +import ( + "io/ioutil" + "os" + "path" + "path/filepath" + + "github.com/nspcc-dev/neofs-node/lib/core" +) + +// Get value by key. +func (b *bucket) Get(key []byte) ([]byte, error) { + p := path.Join(b.dir, stringifyKey(key)) + if _, err := os.Stat(p); os.IsNotExist(err) { + return nil, core.ErrNotFound + } + + return ioutil.ReadFile(p) +} + +// Set value by key. +func (b *bucket) Set(key, value []byte) error { + p := path.Join(b.dir, stringifyKey(key)) + + return ioutil.WriteFile(p, value, b.perm) +} + +// Del value by key. +func (b *bucket) Del(key []byte) error { + p := path.Join(b.dir, stringifyKey(key)) + if _, err := os.Stat(p); os.IsNotExist(err) { + return core.ErrNotFound + } + + return os.Remove(p) +} + +// Has checks key exists. +func (b *bucket) Has(key []byte) bool { + p := path.Join(b.dir, stringifyKey(key)) + _, err := os.Stat(p) + + return err == nil +} + +func listing(root string, fn func(path string, info os.FileInfo) error) error { + return filepath.Walk(root, func(p string, info os.FileInfo, err error) error { + if err != nil || info.IsDir() { + return err + } + + if fn == nil { + return nil + } + + return fn(p, info) + }) +} + +// Size of bucket. +func (b *bucket) Size() (size int64) { + err := listing(b.dir, func(_ string, info os.FileInfo) error { + size += info.Size() + return nil + }) + + if err != nil { + size = 0 + } + + return +} + +// List all bucket items. +func (b *bucket) List() ([][]byte, error) { + buckets := make([][]byte, 0) + + err := listing(b.dir, func(p string, info os.FileInfo) error { + buckets = append(buckets, decodeKey(info.Name())) + return nil + }) + + return buckets, err +} + +// Filter bucket items by closure. +func (b *bucket) Iterate(handler core.FilterHandler) error { + return listing(b.dir, func(p string, info os.FileInfo) error { + key := decodeKey(info.Name()) + val, err := ioutil.ReadFile(p) + if err != nil { + return err + } + + if !handler(key, val) { + return core.ErrIteratingAborted + } + + return nil + }) +} + +// Close bucket (just empty). 
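+//
+// Note that for the plain fs bucket Close removes the bucket directory with
+// all stored files (see os.RemoveAll below).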
+func (b *bucket) Close() error { + return os.RemoveAll(b.dir) +} diff --git a/lib/buckets/fsbucket/queue.go b/lib/buckets/fsbucket/queue.go new file mode 100644 index 000000000..e2b036162 --- /dev/null +++ b/lib/buckets/fsbucket/queue.go @@ -0,0 +1,44 @@ +package fsbucket + +import "sync" + +type ( + queue struct { + *sync.RWMutex + buf []elem + } + + elem struct { + depth int + prefix string + path string + } +) + +func newQueue(n int) *queue { + return &queue{ + RWMutex: new(sync.RWMutex), + buf: make([]elem, 0, n), + } +} + +func (q *queue) Len() int { + return len(q.buf) +} + +func (q *queue) Push(s elem) { + q.Lock() + q.buf = append(q.buf, s) + q.Unlock() +} + +func (q *queue) Pop() (s elem) { + q.Lock() + if len(q.buf) > 0 { + s = q.buf[0] + q.buf = q.buf[1:] + } + q.Unlock() + + return +} diff --git a/lib/buckets/fsbucket/treemethods.go b/lib/buckets/fsbucket/treemethods.go new file mode 100644 index 000000000..1a1927a82 --- /dev/null +++ b/lib/buckets/fsbucket/treemethods.go @@ -0,0 +1,261 @@ +package fsbucket + +import ( + "encoding/hex" + "io/ioutil" + "os" + "path" + "strings" + + "github.com/nspcc-dev/neofs-node/lib/core" +) + +const queueCap = 1000 + +func stringifyHexKey(key []byte) string { + return hex.EncodeToString(key) +} + +func decodeHexKey(key string) ([]byte, error) { + k, err := hex.DecodeString(key) + if err != nil { + return nil, err + } + + return k, nil +} + +// treePath returns slice of the dir names that contain the path +// and filename, e.g. 0xabcdef => []string{"ab", "cd"}, "abcdef". +// In case of errors - return nil slice. +func (b *treeBucket) treePath(key []byte) ([]string, string) { + filename := stringifyHexKey(key) + if len(filename) <= b.prefixLength*b.depth { + return nil, filename + } + + filepath := filename + dirs := make([]string, 0, b.depth) + + for i := 0; i < b.depth; i++ { + dirs = append(dirs, filepath[:b.prefixLength]) + filepath = filepath[b.prefixLength:] + } + + return dirs, filename +} + +// Get value by key. +func (b *treeBucket) Get(key []byte) ([]byte, error) { + dirPaths, filename := b.treePath(key) + if dirPaths == nil { + return nil, errShortKey + } + + p := path.Join(b.dir, path.Join(dirPaths...), filename) + + if _, err := os.Stat(p); os.IsNotExist(err) { + return nil, core.ErrNotFound + } + + return ioutil.ReadFile(p) +} + +// Set value by key. +func (b *treeBucket) Set(key, value []byte) error { + dirPaths, filename := b.treePath(key) + if dirPaths == nil { + return errShortKey + } + + var ( + dirPath = path.Join(dirPaths...) + p = path.Join(b.dir, dirPath, filename) + ) + + if err := os.MkdirAll(path.Join(b.dir, dirPath), b.perm); err != nil { + return err + } + + err := ioutil.WriteFile(p, value, b.perm) + if err == nil { + b.sz.Add(int64(len(value))) + } + + return err +} + +// Del value by key. +func (b *treeBucket) Del(key []byte) error { + dirPaths, filename := b.treePath(key) + if dirPaths == nil { + return errShortKey + } + + var ( + err error + fi os.FileInfo + p = path.Join(b.dir, path.Join(dirPaths...), filename) + ) + + if fi, err = os.Stat(p); os.IsNotExist(err) { + return core.ErrNotFound + } else if err = os.Remove(p); err == nil { + b.sz.Sub(fi.Size()) + } + + return err +} + +// Has checks if key exists. 
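+//
+// With the default depth (2) and prefix length (2) a key with hex form
+// "abcdef01..." is expected at <dir>/ab/cd/abcdef01..., so the check is a
+// single os.Stat on that path.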
+func (b *treeBucket) Has(key []byte) bool { + dirPaths, filename := b.treePath(key) + if dirPaths == nil { + return false + } + + p := path.Join(b.dir, path.Join(dirPaths...), filename) + + _, err := os.Stat(p) + + return err == nil +} + +// There might be two implementation of listing method: simple with `filepath.Walk()` +// or more complex implementation with path checks, BFS etc. `filepath.Walk()` might +// be slow in large dirs due to sorting operations and non controllable depth. +func (b *treeBucket) listing(root string, fn func(path string, info os.FileInfo) error) error { + // todo: DFS might be better since it won't store many files in queue. + // todo: queue length can be specified as a parameter + q := newQueue(queueCap) + q.Push(elem{path: root}) + + for q.Len() > 0 { + e := q.Pop() + + s, err := os.Lstat(e.path) + if err != nil { + // might be better to log and ignore + return err + } + + // check if it is correct file + if !s.IsDir() { + // we accept files that located in excepted depth and have correct prefix + // e.g. file 'abcdef0123' => /ab/cd/abcdef0123 + if e.depth == b.depth+1 && strings.HasPrefix(s.Name(), e.prefix) { + err = fn(e.path, s) + if err != nil { + // might be better to log and ignore + return err + } + } + + continue + } + + // ignore dirs with inappropriate length or depth + if e.depth > b.depth || (e.depth > 0 && len(s.Name()) > b.prefixLength) { + continue + } + + files, err := readDirNames(e.path) + if err != nil { + // might be better to log and ignore + return err + } + + for i := range files { + // add prefix of all dirs in path except root dir + var prefix string + if e.depth > 0 { + prefix = e.prefix + s.Name() + } + + q.Push(elem{ + depth: e.depth + 1, + prefix: prefix, + path: path.Join(e.path, files[i]), + }) + } + } + + return nil +} + +// Size returns the size of the bucket in bytes. +func (b *treeBucket) Size() int64 { + return b.sz.Load() +} + +func (b *treeBucket) size() (size int64) { + err := b.listing(b.dir, func(_ string, info os.FileInfo) error { + size += info.Size() + return nil + }) + + if err != nil { + size = 0 + } + + return +} + +// List all bucket items. +func (b *treeBucket) List() ([][]byte, error) { + buckets := make([][]byte, 0) + + err := b.listing(b.dir, func(p string, info os.FileInfo) error { + key, err := decodeHexKey(info.Name()) + if err != nil { + return err + } + buckets = append(buckets, key) + return nil + }) + + return buckets, err +} + +// Filter bucket items by closure. +func (b *treeBucket) Iterate(handler core.FilterHandler) error { + return b.listing(b.dir, func(p string, info os.FileInfo) error { + val, err := ioutil.ReadFile(path.Join(b.dir, p)) + if err != nil { + return err + } + + key, err := decodeHexKey(info.Name()) + if err != nil { + return err + } + + if !handler(key, val) { + return core.ErrIteratingAborted + } + + return nil + }) +} + +// Close bucket (remove all available data). +func (b *treeBucket) Close() error { + return os.RemoveAll(b.dir) +} + +// readDirNames copies `filepath.readDirNames()` without sorting the output. 
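+//
+// Sorting is unnecessary here because listing() traverses directories with
+// its own BFS queue and does not rely on entry order.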
+func readDirNames(dirname string) ([]string, error) { + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + + names, err := f.Readdirnames(-1) + if err != nil { + return nil, err + } + + f.Close() + + return names, nil +} diff --git a/lib/buckets/fsbucket/treemethods_test.go b/lib/buckets/fsbucket/treemethods_test.go new file mode 100644 index 000000000..f0e88e554 --- /dev/null +++ b/lib/buckets/fsbucket/treemethods_test.go @@ -0,0 +1,324 @@ +package fsbucket + +import ( + "crypto/rand" + "crypto/sha256" + "encoding/hex" + "io/ioutil" + "os" + "path" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/require" + "go.uber.org/atomic" + + "github.com/nspcc-dev/neofs-node/lib/core" +) + +func prepareTree(badFiles bool) (string, error) { + name := make([]byte, 32) + root, err := ioutil.TempDir("", "treeBucket_test") + if err != nil { + return "", err + } + + // paths must contain strings with hex ascii symbols + paths := [][]string{ + {root, "abcd"}, + {root, "abcd", "cdef"}, + {root, "abcd", "cd01"}, + {root, "0123", "2345"}, + {root, "0123", "2345", "4567"}, + } + + dirs := make([]string, len(paths)) + + for i := range paths { + dirs[i] = path.Join(paths[i]...) + + err = os.MkdirAll(dirs[i], 0700) + if err != nil { + return "", err + } + + // create couple correct files + for j := 0; j < 2; j++ { + _, err := rand.Read(name) + if err != nil { + return "", err + } + + filePrefix := new(strings.Builder) + for k := 1; k < len(paths[i]); k++ { + filePrefix.WriteString(paths[i][k]) + } + filePrefix.WriteString(hex.EncodeToString(name)) + + file, err := os.OpenFile(path.Join(dirs[i], filePrefix.String()), os.O_CREATE, 0700) + if err != nil { + return "", err + } + file.Close() + } + + if !badFiles { + continue + } + + // create one bad file + _, err := rand.Read(name) + if err != nil { + return "", err + } + + file, err := os.OpenFile(path.Join(dirs[i], "fff"+hex.EncodeToString(name)), os.O_CREATE, 0700) + if err != nil { + return "", err + } + file.Close() + } + + return root, nil +} + +func TestTreebucket_List(t *testing.T) { + root, err := prepareTree(true) + require.NoError(t, err) + defer os.RemoveAll(root) + + b := treeBucket{ + dir: root, + perm: 0700, + depth: 1, + prefixLength: 4, + } + results, err := b.List() + require.NoError(t, err) + require.Len(t, results, 2) + + b.depth = 2 + results, err = b.List() + require.NoError(t, err) + require.Len(t, results, 6) + + b.depth = 3 + results, err = b.List() + require.NoError(t, err) + require.Len(t, results, 2) + + b.depth = 4 + results, err = b.List() + require.NoError(t, err) + require.Len(t, results, 0) +} + +func TestTreebucket(t *testing.T) { + root, err := prepareTree(true) + require.NoError(t, err) + defer os.RemoveAll(root) + + b := treeBucket{ + dir: root, + perm: 0700, + depth: 2, + prefixLength: 4, + sz: atomic.NewInt64(0), + } + + results, err := b.List() + require.NoError(t, err) + require.Len(t, results, 6) + + t.Run("Get", func(t *testing.T) { + for i := range results { + _, err = b.Get(results[i]) + require.NoError(t, err) + } + _, err = b.Get([]byte("Hello world!")) + require.Error(t, err) + }) + + t.Run("Has", func(t *testing.T) { + for i := range results { + require.True(t, b.Has(results[i])) + } + require.False(t, b.Has([]byte("Unknown key"))) + }) + + t.Run("Set", func(t *testing.T) { + keyHash := sha256.Sum256([]byte("Set this key")) + key := keyHash[:] + value := make([]byte, 32) + rand.Read(value) + + // set sha256 key + err := b.Set(key, value) + require.NoError(t, err) + + 
require.True(t, b.Has(key)) + data, err := b.Get(key) + require.NoError(t, err) + require.Equal(t, data, value) + + filename := hex.EncodeToString(key) + _, err = os.Lstat(path.Join(root, filename[:4], filename[4:8], filename)) + require.NoError(t, err) + + // set key that cannot be placed in the required dir depth + key, err = hex.DecodeString("abcdef") + require.NoError(t, err) + + err = b.Set(key, value) + require.Error(t, err) + }) + + t.Run("Delete", func(t *testing.T) { + keyHash := sha256.Sum256([]byte("Delete this key")) + key := keyHash[:] + value := make([]byte, 32) + rand.Read(value) + + err := b.Set(key, value) + require.NoError(t, err) + + // delete sha256 key + err = b.Del(key) + require.NoError(t, err) + + _, err = b.Get(key) + require.Error(t, err) + filename := hex.EncodeToString(key) + _, err = os.Lstat(path.Join(root, filename[:4], filename[4:8], filename)) + require.Error(t, err) + }) +} + +func TestTreebucket_Close(t *testing.T) { + root, err := prepareTree(true) + require.NoError(t, err) + defer os.RemoveAll(root) + + b := treeBucket{ + dir: root, + perm: 0700, + depth: 2, + prefixLength: 4, + } + err = b.Close() + require.NoError(t, err) + + _, err = os.Lstat(root) + require.Error(t, err) +} + +func TestTreebucket_Size(t *testing.T) { + root, err := prepareTree(true) + require.NoError(t, err) + defer os.RemoveAll(root) + + var size int64 = 1024 + key := []byte("Set this key") + value := make([]byte, size) + rand.Read(value) + + b := treeBucket{ + dir: root, + perm: 0700, + depth: 2, + prefixLength: 4, + sz: atomic.NewInt64(0), + } + + err = b.Set(key, value) + require.NoError(t, err) + require.Equal(t, size, b.Size()) +} + +func BenchmarkTreebucket_List(b *testing.B) { + root, err := prepareTree(false) + defer os.RemoveAll(root) + if err != nil { + b.Error(err) + } + + treeFSBucket := &treeBucket{ + dir: root, + perm: 0755, + depth: 2, + prefixLength: 4, + } + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + _, err := treeFSBucket.List() + if err != nil { + b.Error(err) + } + } +} + +func BenchmarkFilewalkBucket_List(b *testing.B) { + root, err := prepareTree(false) + defer os.RemoveAll(root) + if err != nil { + b.Error(err) + } + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + buckets := make([]core.BucketItem, 0) + + filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + if err != nil || info.IsDir() { + return nil + } + + val, err := ioutil.ReadFile(path) + if err != nil { + return err + } + + key, err := decodeHexKey(info.Name()) + if err != nil { + return err + } + + buckets = append(buckets, core.BucketItem{ + Key: key, + Val: val, + }) + + return nil + }) + } +} + +func BenchmarkTreeBucket_Size(b *testing.B) { + root, err := prepareTree(false) + defer os.RemoveAll(root) + if err != nil { + b.Error(err) + } + + treeFSBucket := &treeBucket{ + dir: root, + perm: 0755, + depth: 2, + prefixLength: 4, + } + + treeFSBucket.sz = atomic.NewInt64(treeFSBucket.size()) + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + _ = treeFSBucket.Size() + } +} diff --git a/lib/buckets/init.go b/lib/buckets/init.go new file mode 100644 index 000000000..ea4c5756d --- /dev/null +++ b/lib/buckets/init.go @@ -0,0 +1,64 @@ +package buckets + +import ( + "plugin" + "strings" + + "github.com/nspcc-dev/neofs-node/lib/buckets/boltdb" + "github.com/nspcc-dev/neofs-node/lib/buckets/fsbucket" + "github.com/nspcc-dev/neofs-node/lib/buckets/inmemory" + "github.com/nspcc-dev/neofs-node/lib/core" + 
"github.com/pkg/errors" + "github.com/spf13/viper" + "go.uber.org/zap" +) + +const ( + // BoltDBBucket is a name of BoltDB bucket. + BoltDBBucket = "boltdb" + + // InMemoryBucket is a name RAM bucket. + InMemoryBucket = "in-memory" + + // FileSystemBucket is a name of file system bucket. + FileSystemBucket = "fsbucket" + + bucketSymbol = "PrepareBucket" +) + +// NewBucket is a bucket's constructor. +func NewBucket(name core.BucketType, l *zap.Logger, v *viper.Viper) (core.Bucket, error) { + bucket := v.GetString("storage." + string(name) + ".bucket") + + l.Info("initialize bucket", + zap.String("name", string(name)), + zap.String("bucket", bucket)) + + switch strings.ToLower(bucket) { + case FileSystemBucket: + return fsbucket.NewBucket(name, v) + + case InMemoryBucket: + return inmemory.NewBucket(name, v), nil + + case BoltDBBucket: + opts, err := boltdb.NewOptions("storage."+name, v) + if err != nil { + return nil, err + } + + return boltdb.NewBucket(&opts) + default: + instance, err := plugin.Open(bucket) + if err != nil { + return nil, errors.Wrapf(err, "could not load bucket: `%s`", bucket) + } + + sym, err := instance.Lookup(bucketSymbol) + if err != nil { + return nil, errors.Wrapf(err, "could not find bucket signature: `%s`", bucket) + } + + return sym.(func(core.BucketType, *viper.Viper) (core.Bucket, error))(name, v) + } +} diff --git a/lib/buckets/inmemory/bucket.go b/lib/buckets/inmemory/bucket.go new file mode 100644 index 000000000..b5f48316c --- /dev/null +++ b/lib/buckets/inmemory/bucket.go @@ -0,0 +1,60 @@ +package inmemory + +import ( + "sync" + + "github.com/mr-tron/base58" + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/spf13/viper" +) + +type ( + bucket struct { + *sync.RWMutex + items map[string][]byte + } +) + +const ( + defaultCapacity = 100 +) + +var ( + _ core.Bucket = (*bucket)(nil) + + // for in usage + _ = NewBucket +) + +func stringifyKey(key []byte) string { + return base58.Encode(key) +} + +func decodeKey(key string) []byte { + k, err := base58.Decode(key) + if err != nil { + panic(err) // it can fail only for not base58 strings + } + + return k +} + +func makeCopy(val []byte) []byte { + tmp := make([]byte, len(val)) + copy(tmp, val) + + return tmp +} + +// NewBucket creates new in-memory bucket instance. +func NewBucket(name core.BucketType, v *viper.Viper) core.Bucket { + var capacity int + if capacity = v.GetInt("storage." + string(name) + ".capacity"); capacity <= 0 { + capacity = defaultCapacity + } + + return &bucket{ + RWMutex: new(sync.RWMutex), + items: make(map[string][]byte, capacity), + } +} diff --git a/lib/buckets/inmemory/methods.go b/lib/buckets/inmemory/methods.go new file mode 100644 index 000000000..7e1685c70 --- /dev/null +++ b/lib/buckets/inmemory/methods.go @@ -0,0 +1,107 @@ +package inmemory + +import ( + "unsafe" + + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/pkg/errors" +) + +// Get value by key. +func (b *bucket) Get(key []byte) ([]byte, error) { + k := stringifyKey(key) + + b.RLock() + val, ok := b.items[k] + result := makeCopy(val) + b.RUnlock() + + if !ok { + return nil, errors.Wrapf(core.ErrNotFound, "key=`%s`", k) + } + + return result, nil +} + +// Set value by key. +func (b *bucket) Set(key, value []byte) error { + k := stringifyKey(key) + + b.Lock() + b.items[k] = makeCopy(value) + b.Unlock() + + return nil +} + +// Del value by key. +func (b *bucket) Del(key []byte) error { + k := stringifyKey(key) + + b.Lock() + delete(b.items, k) + b.Unlock() + + return nil +} + +// Has checks key exists. 
+func (b *bucket) Has(key []byte) bool { + k := stringifyKey(key) + + b.RLock() + _, ok := b.items[k] + b.RUnlock() + + return ok +} + +// Size size of bucket. +func (b *bucket) Size() int64 { + b.RLock() + // TODO we must replace in future + size := unsafe.Sizeof(b.items) + b.RUnlock() + + return int64(size) +} + +func (b *bucket) List() ([][]byte, error) { + var result = make([][]byte, 0) + + b.RLock() + for key := range b.items { + result = append(result, decodeKey(key)) + } + b.RUnlock() + + return result, nil +} + +// Filter items by closure. +func (b *bucket) Iterate(handler core.FilterHandler) error { + if handler == nil { + return core.ErrNilFilterHandler + } + + b.RLock() + for key, val := range b.items { + k, v := decodeKey(key), makeCopy(val) + + if !handler(k, v) { + return core.ErrIteratingAborted + } + } + b.RUnlock() + + return nil +} + +// Close bucket (just empty). +func (b *bucket) Close() error { + b.Lock() + b.items = make(map[string][]byte) + b.Unlock() + + return nil +} diff --git a/lib/container/alias.go b/lib/container/alias.go new file mode 100644 index 000000000..cb2cdf3c6 --- /dev/null +++ b/lib/container/alias.go @@ -0,0 +1,15 @@ +package container + +import ( + "github.com/nspcc-dev/neofs-api-go/container" + "github.com/nspcc-dev/neofs-api-go/refs" +) + +// Container is a type alias of Container. +type Container = container.Container + +// CID is a type alias of CID. +type CID = refs.CID + +// OwnerID is a type alias of OwnerID. +type OwnerID = refs.OwnerID diff --git a/lib/container/storage.go b/lib/container/storage.go new file mode 100644 index 000000000..5192a3b2e --- /dev/null +++ b/lib/container/storage.go @@ -0,0 +1,134 @@ +package container + +import ( + "context" +) + +// GetParams is a group of parameters for container receiving operation. +type GetParams struct { + ctxValue + + cidValue +} + +// GetResult is a group of values returned by container receiving operation. +type GetResult struct { + cnrValue +} + +// PutParams is a group of parameters for container storing operation. +type PutParams struct { + ctxValue + + cnrValue +} + +// PutResult is a group of values returned by container storing operation. +type PutResult struct { + cidValue +} + +// DeleteParams is a group of parameters for container removal operation. +type DeleteParams struct { + ctxValue + + cidValue + + ownerID OwnerID +} + +// DeleteResult is a group of values returned by container removal operation. +type DeleteResult struct{} + +// ListParams is a group of parameters for container listing operation. +type ListParams struct { + ctxValue + + ownerIDList []OwnerID +} + +// ListResult is a group of values returned by container listing operation. +type ListResult struct { + cidList []CID +} + +type cnrValue struct { + cnr *Container +} + +type cidValue struct { + cid CID +} + +type ctxValue struct { + ctx context.Context +} + +// Storage is an interface of the storage of NeoFS containers. +type Storage interface { + GetContainer(GetParams) (*GetResult, error) + PutContainer(PutParams) (*PutResult, error) + DeleteContainer(DeleteParams) (*DeleteResult, error) + ListContainers(ListParams) (*ListResult, error) + // TODO: add EACL methods +} + +// Context is a context getter. +func (s ctxValue) Context() context.Context { + return s.ctx +} + +// SetContext is a context setter. +func (s *ctxValue) SetContext(v context.Context) { + s.ctx = v +} + +// CID is a container ID getter. +func (s cidValue) CID() CID { + return s.cid +} + +// SetCID is a container ID getter. 
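+//
+// The cid/cnr/ctx value types above are embedded into the parameter and
+// result structures, so a typical Storage call is shaped like this (sketch;
+// the concrete Storage implementation is assumed to exist elsewhere):
+//
+//   var p GetParams
+//   p.SetContext(ctx)
+//   p.SetCID(cid)
+//   res, err := store.GetContainer(p)
+//   if err == nil {
+//       cnr := res.Container()
+//       _ = cnr
+//   }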
+func (s *cidValue) SetCID(v CID) { + s.cid = v +} + +// Container is a container getter. +func (s cnrValue) Container() *Container { + return s.cnr +} + +// SetContainer is a container setter. +func (s *cnrValue) SetContainer(v *Container) { + s.cnr = v +} + +// OwnerID is an owner ID getter. +func (s DeleteParams) OwnerID() OwnerID { + return s.ownerID +} + +// SetOwnerID is an owner ID setter. +func (s *DeleteParams) SetOwnerID(v OwnerID) { + s.ownerID = v +} + +// OwnerIDList is an owner ID list getter. +func (s ListParams) OwnerIDList() []OwnerID { + return s.ownerIDList +} + +// SetOwnerIDList is an owner ID list setter. +func (s *ListParams) SetOwnerIDList(v ...OwnerID) { + s.ownerIDList = v +} + +// CIDList is a container ID list getter. +func (s ListResult) CIDList() []CID { + return s.cidList +} + +// SetCIDList is a container ID list setter. +func (s *ListResult) SetCIDList(v []CID) { + s.cidList = v +} diff --git a/lib/container/storage_test.go b/lib/container/storage_test.go new file mode 100644 index 000000000..77f386514 --- /dev/null +++ b/lib/container/storage_test.go @@ -0,0 +1,83 @@ +package container + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGetParams(t *testing.T) { + p := new(GetParams) + + cid := CID{1, 2, 3} + p.SetCID(cid) + + require.Equal(t, cid, p.CID()) +} + +func TestGetResult(t *testing.T) { + r := new(GetResult) + + cnr := &Container{ + OwnerID: OwnerID{1, 2, 3}, + } + r.SetContainer(cnr) + + require.Equal(t, cnr, r.Container()) +} + +func TestPutParams(t *testing.T) { + p := new(PutParams) + + cnr := &Container{ + OwnerID: OwnerID{1, 2, 3}, + } + p.SetContainer(cnr) + + require.Equal(t, cnr, p.Container()) +} + +func TestPutResult(t *testing.T) { + r := new(PutResult) + + cid := CID{1, 2, 3} + r.SetCID(cid) + + require.Equal(t, cid, r.CID()) +} + +func TestDeleteParams(t *testing.T) { + p := new(DeleteParams) + + ownerID := OwnerID{1, 2, 3} + p.SetOwnerID(ownerID) + require.Equal(t, ownerID, p.OwnerID()) + + cid := CID{4, 5, 6} + p.SetCID(cid) + require.Equal(t, cid, p.CID()) +} + +func TestListParams(t *testing.T) { + p := new(ListParams) + + ownerIDList := []OwnerID{ + {1, 2, 3}, + {4, 5, 6}, + } + p.SetOwnerIDList(ownerIDList...) 
+ + require.Equal(t, ownerIDList, p.OwnerIDList()) +} + +func TestListResult(t *testing.T) { + r := new(ListResult) + + cidList := []CID{ + {1, 2, 3}, + {4, 5, 6}, + } + r.SetCIDList(cidList) + + require.Equal(t, cidList, r.CIDList()) +} diff --git a/lib/core/storage.go b/lib/core/storage.go new file mode 100644 index 000000000..27e22f6d7 --- /dev/null +++ b/lib/core/storage.go @@ -0,0 +1,94 @@ +package core + +import ( + "github.com/nspcc-dev/neofs-node/internal" + "github.com/pkg/errors" +) + +type ( + // BucketType is name of bucket + BucketType string + + // FilterHandler where you receive key/val in your closure + FilterHandler func(key, val []byte) bool + + // BucketItem used in filter + BucketItem struct { + Key []byte + Val []byte + } + + // Bucket is sub-store interface + Bucket interface { + Get(key []byte) ([]byte, error) + Set(key, value []byte) error + Del(key []byte) error + Has(key []byte) bool + Size() int64 + List() ([][]byte, error) + Iterate(FilterHandler) error + // Steam can be implemented by badger.Stream, but not for now + // Stream(ctx context.Context, key []byte, cb func(io.ReadWriter) error) error + Close() error + } + + // Storage component interface + Storage interface { + GetBucket(name BucketType) (Bucket, error) + Size() int64 + Close() error + } +) + +const ( + // BlobStore is a blob bucket name. + BlobStore BucketType = "blob" + + // MetaStore is a meta bucket name. + MetaStore BucketType = "meta" + + // SpaceMetricsStore is a space metrics bucket name. + SpaceMetricsStore BucketType = "space-metrics" +) + +var ( + // ErrNilFilterHandler when FilterHandler is empty + ErrNilFilterHandler = errors.New("handler can't be nil") + + // ErrNotFound is returned by key-value storage methods + // that could not find element by key. + ErrNotFound = internal.Error("key not found") +) + +// ErrIteratingAborted is returned by storage iterator +// after iteration has been interrupted. +var ErrIteratingAborted = errors.New("iteration aborted") + +var errEmptyBucket = errors.New("empty bucket") + +func (t BucketType) String() string { return string(t) } + +// ListBucketItems performs iteration over Bucket and returns the full list of its items. 
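+//
+// Usage sketch (mirrors the accompanying unit test):
+//
+//   items, err := ListBucketItems(bkt, func(key, val []byte) bool { return true })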
+func ListBucketItems(b Bucket, h FilterHandler) ([]BucketItem, error) { + if b == nil { + return nil, errEmptyBucket + } else if h == nil { + return nil, ErrNilFilterHandler + } + + items := make([]BucketItem, 0) + + if err := b.Iterate(func(key, val []byte) bool { + if h(key, val) { + items = append(items, BucketItem{ + Key: key, + Val: val, + }) + } + return true + }); err != nil { + return nil, err + } + + return items, nil +} diff --git a/lib/core/storage_test.go b/lib/core/storage_test.go new file mode 100644 index 000000000..a4b451117 --- /dev/null +++ b/lib/core/storage_test.go @@ -0,0 +1,65 @@ +package core + +import ( + "crypto/rand" + "testing" + + "github.com/stretchr/testify/require" +) + +type testBucket struct { + Bucket + + items []BucketItem +} + +func (s *testBucket) Iterate(f FilterHandler) error { + for i := range s.items { + if !f(s.items[i].Key, s.items[i].Val) { + return ErrIteratingAborted + } + } + + return nil +} + +func TestListBucketItems(t *testing.T) { + _, err := ListBucketItems(nil, nil) + require.EqualError(t, err, errEmptyBucket.Error()) + + b := new(testBucket) + + _, err = ListBucketItems(b, nil) + require.EqualError(t, err, ErrNilFilterHandler.Error()) + + var ( + count = 10 + ln = 10 + items = make([]BucketItem, 0, count) + ) + + for i := 0; i < count; i++ { + items = append(items, BucketItem{ + Key: testData(t, ln), + Val: testData(t, ln), + }) + } + + b.items = items + + res, err := ListBucketItems(b, func(key, val []byte) bool { return true }) + require.NoError(t, err) + require.Equal(t, items, res) + + res, err = ListBucketItems(b, func(key, val []byte) bool { return false }) + require.NoError(t, err) + require.Empty(t, res) +} + +func testData(t *testing.T, sz int) []byte { + d := make([]byte, sz) + _, err := rand.Read(d) + require.NoError(t, err) + + return d +} diff --git a/lib/core/validator.go b/lib/core/validator.go new file mode 100644 index 000000000..ca66a93a1 --- /dev/null +++ b/lib/core/validator.go @@ -0,0 +1,22 @@ +package core + +import ( + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-node/internal" +) + +// ErrMissingKeySignPairs is returned by functions that expect +// a non-empty SignKeyPair slice, but received empty. +const ErrMissingKeySignPairs = internal.Error("missing key-signature pairs") + +// VerifyRequestWithSignatures checks if request has signatures and all of them are valid. +// +// Returns ErrMissingKeySignPairs if request does not have signatures. +// Otherwise, behaves like service.VerifyRequestData. +func VerifyRequestWithSignatures(req service.RequestVerifyData) error { + if len(req.GetSignKeyPairs()) == 0 { + return ErrMissingKeySignPairs + } + + return service.VerifyRequestData(req) +} diff --git a/lib/core/verify.go b/lib/core/verify.go new file mode 100644 index 000000000..57b80663f --- /dev/null +++ b/lib/core/verify.go @@ -0,0 +1,69 @@ +package core + +import ( + "context" + + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-api-go/service" + crypto "github.com/nspcc-dev/neofs-crypto" + "github.com/nspcc-dev/neofs-node/internal" +) + +// OwnerKeyContainer is an interface of the container of owner's ID and key pair with read access. +type OwnerKeyContainer interface { + GetOwnerID() refs.OwnerID + GetOwnerKey() []byte +} + +// OwnerKeyVerifier is an interface of OwnerKeyContainer validator. +type OwnerKeyVerifier interface { + // Must check if OwnerKeyContainer satisfies a certain criterion. + // Nil error is equivalent to matching the criterion. 
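+	//
+	// For example, the Neo verifier returned by NewNeoKeyVerifier below checks
+	// that the owner ID was derived from the attached public key:
+	//
+	//   err := NewNeoKeyVerifier().VerifyKey(ctx, req) // req implements OwnerKeyContainer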
+ VerifyKey(context.Context, OwnerKeyContainer) error +} + +type neoKeyVerifier struct{} + +// ErrNilOwnerKeyContainer is returned by functions that expect a non-nil +// OwnerKeyContainer, but received nil. +const ErrNilOwnerKeyContainer = internal.Error("owner-key container is nil") + +// ErrNilOwnerKeyVerifier is returned by functions that expect a non-nil +// OwnerKeyVerifier, but received nil. +const ErrNilOwnerKeyVerifier = internal.Error("owner-key verifier is nil") + +// NewNeoKeyVerifier creates a new Neo owner key verifier and return a OwnerKeyVerifier interface. +func NewNeoKeyVerifier() OwnerKeyVerifier { + return new(neoKeyVerifier) +} + +// VerifyKey checks if the public key converts to owner ID. +// +// If passed OwnerKeyContainer is nil, ErrNilOwnerKeyContainer returns. +// If public key cannot be unmarshaled, service.ErrInvalidPublicKeyBytes returns. +// If public key is not converted to owner ID, service.ErrWrongOwner returns. +// With neo:morph adoption public key can be unrelated to owner ID. In this +// case VerifyKey should call NeoFS.ID smart-contract to check whether public +// key is bounded with owner ID. If there is no bound, then return +// service.ErrWrongOwner. +func (s neoKeyVerifier) VerifyKey(_ context.Context, src OwnerKeyContainer) error { + if src == nil { + return ErrNilOwnerKeyContainer + } + + pubKey := crypto.UnmarshalPublicKey(src.GetOwnerKey()) + if pubKey == nil { + return service.ErrInvalidPublicKeyBytes + } + + ownerFromKey, err := refs.NewOwnerID(pubKey) + if err != nil { + return err + } + + if !ownerFromKey.Equal(src.GetOwnerID()) { + return service.ErrWrongOwner + } + + return nil +} diff --git a/lib/fix/catch.go b/lib/fix/catch.go new file mode 100644 index 000000000..c0bb5a653 --- /dev/null +++ b/lib/fix/catch.go @@ -0,0 +1,59 @@ +package fix + +import ( + "fmt" + "reflect" + + "go.uber.org/zap" +) + +func (a *app) Catch(err error) { + if err == nil { + return + } + + if a.log == nil { + panic(err) + } + + a.log.Fatal("Can't run app", + zap.Error(err)) +} + +// CatchTrace catch errors for debugging +// use that function just for debug your application. +func (a *app) CatchTrace(err error) { + if err == nil { + return + } + + // digging into the root of the problem + for { + var ( + ok bool + v = reflect.ValueOf(err) + fn reflect.Value + ) + + if v.Type().Kind() != reflect.Struct { + break + } + + if !v.FieldByName("Reason").IsValid() { + break + } + + if v.FieldByName("Func").IsValid() { + fn = v.FieldByName("Func") + } + + fmt.Printf("Place: %#v\nReason: %s\n\n", fn, err) + + if err, ok = v.FieldByName("Reason").Interface().(error); !ok { + err = v.Interface().(error) + break + } + } + + panic(err) +} diff --git a/lib/fix/config/config.go b/lib/fix/config/config.go new file mode 100644 index 000000000..fa9e860c4 --- /dev/null +++ b/lib/fix/config/config.go @@ -0,0 +1,53 @@ +package config + +import ( + "strings" + + "github.com/spf13/viper" +) + +// Params groups the parameters of configuration. +type Params struct { + File string + Type string + Prefix string + Name string + Version string + + AppDefaults func(v *viper.Viper) +} + +// NewConfig is a configuration tool's constructor. 
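A minimal sketch of calling the constructor that follows; the "NODE" prefix, application name and logger.level default are illustrative values rather than the node's real configuration:

package main

import (
	"log"

	"github.com/nspcc-dev/neofs-node/lib/fix/config"
	"github.com/spf13/viper"
)

func main() {
	v, err := config.NewConfig(config.Params{
		File:    "",     // no config file: environment variables and defaults only
		Prefix:  "NODE", // e.g. NODE_LOGGER_LEVEL overrides logger.level
		Name:    "example-node",
		Version: "dev",
		AppDefaults: func(v *viper.Viper) {
			v.SetDefault("logger.level", "info")
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	log.Println(v.GetString("app.name"), v.GetString("logger.level"))
}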
+func NewConfig(p Params) (v *viper.Viper, err error) { + v = viper.New() + v.SetEnvPrefix(p.Prefix) + v.AutomaticEnv() + v.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + + v.SetDefault("app.name", p.Name) + v.SetDefault("app.version", p.Version) + + if p.AppDefaults != nil { + p.AppDefaults(v) + } + + if p.fromFile() { + v.SetConfigFile(p.File) + v.SetConfigType(p.safeType()) + + err = v.ReadInConfig() + } + + return v, err +} + +func (p Params) fromFile() bool { + return p.File != "" +} + +func (p Params) safeType() string { + if p.Type == "" { + p.Type = "yaml" + } + return strings.ToLower(p.Type) +} diff --git a/lib/fix/fix.go b/lib/fix/fix.go new file mode 100644 index 000000000..7fd4e9df3 --- /dev/null +++ b/lib/fix/fix.go @@ -0,0 +1,112 @@ +package fix + +import ( + "context" + "fmt" + "strconv" + "strings" + + "github.com/nspcc-dev/neofs-node/lib/fix/config" + "github.com/nspcc-dev/neofs-node/lib/fix/logger" + "github.com/nspcc-dev/neofs-node/lib/fix/module" + "github.com/nspcc-dev/neofs-node/misc" + "github.com/pkg/errors" + "github.com/spf13/viper" + "go.uber.org/dig" + "go.uber.org/zap" +) + +type ( + // App is an interface of executable application. + App interface { + Run() error + RunAndCatch() + } + + app struct { + err error + log *zap.Logger + di *dig.Container + runner interface{} + } + + // Settings groups the application parameters. + Settings struct { + File string + Type string + Name string + Prefix string + Build string + Version string + Runner interface{} + + AppDefaults func(v *viper.Viper) + } +) + +func (a *app) RunAndCatch() { + err := a.Run() + + if errors.Is(err, context.Canceled) { + return + } + + if ok, _ := strconv.ParseBool(misc.Debug); ok { + a.CatchTrace(err) + } + + a.Catch(err) +} + +func (a *app) Run() error { + if a.err != nil { + return a.err + } + + // setup app logger: + if err := a.di.Invoke(func(l *zap.Logger) { + a.log = l + }); err != nil { + return err + } + + return a.di.Invoke(a.runner) +} + +// New is an application constructor. +func New(s *Settings, mod module.Module) App { + var ( + a app + err error + ) + + a.di = dig.New(dig.DeferAcyclicVerification()) + a.runner = s.Runner + + if s.Prefix == "" { + s.Prefix = s.Name + } + + mod = mod.Append( + module.Module{ + {Constructor: logger.NewLogger}, + {Constructor: NewGracefulContext}, + {Constructor: func() (*viper.Viper, error) { + return config.NewConfig(config.Params{ + File: s.File, + Type: s.Type, + Prefix: strings.ToUpper(s.Prefix), + Name: s.Name, + Version: fmt.Sprintf("%s(%s)", s.Version, s.Build), + + AppDefaults: s.AppDefaults, + }) + }}, + }) + + if err = module.Provide(a.di, mod); err != nil { + a.err = err + } + + return &a +} diff --git a/lib/fix/grace.go b/lib/fix/grace.go new file mode 100644 index 000000000..3343b8ea4 --- /dev/null +++ b/lib/fix/grace.go @@ -0,0 +1,26 @@ +package fix + +import ( + "context" + "os" + "os/signal" + "syscall" + + "go.uber.org/zap" +) + +// NewGracefulContext returns graceful context. 
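A usage sketch for the constructor that follows, assuming a zap example logger and services started elsewhere; the shutdown step is illustrative:

package main

import (
	"github.com/nspcc-dev/neofs-node/lib/fix"
	"go.uber.org/zap"
)

func main() {
	l := zap.NewExample()

	ctx := fix.NewGracefulContext(l)

	// ... start workers and servers bound to ctx here ...

	<-ctx.Done() // unblocks once SIGINT, SIGTERM or SIGHUP arrives
	l.Info("shutting down")
}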
+func NewGracefulContext(l *zap.Logger) context.Context { + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + ch := make(chan os.Signal, 1) + signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP) + sig := <-ch + l.Info("received signal", + zap.String("signal", sig.String())) + cancel() + }() + + return ctx +} diff --git a/lib/fix/logger/logger.go b/lib/fix/logger/logger.go new file mode 100644 index 000000000..4f10ee11c --- /dev/null +++ b/lib/fix/logger/logger.go @@ -0,0 +1,90 @@ +package logger + +import ( + "strings" + + "github.com/spf13/viper" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +const ( + formatJSON = "json" + formatConsole = "console" + + defaultSamplingInitial = 100 + defaultSamplingThereafter = 100 +) + +func safeLevel(lvl string) zap.AtomicLevel { + switch strings.ToLower(lvl) { + case "debug": + return zap.NewAtomicLevelAt(zap.DebugLevel) + case "warn": + return zap.NewAtomicLevelAt(zap.WarnLevel) + case "error": + return zap.NewAtomicLevelAt(zap.ErrorLevel) + case "fatal": + return zap.NewAtomicLevelAt(zap.FatalLevel) + case "panic": + return zap.NewAtomicLevelAt(zap.PanicLevel) + default: + return zap.NewAtomicLevelAt(zap.InfoLevel) + } +} + +// NewLogger is a logger's constructor. +func NewLogger(v *viper.Viper) (*zap.Logger, error) { + c := zap.NewProductionConfig() + + c.OutputPaths = []string{"stdout"} + c.ErrorOutputPaths = []string{"stdout"} + + if v.IsSet("logger.sampling") { + c.Sampling = &zap.SamplingConfig{ + Initial: defaultSamplingInitial, + Thereafter: defaultSamplingThereafter, + } + + if val := v.GetInt("logger.sampling.initial"); val > 0 { + c.Sampling.Initial = val + } + + if val := v.GetInt("logger.sampling.thereafter"); val > 0 { + c.Sampling.Thereafter = val + } + } + + // logger level + c.Level = safeLevel(v.GetString("logger.level")) + traceLvl := safeLevel(v.GetString("logger.trace_level")) + + // logger format + switch f := v.GetString("logger.format"); strings.ToLower(f) { + case formatConsole: + c.Encoding = formatConsole + default: + c.Encoding = formatJSON + } + + // logger time + c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder + + l, err := c.Build( + // enable trace only for current log-level + zap.AddStacktrace(traceLvl)) + if err != nil { + return nil, err + } + + if v.GetBool("logger.no_disclaimer") { + return l, nil + } + + name := v.GetString("app.name") + version := v.GetString("app.version") + + return l.With( + zap.String("app_name", name), + zap.String("app_version", version)), nil +} diff --git a/lib/fix/module/module.go b/lib/fix/module/module.go new file mode 100644 index 000000000..9e33f48e4 --- /dev/null +++ b/lib/fix/module/module.go @@ -0,0 +1,35 @@ +package module + +import ( + "go.uber.org/dig" +) + +type ( + // Module type + Module []*Provider + + // Provider struct + Provider struct { + Constructor interface{} + Options []dig.ProvideOption + } +) + +// Append module to target module and return new module +func (m Module) Append(mods ...Module) Module { + var result = m + for _, mod := range mods { + result = append(result, mod...) 
+ } + return result +} + +// Provide set providers functions to DI container +func Provide(dic *dig.Container, providers Module) error { + for _, p := range providers { + if err := dic.Provide(p.Constructor, p.Options...); err != nil { + return err + } + } + return nil +} diff --git a/lib/fix/services.go b/lib/fix/services.go new file mode 100644 index 000000000..59a1a169e --- /dev/null +++ b/lib/fix/services.go @@ -0,0 +1,46 @@ +package fix + +import ( + "context" +) + +type ( + // Service interface + Service interface { + Start(context.Context) + Stop() + } + + combiner []Service +) + +var _ Service = (combiner)(nil) + +// NewServices creates single runner. +func NewServices(items ...Service) Service { + var svc = make(combiner, 0, len(items)) + + for _, item := range items { + if item == nil { + continue + } + + svc = append(svc, item) + } + + return svc +} + +// Start all services. +func (c combiner) Start(ctx context.Context) { + for _, svc := range c { + svc.Start(ctx) + } +} + +// Stop all services. +func (c combiner) Stop() { + for _, svc := range c { + svc.Stop() + } +} diff --git a/lib/fix/web/http.go b/lib/fix/web/http.go new file mode 100644 index 000000000..19941eb6e --- /dev/null +++ b/lib/fix/web/http.go @@ -0,0 +1,114 @@ +package web + +import ( + "context" + "net/http" + "sync/atomic" + "time" + + "github.com/spf13/viper" + "go.uber.org/zap" +) + +type ( + httpParams struct { + Key string + Viper *viper.Viper + Logger *zap.Logger + Handler http.Handler + } + + httpServer struct { + name string + started *int32 + logger *zap.Logger + shutdownTTL time.Duration + server server + } +) + +func (h *httpServer) Start(ctx context.Context) { + if h == nil { + return + } + + if !atomic.CompareAndSwapInt32(h.started, 0, 1) { + h.logger.Info("http: already started", + zap.String("server", h.name)) + return + } + + go func() { + if err := h.server.serve(ctx); err != nil { + if err != http.ErrServerClosed { + h.logger.Error("http: could not start server", + zap.Error(err)) + } + } + }() +} + +func (h *httpServer) Stop() { + if h == nil { + return + } + + if !atomic.CompareAndSwapInt32(h.started, 1, 0) { + h.logger.Info("http: already stopped", + zap.String("server", h.name)) + return + } + + ctx, cancel := context.WithTimeout(context.Background(), h.shutdownTTL) + defer cancel() + + h.logger.Debug("http: try to stop server", + zap.String("server", h.name)) + + if err := h.server.shutdown(ctx); err != nil { + h.logger.Error("http: could not stop server", + zap.Error(err)) + } +} + +const defaultShutdownTTL = 30 * time.Second + +func newHTTPServer(p httpParams) *httpServer { + var ( + address string + shutdown time.Duration + ) + + if address = p.Viper.GetString(p.Key + ".address"); address == "" { + p.Logger.Info("Empty bind address, skip", + zap.String("server", p.Key)) + return nil + } + if p.Handler == nil { + p.Logger.Info("Empty handler, skip", + zap.String("server", p.Key)) + return nil + } + + p.Logger.Info("Create http.Server", + zap.String("server", p.Key), + zap.String("address", address)) + + if shutdown = p.Viper.GetDuration(p.Key + ".shutdown_ttl"); shutdown <= 0 { + shutdown = defaultShutdownTTL + } + + return &httpServer{ + name: p.Key, + started: new(int32), + logger: p.Logger, + shutdownTTL: shutdown, + server: newServer(params{ + Address: address, + Name: p.Key, + Config: p.Viper, + Logger: p.Logger, + Handler: p.Handler, + }), + } +} diff --git a/lib/fix/web/metrics.go b/lib/fix/web/metrics.go new file mode 100644 index 000000000..951b17f2a --- /dev/null +++ 
b/lib/fix/web/metrics.go @@ -0,0 +1,32 @@ +package web + +import ( + "context" + + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/spf13/viper" + "go.uber.org/zap" +) + +// Metrics is an interface of metric tool. +type Metrics interface { + Start(ctx context.Context) + Stop() +} + +const metricsKey = "metrics" + +// NewMetrics is a metric tool's constructor. +func NewMetrics(l *zap.Logger, v *viper.Viper) Metrics { + if !v.GetBool(metricsKey + ".enabled") { + l.Debug("metrics server disabled") + return nil + } + + return newHTTPServer(httpParams{ + Key: metricsKey, + Viper: v, + Logger: l, + Handler: promhttp.Handler(), + }) +} diff --git a/lib/fix/web/pprof.go b/lib/fix/web/pprof.go new file mode 100644 index 000000000..da5a331b8 --- /dev/null +++ b/lib/fix/web/pprof.go @@ -0,0 +1,44 @@ +package web + +import ( + "context" + "expvar" + "net/http" + "net/http/pprof" + + "github.com/spf13/viper" + "go.uber.org/zap" +) + +// Profiler is an interface of profiler. +type Profiler interface { + Start(ctx context.Context) + Stop() +} + +const profilerKey = "pprof" + +// NewProfiler is a profiler's constructor. +func NewProfiler(l *zap.Logger, v *viper.Viper) Profiler { + if !v.GetBool(profilerKey + ".enabled") { + l.Debug("pprof server disabled") + return nil + } + + mux := http.NewServeMux() + + mux.Handle("/debug/vars", expvar.Handler()) + + mux.HandleFunc("/debug/pprof/", pprof.Index) + mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + mux.HandleFunc("/debug/pprof/profile", pprof.Profile) + mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + mux.HandleFunc("/debug/pprof/trace", pprof.Trace) + + return newHTTPServer(httpParams{ + Key: profilerKey, + Viper: v, + Logger: l, + Handler: mux, + }) +} diff --git a/lib/fix/web/server.go b/lib/fix/web/server.go new file mode 100644 index 000000000..e4fcb845c --- /dev/null +++ b/lib/fix/web/server.go @@ -0,0 +1,62 @@ +package web + +import ( + "context" + "net/http" + + "github.com/spf13/viper" + "go.uber.org/zap" +) + +type ( + // Server is an interface of server. + server interface { + serve(ctx context.Context) error + shutdown(ctx context.Context) error + } + + contextServer struct { + logger *zap.Logger + server *http.Server + } + + params struct { + Address string + Name string + Config *viper.Viper + Logger *zap.Logger + Handler http.Handler + } +) + +func newServer(p params) server { + return &contextServer{ + logger: p.Logger, + server: &http.Server{ + Addr: p.Address, + Handler: p.Handler, + ReadTimeout: p.Config.GetDuration(p.Name + ".read_timeout"), + ReadHeaderTimeout: p.Config.GetDuration(p.Name + ".read_header_timeout"), + WriteTimeout: p.Config.GetDuration(p.Name + ".write_timeout"), + IdleTimeout: p.Config.GetDuration(p.Name + ".idle_timeout"), + MaxHeaderBytes: p.Config.GetInt(p.Name + ".max_header_bytes"), + }, + } +} + +func (cs *contextServer) serve(ctx context.Context) error { + go func() { + <-ctx.Done() + + if err := cs.server.Close(); err != nil { + cs.logger.Info("something went wrong", + zap.Error(err)) + } + }() + + return cs.server.ListenAndServe() +} + +func (cs *contextServer) shutdown(ctx context.Context) error { + return cs.server.Shutdown(ctx) +} diff --git a/lib/fix/worker/worker.go b/lib/fix/worker/worker.go new file mode 100644 index 000000000..c6cbd13b4 --- /dev/null +++ b/lib/fix/worker/worker.go @@ -0,0 +1,79 @@ +package worker + +import ( + "context" + "sync" + "sync/atomic" + "time" +) + +type ( + // Workers is an interface of worker tool. 
+ Workers interface { + Start(context.Context) + Stop() + + Add(Job Handler) + } + + workers struct { + cancel context.CancelFunc + started *int32 + wg *sync.WaitGroup + jobs []Handler + } + + // Handler is a worker's handling function. + Handler func(ctx context.Context) + + // Jobs is a map of worker names to handlers. + Jobs map[string]Handler + + // Job groups the parameters of worker's job. + Job struct { + Disabled bool + Immediately bool + Timer time.Duration + Ticker time.Duration + Handler Handler + } +) + +// New is a constructor of workers. +func New() Workers { + return &workers{ + started: new(int32), + wg: new(sync.WaitGroup), + } +} + +func (w *workers) Add(job Handler) { + w.jobs = append(w.jobs, job) +} + +func (w *workers) Stop() { + if !atomic.CompareAndSwapInt32(w.started, 1, 0) { + // already stopped + return + } + + w.cancel() + w.wg.Wait() +} + +func (w *workers) Start(ctx context.Context) { + if !atomic.CompareAndSwapInt32(w.started, 0, 1) { + // already started + return + } + + ctx, w.cancel = context.WithCancel(ctx) + for _, job := range w.jobs { + w.wg.Add(1) + + go func(handler Handler) { + defer w.wg.Done() + handler(ctx) + }(job) + } +} diff --git a/lib/implementations/acl.go b/lib/implementations/acl.go new file mode 100644 index 000000000..ce3fd58ad --- /dev/null +++ b/lib/implementations/acl.go @@ -0,0 +1,392 @@ +package implementations + +import ( + "context" + + sc "github.com/nspcc-dev/neo-go/pkg/smartcontract" + libacl "github.com/nspcc-dev/neofs-api-go/acl" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/acl" + "github.com/nspcc-dev/neofs-node/lib/blockchain/goclient" + "github.com/nspcc-dev/neofs-node/lib/container" + + "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/pkg/errors" +) + +// Consider moving ACLHelper implementation to the ACL library. + +type ( + // ACLHelper is an interface, that provides useful functions + // for ACL object pre-processor. + ACLHelper interface { + BasicACLGetter + ContainerOwnerChecker + } + + // BasicACLGetter helper provides function to return basic ACL value. + BasicACLGetter interface { + GetBasicACL(context.Context, CID) (uint32, error) + } + + // ContainerOwnerChecker checks owner of the container. + ContainerOwnerChecker interface { + IsContainerOwner(context.Context, CID, refs.OwnerID) (bool, error) + } + + aclHelper struct { + cnr container.Storage + } +) + +type binaryEACLSource struct { + binaryStore acl.BinaryExtendedACLSource +} + +// StaticContractClient is a wrapper over Neo:Morph client +// that invokes single smart contract methods with fixed fee. +type StaticContractClient struct { + // neo-go client instance + client *goclient.Client + + // contract script-hash + scScriptHash util.Uint160 + + // invocation fee + fee util.Fixed8 +} + +// MorphContainerContract is a wrapper over StaticContractClient +// for Container contract calls. 
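A sketch of how the wrapper introduced below might be wired together with NewStaticContractClient (defined further down in this file). The contract script hash is assumed to come from elsewhere, and the method names are placeholders; in the real node they come from configuration:

package example

import (
	"github.com/nspcc-dev/neo-go/pkg/util"
	"github.com/nspcc-dev/neofs-node/lib/blockchain/goclient"
	"github.com/nspcc-dev/neofs-node/lib/implementations"
)

func setupContainerContract(cli *goclient.Client, scHash util.Uint160) (*implementations.MorphContainerContract, error) {
	static, err := implementations.NewStaticContractClient(cli, scHash, util.Fixed8(0))
	if err != nil {
		return nil, err
	}

	cnr := new(implementations.MorphContainerContract)
	cnr.SetContainerContractClient(static)

	// method names are illustrative; real names come from node configuration
	cnr.SetContainerGetMethodName("get")
	cnr.SetContainerPutMethodName("put")
	cnr.SetContainerDeleteMethodName("delete")
	cnr.SetContainerListMethodName("list")
	cnr.SetEACLGetMethodName("eaclGet")
	cnr.SetEACLSetMethodName("eaclSet")

	return cnr, nil
}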
+type MorphContainerContract struct { + // NeoFS Container smart-contract + containerContract StaticContractClient + + // set EACL method name of container contract + eaclSetMethodName string + + // get EACL method name of container contract + eaclGetMethodName string + + // get container method name of container contract + cnrGetMethodName string + + // put container method name of container contract + cnrPutMethodName string + + // delete container method name of container contract + cnrDelMethodName string + + // list containers method name of container contract + cnrListMethodName string +} + +const ( + errNewACLHelper = internal.Error("cannot create ACLHelper instance") +) + +// GetBasicACL returns basic ACL of the container. +func (h aclHelper) GetBasicACL(ctx context.Context, cid CID) (uint32, error) { + gp := container.GetParams{} + gp.SetContext(ctx) + gp.SetCID(cid) + + gResp, err := h.cnr.GetContainer(gp) + if err != nil { + return 0, err + } + + return gResp.Container().BasicACL, nil +} + +// IsContainerOwner returns true if provided id is an owner container. +func (h aclHelper) IsContainerOwner(ctx context.Context, cid CID, id refs.OwnerID) (bool, error) { + gp := container.GetParams{} + gp.SetContext(ctx) + gp.SetCID(cid) + + gResp, err := h.cnr.GetContainer(gp) + if err != nil { + return false, err + } + + return gResp.Container().OwnerID.Equal(id), nil +} + +// NewACLHelper returns implementation of the ACLHelper interface. +func NewACLHelper(cnr container.Storage) (ACLHelper, error) { + if cnr == nil { + return nil, errNewACLHelper + } + + return aclHelper{cnr}, nil +} + +// ExtendedACLSourceFromBinary wraps BinaryExtendedACLSource and returns ExtendedACLSource. +// +// If passed BinaryExtendedACLSource is nil, acl.ErrNilBinaryExtendedACLStore returns. +func ExtendedACLSourceFromBinary(v acl.BinaryExtendedACLSource) (acl.ExtendedACLSource, error) { + if v == nil { + return nil, acl.ErrNilBinaryExtendedACLStore + } + + return &binaryEACLSource{ + binaryStore: v, + }, nil +} + +// GetExtendedACLTable receives eACL table in a binary representation from storage, +// unmarshals it and returns ExtendedACLTable interface. +func (s binaryEACLSource) GetExtendedACLTable(ctx context.Context, cid refs.CID) (libacl.ExtendedACLTable, error) { + key := acl.BinaryEACLKey{} + key.SetCID(cid) + + val, err := s.binaryStore.GetBinaryEACL(ctx, key) + if err != nil { + return nil, err + } + + eacl := val.EACL() + + // TODO: verify signature + + res := libacl.WrapEACLTable(nil) + + return res, res.UnmarshalBinary(eacl) +} + +// NewStaticContractClient initializes a new StaticContractClient. +// +// If passed Client is nil, goclient.ErrNilClient returns. +func NewStaticContractClient(client *goclient.Client, scHash util.Uint160, fee util.Fixed8) (StaticContractClient, error) { + res := StaticContractClient{ + client: client, + scScriptHash: scHash, + fee: fee, + } + + var err error + if client == nil { + err = goclient.ErrNilClient + } + + return res, err +} + +// Invoke calls Invoke method of goclient with predefined script hash and fee. +// Supported args types are the same as in goclient. +// +// If Client is not initialized, goclient.ErrNilClient returns. +func (s StaticContractClient) Invoke(method string, args ...interface{}) error { + if s.client == nil { + return goclient.ErrNilClient + } + + return s.client.Invoke( + s.scScriptHash, + s.fee, + method, + args..., + ) +} + +// TestInvoke calls TestInvoke method of goclient with predefined script hash. 
+// +// If Client is not initialized, goclient.ErrNilClient returns. +func (s StaticContractClient) TestInvoke(method string, args ...interface{}) ([]sc.Parameter, error) { + if s.client == nil { + return nil, goclient.ErrNilClient + } + + return s.client.TestInvoke( + s.scScriptHash, + method, + args..., + ) +} + +// SetContainerContractClient is a container contract client setter. +func (s *MorphContainerContract) SetContainerContractClient(v StaticContractClient) { + s.containerContract = v +} + +// SetEACLGetMethodName is a container contract Get EACL method name setter. +func (s *MorphContainerContract) SetEACLGetMethodName(v string) { + s.eaclGetMethodName = v +} + +// SetEACLSetMethodName is a container contract Set EACL method name setter. +func (s *MorphContainerContract) SetEACLSetMethodName(v string) { + s.eaclSetMethodName = v +} + +// SetContainerGetMethodName is a container contract Get method name setter. +func (s *MorphContainerContract) SetContainerGetMethodName(v string) { + s.cnrGetMethodName = v +} + +// SetContainerPutMethodName is a container contract Put method name setter. +func (s *MorphContainerContract) SetContainerPutMethodName(v string) { + s.cnrPutMethodName = v +} + +// SetContainerDeleteMethodName is a container contract Delete method name setter. +func (s *MorphContainerContract) SetContainerDeleteMethodName(v string) { + s.cnrDelMethodName = v +} + +// SetContainerListMethodName is a container contract List method name setter. +func (s *MorphContainerContract) SetContainerListMethodName(v string) { + s.cnrListMethodName = v +} + +// GetBinaryEACL performs the test invocation call of GetEACL method of NeoFS Container contract. +func (s *MorphContainerContract) GetBinaryEACL(_ context.Context, key acl.BinaryEACLKey) (acl.BinaryEACLValue, error) { + res := acl.BinaryEACLValue{} + + prms, err := s.containerContract.TestInvoke( + s.eaclGetMethodName, + key.CID().Bytes(), + ) + if err != nil { + return res, err + } else if ln := len(prms); ln != 1 { + return res, errors.Errorf("unexpected stack parameter count: %d", ln) + } + + eacl, err := goclient.BytesFromStackParameter(prms[0]) + if err == nil { + res.SetEACL(eacl) + } + + return res, err +} + +// PutBinaryEACL invokes the call of SetEACL method of NeoFS Container contract. +func (s *MorphContainerContract) PutBinaryEACL(_ context.Context, key acl.BinaryEACLKey, val acl.BinaryEACLValue) error { + return s.containerContract.Invoke( + s.eaclSetMethodName, + key.CID().Bytes(), + val.EACL(), + val.Signature(), + ) +} + +// GetContainer performs the test invocation call of Get method of NeoFS Container contract. +func (s *MorphContainerContract) GetContainer(p container.GetParams) (*container.GetResult, error) { + prms, err := s.containerContract.TestInvoke( + s.cnrGetMethodName, + p.CID().Bytes(), + ) + if err != nil { + return nil, errors.Wrap(err, "could not perform test invocation") + } else if ln := len(prms); ln != 1 { + return nil, errors.Errorf("unexpected stack item count: %d", ln) + } + + cnrBytes, err := goclient.BytesFromStackParameter(prms[0]) + if err != nil { + return nil, errors.Wrap(err, "could not get byte array from stack item") + } + + cnr := new(container.Container) + if err := cnr.Unmarshal(cnrBytes); err != nil { + return nil, errors.Wrap(err, "could not unmarshal container from bytes") + } + + res := new(container.GetResult) + res.SetContainer(cnr) + + return res, nil +} + +// PutContainer invokes the call of Put method of NeoFS Container contract. 
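A sketch of reading a container through the GetContainer call above, mirroring the aclHelper code earlier in this file; the package name and the fetchBasicACL helper are illustrative:

package example

import (
	"context"

	"github.com/nspcc-dev/neofs-api-go/refs"
	"github.com/nspcc-dev/neofs-node/lib/container"
	"github.com/nspcc-dev/neofs-node/lib/implementations"
)

// fetchBasicACL reads the container from the contract and returns its basic ACL bits.
func fetchBasicACL(ctx context.Context, cnr *implementations.MorphContainerContract, cid refs.CID) (uint32, error) {
	gp := container.GetParams{}
	gp.SetContext(ctx)
	gp.SetCID(cid)

	res, err := cnr.GetContainer(gp)
	if err != nil {
		return 0, err
	}

	return res.Container().BasicACL, nil
}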
+func (s *MorphContainerContract) PutContainer(p container.PutParams) (*container.PutResult, error) { + cnr := p.Container() + + cid, err := cnr.ID() + if err != nil { + return nil, errors.Wrap(err, "could not calculate container ID") + } + + cnrBytes, err := cnr.Marshal() + if err != nil { + return nil, errors.Wrap(err, "could not marshal container") + } + + if err := s.containerContract.Invoke( + s.cnrPutMethodName, + cnr.OwnerID.Bytes(), + cnrBytes, + []byte{}, + ); err != nil { + return nil, errors.Wrap(err, "could not invoke contract method") + } + + res := new(container.PutResult) + res.SetCID(cid) + + return res, nil +} + +// DeleteContainer invokes the call of Delete method of NeoFS Container contract. +func (s *MorphContainerContract) DeleteContainer(p container.DeleteParams) (*container.DeleteResult, error) { + if err := s.containerContract.Invoke( + s.cnrDelMethodName, + p.CID().Bytes(), + p.OwnerID().Bytes(), + []byte{}, + ); err != nil { + return nil, errors.Wrap(err, "could not invoke contract method") + } + + return new(container.DeleteResult), nil +} + +// ListContainers performs the test invocation call of Get method of NeoFS Container contract. +// +// If owner ID list in parameters is non-empty, bytes of first owner are attached to call. +func (s *MorphContainerContract) ListContainers(p container.ListParams) (*container.ListResult, error) { + args := make([]interface{}, 0, 1) + + if ownerIDList := p.OwnerIDList(); len(ownerIDList) > 0 { + args = append(args, ownerIDList[0].Bytes()) + } + + prms, err := s.containerContract.TestInvoke( + s.cnrListMethodName, + args..., + ) + if err != nil { + return nil, errors.Wrap(err, "could not perform test invocation") + } else if ln := len(prms); ln != 1 { + return nil, errors.Errorf("unexpected stack item count: %d", ln) + } + + prms, err = goclient.ArrayFromStackParameter(prms[0]) + if err != nil { + return nil, errors.Wrap(err, "could not get stack item array from stack item") + } + + cidList := make([]CID, 0, len(prms)) + + for i := range prms { + cidBytes, err := goclient.BytesFromStackParameter(prms[i]) + if err != nil { + return nil, errors.Wrap(err, "could not get byte array from stack item") + } + + cid, err := refs.CIDFromBytes(cidBytes) + if err != nil { + return nil, errors.Wrap(err, "could not get container ID from bytes") + } + + cidList = append(cidList, cid) + } + + res := new(container.ListResult) + res.SetCIDList(cidList) + + return res, nil +} diff --git a/lib/implementations/acl_test.go b/lib/implementations/acl_test.go new file mode 100644 index 000000000..cb462de74 --- /dev/null +++ b/lib/implementations/acl_test.go @@ -0,0 +1,19 @@ +package implementations + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestStaticContractClient(t *testing.T) { + s := new(StaticContractClient) + + require.NotPanics(t, func() { + _, _ = s.TestInvoke("") + }) + + require.NotPanics(t, func() { + _ = s.Invoke("") + }) +} diff --git a/lib/implementations/balance.go b/lib/implementations/balance.go new file mode 100644 index 000000000..d535c0eaf --- /dev/null +++ b/lib/implementations/balance.go @@ -0,0 +1,141 @@ +package implementations + +import ( + "github.com/nspcc-dev/neo-go/pkg/encoding/address" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-node/lib/blockchain/goclient" + "github.com/pkg/errors" +) + +// MorphBalanceContract is a wrapper over NeoFS Balance contract client +// that provides an interface of manipulations with user funds. 
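A sketch of querying a user balance through the wrapper described just above, assuming it has already been wired with a StaticContractClient and method names; printBalance and the output formatting are illustrative:

package example

import (
	"fmt"
	"math"

	"github.com/nspcc-dev/neofs-api-go/refs"
	"github.com/nspcc-dev/neofs-node/lib/implementations"
)

// printBalance fetches the raw fixed-point balance and the token precision,
// then prints a human-readable value.
func printBalance(bal implementations.MorphBalanceContract, owner refs.OwnerID) error {
	p := implementations.BalanceOfParams{}
	p.SetOwnerID(owner)

	res, err := bal.BalanceOf(p)
	if err != nil {
		return err
	}

	dec, err := bal.Decimals(implementations.DecimalsParams{})
	if err != nil {
		return err
	}

	fmt.Printf("balance: %f\n", float64(res.Amount())/math.Pow10(int(dec.Decimals())))

	return nil
}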
+type MorphBalanceContract struct { + // NeoFS Balance smart-contract + balanceContract StaticContractClient + + // "balance of" method name of balance contract + balanceOfMethodName string + + // decimals method name of balance contract + decimalsMethodName string +} + +// BalanceOfParams is a structure that groups the parameters +// for NeoFS user balance receiving operation. +type BalanceOfParams struct { + owner refs.OwnerID +} + +// BalanceOfResult is a structure that groups the values +// of the result of NeoFS user balance receiving operation. +type BalanceOfResult struct { + amount int64 +} + +// DecimalsParams is a structure that groups the parameters +// for NeoFS token decimals receiving operation. +type DecimalsParams struct { +} + +// DecimalsResult is a structure that groups the values +// of the result of NeoFS token decimals receiving operation. +type DecimalsResult struct { + dec int64 +} + +// SetBalanceContractClient is a Balance contract client setter. +func (s *MorphBalanceContract) SetBalanceContractClient(v StaticContractClient) { + s.balanceContract = v +} + +// SetBalanceOfMethodName is a Balance contract balanceOf method name setter. +func (s *MorphBalanceContract) SetBalanceOfMethodName(v string) { + s.balanceOfMethodName = v +} + +// SetDecimalsMethodName is a Balance contract decimals method name setter. +func (s *MorphBalanceContract) SetDecimalsMethodName(v string) { + s.decimalsMethodName = v +} + +// BalanceOf performs the test invocation call of balanceOf method of NeoFS Balance contract. +func (s MorphBalanceContract) BalanceOf(p BalanceOfParams) (*BalanceOfResult, error) { + owner := p.OwnerID() + + u160, err := address.StringToUint160(owner.String()) + if err != nil { + return nil, errors.Wrap(err, "could not convert wallet address to Uint160") + } + + prms, err := s.balanceContract.TestInvoke( + s.balanceOfMethodName, + u160.BytesBE(), + ) + if err != nil { + return nil, errors.Wrap(err, "could not perform test invocation") + } else if ln := len(prms); ln != 1 { + return nil, errors.Errorf("unexpected stack item count (balanceOf): %d", ln) + } + + amount, err := goclient.IntFromStackParameter(prms[0]) + if err != nil { + return nil, errors.Wrap(err, "could not get integer stack item from stack item (amount)") + } + + res := new(BalanceOfResult) + res.SetAmount(amount) + + return res, nil +} + +// Decimals performs the test invocation call of decimals method of NeoFS Balance contract. +func (s MorphBalanceContract) Decimals(DecimalsParams) (*DecimalsResult, error) { + prms, err := s.balanceContract.TestInvoke( + s.decimalsMethodName, + ) + if err != nil { + return nil, errors.Wrap(err, "could not perform test invocation") + } else if ln := len(prms); ln != 1 { + return nil, errors.Errorf("unexpected stack item count (decimals): %d", ln) + } + + dec, err := goclient.IntFromStackParameter(prms[0]) + if err != nil { + return nil, errors.Wrap(err, "could not get integer stack item from stack item (decimal)") + } + + res := new(DecimalsResult) + res.SetDecimals(dec) + + return res, nil +} + +// SetOwnerID is an owner ID setter. +func (s *BalanceOfParams) SetOwnerID(v refs.OwnerID) { + s.owner = v +} + +// OwnerID is an owner ID getter. +func (s BalanceOfParams) OwnerID() refs.OwnerID { + return s.owner +} + +// SetAmount is an funds amount setter. +func (s *BalanceOfResult) SetAmount(v int64) { + s.amount = v +} + +// Amount is an funds amount getter. +func (s BalanceOfResult) Amount() int64 { + return s.amount +} + +// SetDecimals is a decimals setter. 
+func (s *DecimalsResult) SetDecimals(v int64) { + s.dec = v +} + +// Decimals is a decimals getter. +func (s DecimalsResult) Decimals() int64 { + return s.dec +} diff --git a/lib/implementations/balance_test.go b/lib/implementations/balance_test.go new file mode 100644 index 000000000..c9b571c8a --- /dev/null +++ b/lib/implementations/balance_test.go @@ -0,0 +1,35 @@ +package implementations + +import ( + "testing" + + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/stretchr/testify/require" +) + +func TestBalanceOfParams(t *testing.T) { + s := BalanceOfParams{} + + owner := refs.OwnerID{1, 2, 3} + s.SetOwnerID(owner) + + require.Equal(t, owner, s.OwnerID()) +} + +func TestBalanceOfResult(t *testing.T) { + s := BalanceOfResult{} + + amount := int64(100) + s.SetAmount(amount) + + require.Equal(t, amount, s.Amount()) +} + +func TestDecimalsResult(t *testing.T) { + s := DecimalsResult{} + + dec := int64(100) + s.SetDecimals(dec) + + require.Equal(t, dec, s.Decimals()) +} diff --git a/lib/implementations/bootstrap.go b/lib/implementations/bootstrap.go new file mode 100644 index 000000000..458967521 --- /dev/null +++ b/lib/implementations/bootstrap.go @@ -0,0 +1,311 @@ +package implementations + +import ( + "github.com/nspcc-dev/neo-go/pkg/smartcontract" + "github.com/nspcc-dev/neofs-api-go/bootstrap" + "github.com/nspcc-dev/neofs-node/lib/blockchain/goclient" + "github.com/nspcc-dev/neofs-node/lib/boot" + "github.com/nspcc-dev/neofs-node/lib/ir" + "github.com/nspcc-dev/neofs-node/lib/netmap" + "github.com/pkg/errors" +) + +// MorphNetmapContract is a wrapper over NeoFS Netmap contract client +// that provides an interface of network map manipulations. +type MorphNetmapContract struct { + // NeoFS Netmap smart-contract + netmapContract StaticContractClient + + // add peer method name of netmap contract + addPeerMethodName string + + // new epoch method name of netmap contract + newEpochMethodName string + + // get netmap method name of netmap contract + getNetMapMethodName string + + // update state method name of netmap contract + updStateMethodName string + + // IR list method name of netmap contract + irListMethodName string +} + +// UpdateEpochParams is a structure that groups the parameters +// for NeoFS epoch number updating. +type UpdateEpochParams struct { + epoch uint64 +} + +// UpdateStateParams is a structure that groups the parameters +// for NeoFS node state updating. +type UpdateStateParams struct { + st NodeState + + key []byte +} + +// NodeState is a type of node states enumeration. +type NodeState int64 + +const ( + _ NodeState = iota + + // StateOffline is an offline node state value. + StateOffline +) + +const addPeerFixedArgNumber = 2 + +const nodeInfoFixedPrmNumber = 3 + +// SetNetmapContractClient is a Netmap contract client setter. +func (s *MorphNetmapContract) SetNetmapContractClient(v StaticContractClient) { + s.netmapContract = v +} + +// SetAddPeerMethodName is a Netmap contract AddPeer method name setter. +func (s *MorphNetmapContract) SetAddPeerMethodName(v string) { + s.addPeerMethodName = v +} + +// SetNewEpochMethodName is a Netmap contract NewEpoch method name setter. +func (s *MorphNetmapContract) SetNewEpochMethodName(v string) { + s.newEpochMethodName = v +} + +// SetNetMapMethodName is a Netmap contract Netmap method name setter. +func (s *MorphNetmapContract) SetNetMapMethodName(v string) { + s.getNetMapMethodName = v +} + +// SetUpdateStateMethodName is a Netmap contract UpdateState method name setter. 
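A sketch of announcing a new epoch through the netmap wrapper above, assuming it is already wired with a contract client and method names; bumpEpoch is an illustrative helper name:

package example

import (
	"github.com/nspcc-dev/neofs-node/lib/implementations"
)

// bumpEpoch announces a new NeoFS epoch through the netmap contract wrapper.
func bumpEpoch(nm *implementations.MorphNetmapContract, epoch uint64) error {
	p := implementations.UpdateEpochParams{}
	p.SetNumber(epoch)

	return nm.UpdateEpoch(p)
}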
+func (s *MorphNetmapContract) SetUpdateStateMethodName(v string) { + s.updStateMethodName = v +} + +// SetIRListMethodName is a Netmap contract InnerRingList method name setter. +func (s *MorphNetmapContract) SetIRListMethodName(v string) { + s.irListMethodName = v +} + +// AddPeer invokes the call of AddPeer method of NeoFS Netmap contract. +func (s *MorphNetmapContract) AddPeer(p boot.BootstrapPeerParams) error { + info := p.NodeInfo() + opts := info.GetOptions() + + args := make([]interface{}, 0, addPeerFixedArgNumber+len(opts)) + + args = append(args, + // Address + []byte(info.GetAddress()), + + // Public key + info.GetPubKey(), + ) + + // Options + for i := range opts { + args = append(args, []byte(opts[i])) + } + + return s.netmapContract.Invoke( + s.addPeerMethodName, + args..., + ) +} + +// UpdateEpoch invokes the call of NewEpoch method of NeoFS Netmap contract. +func (s *MorphNetmapContract) UpdateEpoch(p UpdateEpochParams) error { + return s.netmapContract.Invoke( + s.newEpochMethodName, + int64(p.Number()), // TODO: do not cast after uint64 type will become supported in client + ) +} + +// GetNetMap performs the test invocation call of Netmap method of NeoFS Netmap contract. +func (s *MorphNetmapContract) GetNetMap(p netmap.GetParams) (*netmap.GetResult, error) { + prms, err := s.netmapContract.TestInvoke( + s.getNetMapMethodName, + ) + if err != nil { + return nil, errors.Wrap(err, "could not perform test invocation") + } else if ln := len(prms); ln != 1 { + return nil, errors.Errorf("unexpected stack item count (Nodes): %d", ln) + } + + prms, err = goclient.ArrayFromStackParameter(prms[0]) + if err != nil { + return nil, errors.Wrap(err, "could not get stack item array from stack item (Nodes)") + } + + nm := netmap.NewNetmap() + + for i := range prms { + nodeInfo, err := nodeInfoFromStackItem(prms[i]) + if err != nil { + return nil, errors.Wrapf(err, "could not parse stack item (Node #%d)", i) + } + + if err := nm.AddNode(nodeInfo); err != nil { + return nil, errors.Wrapf(err, "could not add node #%d to network map", i) + } + } + + res := new(netmap.GetResult) + res.SetNetMap(nm) + + return res, nil +} + +func nodeInfoFromStackItem(prm smartcontract.Parameter) (*bootstrap.NodeInfo, error) { + prms, err := goclient.ArrayFromStackParameter(prm) + if err != nil { + return nil, errors.Wrapf(err, "could not get stack item array (NodeInfo)") + } else if ln := len(prms); ln != nodeInfoFixedPrmNumber { + return nil, errors.Errorf("unexpected stack item count (NodeInfo): expected %d, has %d", 3, ln) + } + + res := new(bootstrap.NodeInfo) + + // Address + addrBytes, err := goclient.BytesFromStackParameter(prms[0]) + if err != nil { + return nil, errors.Wrap(err, "could not get byte array from stack item (Address)") + } + + res.Address = string(addrBytes) + + // Public key + res.PubKey, err = goclient.BytesFromStackParameter(prms[1]) + if err != nil { + return nil, errors.Wrap(err, "could not get byte array from stack item (Public key)") + } + + // Options + prms, err = goclient.ArrayFromStackParameter(prms[2]) + if err != nil { + return nil, errors.Wrapf(err, "could not get stack item array (Options)") + } + + res.Options = make([]string, 0, len(prms)) + + for i := range prms { + optBytes, err := goclient.BytesFromStackParameter(prms[i]) + if err != nil { + return nil, errors.Wrapf(err, "could not get byte array from stack item (Option #%d)", i) + } + + res.Options = append(res.Options, string(optBytes)) + } + + return res, nil +} + +// UpdateState invokes the call of UpdateState 
method of NeoFS Netmap contract. +func (s *MorphNetmapContract) UpdateState(p UpdateStateParams) error { + return s.netmapContract.Invoke( + s.updStateMethodName, + p.State().Int64(), + p.Key(), + ) +} + +// GetIRInfo performs the test invocation call of InnerRingList method of NeoFS Netmap contract. +func (s *MorphNetmapContract) GetIRInfo(ir.GetInfoParams) (*ir.GetInfoResult, error) { + prms, err := s.netmapContract.TestInvoke( + s.irListMethodName, + ) + if err != nil { + return nil, errors.Wrap(err, "could not perform test invocation") + } else if ln := len(prms); ln != 1 { + return nil, errors.Errorf("unexpected stack item count (Nodes): %d", ln) + } + + irInfo, err := irInfoFromStackItem(prms[0]) + if err != nil { + return nil, errors.Wrap(err, "could not get IR info from stack item") + } + + res := new(ir.GetInfoResult) + res.SetInfo(*irInfo) + + return res, nil +} + +func irInfoFromStackItem(prm smartcontract.Parameter) (*ir.Info, error) { + prms, err := goclient.ArrayFromStackParameter(prm) + if err != nil { + return nil, errors.Wrap(err, "could not get stack item array") + } + + nodes := make([]ir.Node, 0, len(prms)) + + for i := range prms { + node, err := irNodeFromStackItem(prms[i]) + if err != nil { + return nil, errors.Wrapf(err, "could not get node info from stack item (IRNode #%d)", i) + } + + nodes = append(nodes, *node) + } + + info := new(ir.Info) + info.SetNodes(nodes) + + return info, nil +} + +func irNodeFromStackItem(prm smartcontract.Parameter) (*ir.Node, error) { + prms, err := goclient.ArrayFromStackParameter(prm) + if err != nil { + return nil, errors.Wrap(err, "could not get stack item array (IRNode)") + } + + // Public key + keyBytes, err := goclient.BytesFromStackParameter(prms[0]) + if err != nil { + return nil, errors.Wrap(err, "could not get byte array from stack item (Key)") + } + + node := new(ir.Node) + node.SetKey(keyBytes) + + return node, nil +} + +// SetNumber is an epoch number setter. +func (s *UpdateEpochParams) SetNumber(v uint64) { + s.epoch = v +} + +// Number is an epoch number getter. +func (s UpdateEpochParams) Number() uint64 { + return s.epoch +} + +// SetState is a state setter. +func (s *UpdateStateParams) SetState(v NodeState) { + s.st = v +} + +// State is a state getter. +func (s UpdateStateParams) State() NodeState { + return s.st +} + +// SetKey is a public key setter. +func (s *UpdateStateParams) SetKey(v []byte) { + s.key = v +} + +// Key is a public key getter. +func (s UpdateStateParams) Key() []byte { + return s.key +} + +// Int64 converts NodeState to int64. 
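A sketch of taking a node offline with the UpdateState call above; goOffline and the pubKey argument are illustrative:

package example

import (
	"github.com/nspcc-dev/neofs-node/lib/implementations"
)

// goOffline marks the node with the given public key as offline in the netmap contract.
func goOffline(nm *implementations.MorphNetmapContract, pubKey []byte) error {
	p := implementations.UpdateStateParams{}
	p.SetState(implementations.StateOffline)
	p.SetKey(pubKey)

	return nm.UpdateState(p)
}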
+func (s NodeState) Int64() int64 { + return int64(s) +} diff --git a/lib/implementations/bootstrap_test.go b/lib/implementations/bootstrap_test.go new file mode 100644 index 000000000..a9968ae98 --- /dev/null +++ b/lib/implementations/bootstrap_test.go @@ -0,0 +1,30 @@ +package implementations + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestUpdateEpochParams(t *testing.T) { + s := UpdateEpochParams{} + + e := uint64(100) + s.SetNumber(e) + + require.Equal(t, e, s.Number()) +} + +func TestUpdateStateParams(t *testing.T) { + s := UpdateStateParams{} + + st := NodeState(1) + s.SetState(st) + + require.Equal(t, st, s.State()) + + key := []byte{1, 2, 3} + s.SetKey(key) + + require.Equal(t, key, s.Key()) +} diff --git a/lib/implementations/epoch.go b/lib/implementations/epoch.go new file mode 100644 index 000000000..16d9a5c37 --- /dev/null +++ b/lib/implementations/epoch.go @@ -0,0 +1,7 @@ +package implementations + +// EpochReceiver is an interface of the container +// of NeoFS epoch number with read access. +type EpochReceiver interface { + Epoch() uint64 +} diff --git a/lib/implementations/locator.go b/lib/implementations/locator.go new file mode 100644 index 000000000..6cf19ce0e --- /dev/null +++ b/lib/implementations/locator.go @@ -0,0 +1,78 @@ +package implementations + +import ( + "context" + + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-api-go/query" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-node/lib/replication" + "github.com/nspcc-dev/neofs-node/lib/transport" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +type ( + locator struct { + executor SelectiveContainerExecutor + log *zap.Logger + } + + // LocatorParams groups the parameters of ObjectLocator constructor. + LocatorParams struct { + SelectiveContainerExecutor SelectiveContainerExecutor + Logger *zap.Logger + } +) + +const locatorInstanceFailMsg = "could not create object locator" + +var errEmptyObjectsContainerHandler = errors.New("empty container objects container handler") + +func (s *locator) LocateObject(ctx context.Context, addr Address) (res []multiaddr.Multiaddr, err error) { + queryBytes, err := (&query.Query{ + Filters: []query.Filter{ + { + Type: query.Filter_Exact, + Name: transport.KeyID, + Value: addr.ObjectID.String(), + }, + }, + }).Marshal() + if err != nil { + return nil, errors.Wrap(err, "locate object failed on query marshal") + } + + err = s.executor.Search(ctx, &SearchParams{ + SelectiveParams: SelectiveParams{ + CID: addr.CID, + TTL: service.NonForwardingTTL, + IDList: make([]ObjectID, 1), + }, + SearchCID: addr.CID, + SearchQuery: queryBytes, + Handler: func(node multiaddr.Multiaddr, addrList []refs.Address) { + if len(addrList) > 0 { + res = append(res, node) + } + }, + }) + + return +} + +// NewObjectLocator constructs replication.ObjectLocator from SelectiveContainerExecutor. 
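A sketch of using the constructor that follows, assuming replication.ObjectLocator exposes the LocateObject method implemented above; locateReplicas is an illustrative helper and the executor is assumed to be built elsewhere:

package example

import (
	"context"

	"github.com/multiformats/go-multiaddr"
	"github.com/nspcc-dev/neofs-api-go/refs"
	"github.com/nspcc-dev/neofs-node/lib/implementations"
	"go.uber.org/zap"
)

// locateReplicas builds the locator from an existing executor and asks the
// network which nodes currently hold the object at addr.
func locateReplicas(ctx context.Context, exec implementations.SelectiveContainerExecutor, addr refs.Address) ([]multiaddr.Multiaddr, error) {
	loc, err := implementations.NewObjectLocator(implementations.LocatorParams{
		SelectiveContainerExecutor: exec,
		Logger:                     zap.L(),
	})
	if err != nil {
		return nil, err
	}

	return loc.LocateObject(ctx, addr)
}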
+func NewObjectLocator(p LocatorParams) (replication.ObjectLocator, error) { + switch { + case p.SelectiveContainerExecutor == nil: + return nil, errors.Wrap(errEmptyObjectsContainerHandler, locatorInstanceFailMsg) + case p.Logger == nil: + return nil, errors.Wrap(errEmptyLogger, locatorInstanceFailMsg) + } + + return &locator{ + executor: p.SelectiveContainerExecutor, + log: p.Logger, + }, nil +} diff --git a/lib/implementations/locator_test.go b/lib/implementations/locator_test.go new file mode 100644 index 000000000..892b38839 --- /dev/null +++ b/lib/implementations/locator_test.go @@ -0,0 +1,38 @@ +package implementations + +import ( + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +type testExecutor struct { + SelectiveContainerExecutor +} + +func TestNewObjectLocator(t *testing.T) { + validParams := LocatorParams{ + SelectiveContainerExecutor: new(testExecutor), + Logger: zap.L(), + } + + t.Run("valid params", func(t *testing.T) { + s, err := NewObjectLocator(validParams) + require.NoError(t, err) + require.NotNil(t, s) + }) + t.Run("empty logger", func(t *testing.T) { + p := validParams + p.Logger = nil + _, err := NewObjectLocator(p) + require.EqualError(t, err, errors.Wrap(errEmptyLogger, locatorInstanceFailMsg).Error()) + }) + t.Run("empty container handler", func(t *testing.T) { + p := validParams + p.SelectiveContainerExecutor = nil + _, err := NewObjectLocator(p) + require.EqualError(t, err, errors.Wrap(errEmptyObjectsContainerHandler, locatorInstanceFailMsg).Error()) + }) +} diff --git a/lib/implementations/object.go b/lib/implementations/object.go new file mode 100644 index 000000000..ed260af13 --- /dev/null +++ b/lib/implementations/object.go @@ -0,0 +1,131 @@ +package implementations + +import ( + "context" + + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-node/lib/localstore" + "github.com/nspcc-dev/neofs-node/lib/replication" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +type ( + // ObjectStorage is an interface of encapsulated ObjectReceptacle and ObjectSource pair. + ObjectStorage interface { + replication.ObjectReceptacle + replication.ObjectSource + } + + objectStorage struct { + ls localstore.Localstore + executor SelectiveContainerExecutor + log *zap.Logger + } + + // ObjectStorageParams groups the parameters of ObjectStorage constructor. 
+ ObjectStorageParams struct { + Localstore localstore.Localstore + SelectiveContainerExecutor SelectiveContainerExecutor + Logger *zap.Logger + } +) + +const objectSourceInstanceFailMsg = "could not create object source" + +var errNilObject = errors.New("object is nil") + +var errCouldNotGetObject = errors.New("could not get object from any node") + +func (s *objectStorage) Put(ctx context.Context, params replication.ObjectStoreParams) error { + if params.Object == nil { + return errNilObject + } else if len(params.Nodes) == 0 { + if s.ls == nil { + return errEmptyLocalstore + } + return s.ls.Put(ctx, params.Object) + } + + nodes := make([]multiaddr.Multiaddr, len(params.Nodes)) + for i := range params.Nodes { + nodes[i] = params.Nodes[i].Node + } + + return s.executor.Put(ctx, &PutParams{ + SelectiveParams: SelectiveParams{ + CID: params.Object.SystemHeader.CID, + Nodes: nodes, + TTL: service.NonForwardingTTL, + IDList: make([]ObjectID, 1), + }, + Object: params.Object, + Handler: func(node multiaddr.Multiaddr, valid bool) { + if params.Handler == nil { + return + } + for i := range params.Nodes { + if params.Nodes[i].Node.Equal(node) { + params.Handler(params.Nodes[i], valid) + return + } + } + }, + }) +} + +func (s *objectStorage) Get(ctx context.Context, addr Address) (res *Object, err error) { + if s.ls != nil { + if has, err := s.ls.Has(addr); err == nil && has { + if res, err = s.ls.Get(addr); err == nil { + return res, err + } + } + } + + if err = s.executor.Get(ctx, &GetParams{ + SelectiveParams: SelectiveParams{ + CID: addr.CID, + TTL: service.NonForwardingTTL, + IDList: []ObjectID{addr.ObjectID}, + Breaker: func(refs.Address) (cFlag ProgressControlFlag) { + if res != nil { + cFlag = BreakProgress + } + return + }, + }, + Handler: func(node multiaddr.Multiaddr, obj *object.Object) { res = obj }, + }); err != nil { + return + } else if res == nil { + return nil, errCouldNotGetObject + } + + return +} + +// NewObjectStorage encapsulates Localstore and SelectiveContainerExecutor +// and returns ObjectStorage interface. +func NewObjectStorage(p ObjectStorageParams) (ObjectStorage, error) { + if p.Logger == nil { + return nil, errors.Wrap(errEmptyLogger, objectSourceInstanceFailMsg) + } + + if p.Localstore == nil { + p.Logger.Warn("local storage not provided") + } + + if p.SelectiveContainerExecutor == nil { + p.Logger.Warn("object container handler not provided") + } + + return &objectStorage{ + ls: p.Localstore, + executor: p.SelectiveContainerExecutor, + log: p.Logger, + }, nil +} diff --git a/lib/implementations/peerstore.go b/lib/implementations/peerstore.go new file mode 100644 index 000000000..6a7070f1b --- /dev/null +++ b/lib/implementations/peerstore.go @@ -0,0 +1,74 @@ +package implementations + +import ( + "crypto/ecdsa" + + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/peers" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +type ( + // AddressStoreComponent is an interface of encapsulated AddressStore and NodePublicKeyReceiver pair. + AddressStoreComponent interface { + AddressStore + NodePublicKeyReceiver + } + + // AddressStore is an interface of the container of local Multiaddr. + AddressStore interface { + SelfAddr() (multiaddr.Multiaddr, error) + } + + // NodePublicKeyReceiver is an interface of Multiaddr to PublicKey converter. 
+ NodePublicKeyReceiver interface { + PublicKey(multiaddr.Multiaddr) *ecdsa.PublicKey + } + + addressStore struct { + ps peers.Store + + log *zap.Logger + } +) + +const ( + addressStoreInstanceFailMsg = "could not create address store" + errEmptyPeerStore = internal.Error("empty peer store") + + errEmptyAddressStore = internal.Error("empty address store") +) + +func (s addressStore) SelfAddr() (multiaddr.Multiaddr, error) { return s.ps.GetAddr(s.ps.SelfID()) } + +func (s addressStore) PublicKey(mAddr multiaddr.Multiaddr) (res *ecdsa.PublicKey) { + if peerID, err := s.ps.AddressID(mAddr); err != nil { + s.log.Error("could not peer ID", + zap.Stringer("node", mAddr), + zap.Error(err), + ) + } else if res, err = s.ps.GetPublicKey(peerID); err != nil { + s.log.Error("could not receive public key", + zap.Stringer("peer", peerID), + zap.Error(err), + ) + } + + return res +} + +// NewAddressStore wraps peer store and returns AddressStoreComponent. +func NewAddressStore(ps peers.Store, log *zap.Logger) (AddressStoreComponent, error) { + if ps == nil { + return nil, errors.Wrap(errEmptyPeerStore, addressStoreInstanceFailMsg) + } else if log == nil { + return nil, errors.Wrap(errEmptyLogger, addressStoreInstanceFailMsg) + } + + return &addressStore{ + ps: ps, + log: log, + }, nil +} diff --git a/lib/implementations/placement.go b/lib/implementations/placement.go new file mode 100644 index 000000000..4c7d95cf1 --- /dev/null +++ b/lib/implementations/placement.go @@ -0,0 +1,152 @@ +package implementations + +import ( + "context" + + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-api-go/bootstrap" + "github.com/nspcc-dev/neofs-api-go/container" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/netmap" + "github.com/nspcc-dev/neofs-node/lib/placement" + "github.com/pkg/errors" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +/* + File source code includes implementations of placement-related solutions. + Highly specialized interfaces give the opportunity to hide placement implementation in a black box for the reasons: + * placement is implementation-tied entity working with graphs, filters, etc.; + * NeoFS components are mostly needed in a small part of the solutions provided by placement; + * direct dependency from placement avoidance helps other components do not touch crucial changes in placement. +*/ + +type ( + // CID is a type alias of + // CID from refs package of neofs-api-go. + CID = refs.CID + + // SGID is a type alias of + // SGID from refs package of neofs-api-go. + SGID = refs.SGID + + // ObjectID is a type alias of + // ObjectID from refs package of neofs-api-go. + ObjectID = refs.ObjectID + + // Object is a type alias of + // Object from object package of neofs-api-go. + Object = object.Object + + // Address is a type alias of + // Address from refs package of neofs-api-go. + Address = refs.Address + + // Netmap is a type alias of + // NetMap from netmap package. + Netmap = netmap.NetMap + + // ObjectPlacer is an interface of placement utility. + ObjectPlacer interface { + ContainerNodesLister + ContainerInvolvementChecker + GetNodes(ctx context.Context, addr Address, usePreviousNetMap bool, excl ...multiaddr.Multiaddr) ([]multiaddr.Multiaddr, error) + Epoch() uint64 + } + + // ContainerNodesLister is an interface of container placement vector builder. 
+ ContainerNodesLister interface { + ContainerNodes(ctx context.Context, cid CID) ([]multiaddr.Multiaddr, error) + ContainerNodesInfo(ctx context.Context, cid CID, prev int) ([]bootstrap.NodeInfo, error) + } + + // ContainerInvolvementChecker is an interface of container affiliation checker. + ContainerInvolvementChecker interface { + IsContainerNode(ctx context.Context, addr multiaddr.Multiaddr, cid CID, previousNetMap bool) (bool, error) + } + + objectPlacer struct { + pl placement.Component + } +) + +const errEmptyPlacement = internal.Error("could not create storage lister: empty placement component") + +// NewObjectPlacer wraps placement.Component and returns ObjectPlacer interface. +func NewObjectPlacer(pl placement.Component) (ObjectPlacer, error) { + if pl == nil { + return nil, errEmptyPlacement + } + + return &objectPlacer{pl}, nil +} + +func (v objectPlacer) ContainerNodes(ctx context.Context, cid CID) ([]multiaddr.Multiaddr, error) { + graph, err := v.pl.Query(ctx, placement.ContainerID(cid)) + if err != nil { + return nil, errors.Wrap(err, "objectPlacer.ContainerNodes failed on graph query") + } + + return graph.NodeList() +} + +func (v objectPlacer) ContainerNodesInfo(ctx context.Context, cid CID, prev int) ([]bootstrap.NodeInfo, error) { + graph, err := v.pl.Query(ctx, placement.ContainerID(cid), placement.UsePreviousNetmap(prev)) + if err != nil { + return nil, errors.Wrap(err, "objectPlacer.ContainerNodesInfo failed on graph query") + } + + return graph.NodeInfo() +} + +func (v objectPlacer) GetNodes(ctx context.Context, addr Address, usePreviousNetMap bool, excl ...multiaddr.Multiaddr) ([]multiaddr.Multiaddr, error) { + queryOptions := make([]placement.QueryOption, 1, 2) + queryOptions[0] = placement.ContainerID(addr.CID) + + if usePreviousNetMap { + queryOptions = append(queryOptions, placement.UsePreviousNetmap(1)) + } + + graph, err := v.pl.Query(ctx, queryOptions...) + if err != nil { + if st, ok := status.FromError(errors.Cause(err)); ok && st.Code() == codes.NotFound { + return nil, container.ErrNotFound + } + + return nil, errors.Wrap(err, "placer.GetNodes failed on graph query") + } + + filter := func(group netmap.SFGroup, bucket *netmap.Bucket) *netmap.Bucket { + return bucket + } + + if !addr.ObjectID.Empty() { + filter = func(group netmap.SFGroup, bucket *netmap.Bucket) *netmap.Bucket { + return bucket.GetSelection(group.Selectors, addr.ObjectID.Bytes()) + } + } + + return graph.Exclude(excl).Filter(filter).NodeList() +} + +func (v objectPlacer) IsContainerNode(ctx context.Context, addr multiaddr.Multiaddr, cid CID, previousNetMap bool) (bool, error) { + nodes, err := v.GetNodes(ctx, Address{ + CID: cid, + }, previousNetMap) + if err != nil { + return false, errors.Wrap(err, "placer.FromContainer failed on placer.GetNodes") + } + + for i := range nodes { + if nodes[i].Equal(addr) { + return true, nil + } + } + + return false, nil +} + +func (v objectPlacer) Epoch() uint64 { return v.pl.NetworkState().Epoch } diff --git a/lib/implementations/reputation.go b/lib/implementations/reputation.go new file mode 100644 index 000000000..2fb4865e2 --- /dev/null +++ b/lib/implementations/reputation.go @@ -0,0 +1,41 @@ +package implementations + +import ( + "github.com/nspcc-dev/neofs-node/lib/peers" +) + +// MorphReputationContract is a wrapper over NeoFS Reputation contract client +// that provides an interface of the storage of global trust values. 
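A sketch of the placement queries above, wrapping NewObjectPlacer; containerPlacement is an illustrative helper and the placement.Component value is assumed to be constructed elsewhere:

package example

import (
	"context"

	"github.com/multiformats/go-multiaddr"
	"github.com/nspcc-dev/neofs-api-go/refs"
	"github.com/nspcc-dev/neofs-node/lib/implementations"
	"github.com/nspcc-dev/neofs-node/lib/placement"
)

// containerPlacement lists the placement vector of a container and reports
// whether the given local address is part of it.
func containerPlacement(ctx context.Context, pl placement.Component, cid refs.CID, self multiaddr.Multiaddr) ([]multiaddr.Multiaddr, bool, error) {
	placer, err := implementations.NewObjectPlacer(pl)
	if err != nil {
		return nil, false, err
	}

	nodes, err := placer.ContainerNodes(ctx, cid)
	if err != nil {
		return nil, false, err
	}

	mine, err := placer.IsContainerNode(ctx, self, cid, false)
	if err != nil {
		return nil, false, err
	}

	return nodes, mine, nil
}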
+type MorphReputationContract struct { + // NeoFS Reputation smart-contract + repContract StaticContractClient + + // put method name of reputation contract + putMethodName string + + // list method name of reputation contract + listMethodName string + + // public key storage + pkStore peers.PublicKeyStore +} + +// SetReputationContractClient is a Reputation contract client setter. +func (s *MorphReputationContract) SetReputationContractClient(v StaticContractClient) { + s.repContract = v +} + +// SetPublicKeyStore is a public key store setter. +func (s *MorphReputationContract) SetPublicKeyStore(v peers.PublicKeyStore) { + s.pkStore = v +} + +// SetPutMethodName is a Reputation contract Put method name setter. +func (s *MorphReputationContract) SetPutMethodName(v string) { + s.putMethodName = v +} + +// SetListMethodName is a Reputation contract List method name setter. +func (s *MorphReputationContract) SetListMethodName(v string) { + s.listMethodName = v +} diff --git a/lib/implementations/sg.go b/lib/implementations/sg.go new file mode 100644 index 000000000..ef0f95e8a --- /dev/null +++ b/lib/implementations/sg.go @@ -0,0 +1,136 @@ +package implementations + +import ( + "context" + + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-api-go/hash" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-api-go/storagegroup" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +type ( + // StorageGroupInfoReceiverParams groups the parameters of + // storage group information receiver. + StorageGroupInfoReceiverParams struct { + SelectiveContainerExecutor SelectiveContainerExecutor + Logger *zap.Logger + } + + sgInfoRecv struct { + executor SelectiveContainerExecutor + log *zap.Logger + } +) + +const locationFinderInstanceFailMsg = "could not create object location finder" + +// ErrIncompleteSGInfo is returned by storage group information receiver +// that could not receive full information. +const ErrIncompleteSGInfo = internal.Error("could not receive full storage group info") + +// PublicSessionToken is a context key for SessionToken. +// FIXME: temp solution for cycle import fix. +// Unify with same const from transformer pkg. +const PublicSessionToken = "public token" + +// BearerToken is a context key for BearerToken. +const BearerToken = "bearer token" + +// ExtendedHeaders is a context key for X-headers. +const ExtendedHeaders = "extended headers" + +func (s *sgInfoRecv) GetSGInfo(ctx context.Context, cid CID, group []ObjectID) (*storagegroup.StorageGroup, error) { + var ( + err error + res = new(storagegroup.StorageGroup) + hashList = make([]hash.Hash, 0, len(group)) + ) + + m := make(map[string]struct{}, len(group)) + for i := range group { + m[group[i].String()] = struct{}{} + } + + // FIXME: hardcoded for simplicity. + // Function is called in next cases: + // - SG transformation on trusted node side (only in this case session token is needed); + // - SG info check on container nodes (token is not needed since system group has extra access); + // - data audit on inner ring nodes (same as previous). 
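+	// Illustrative caller-side sketch (variable names are assumptions): on the
+	// trusted node, tokens and X-headers are attached through context values
+	// under the keys declared above before GetSGInfo is called:
+	//
+	//	ctx = context.WithValue(ctx, PublicSessionToken, sessionToken)
+	//	ctx = context.WithValue(ctx, BearerToken, bearerToken)
+	//	ctx = context.WithValue(ctx, ExtendedHeaders, xHeaders)
+	//	sg, err := recv.GetSGInfo(ctx, cid, group)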
+ var token service.SessionToken + if v, ok := ctx.Value(PublicSessionToken).(service.SessionToken); ok { + token = v + } + + var bearer service.BearerToken + if v, ok := ctx.Value(BearerToken).(service.BearerToken); ok { + bearer = v + } + + var extHdrs []service.ExtendedHeader + if v, ok := ctx.Value(ExtendedHeaders).([]service.ExtendedHeader); ok { + extHdrs = v + } + + if err = s.executor.Head(ctx, &HeadParams{ + GetParams: GetParams{ + SelectiveParams: SelectiveParams{ + CID: cid, + TTL: service.SingleForwardingTTL, + IDList: group, + Breaker: func(addr refs.Address) (cFlag ProgressControlFlag) { + if len(m) == 0 { + cFlag = BreakProgress + } else if _, ok := m[addr.ObjectID.String()]; !ok { + cFlag = NextAddress + } + return + }, + Token: token, + + Bearer: bearer, + + ExtendedHeaders: extHdrs, + }, + Handler: func(_ multiaddr.Multiaddr, obj *object.Object) { + _, hashHeader := obj.LastHeader(object.HeaderType(object.HomoHashHdr)) + if hashHeader == nil { + return + } + + hashList = append(hashList, hashHeader.Value.(*object.Header_HomoHash).HomoHash) + res.ValidationDataSize += obj.SystemHeader.PayloadLength + delete(m, obj.SystemHeader.ID.String()) + }, + }, + FullHeaders: true, + }); err != nil { + return nil, err + } else if len(m) > 0 { + return nil, ErrIncompleteSGInfo + } + + res.ValidationHash, err = hash.Concat(hashList) + + return res, err +} + +// NewStorageGroupInfoReceiver constructs storagegroup.InfoReceiver from SelectiveContainerExecutor. +func NewStorageGroupInfoReceiver(p StorageGroupInfoReceiverParams) (storagegroup.InfoReceiver, error) { + switch { + case p.Logger == nil: + return nil, errors.Wrap(errEmptyLogger, locationFinderInstanceFailMsg) + case p.SelectiveContainerExecutor == nil: + return nil, errors.Wrap(errEmptyObjectsContainerHandler, locationFinderInstanceFailMsg) + } + + return &sgInfoRecv{ + executor: p.SelectiveContainerExecutor, + log: p.Logger, + }, nil +} diff --git a/lib/implementations/transport.go b/lib/implementations/transport.go new file mode 100644 index 000000000..b409be83d --- /dev/null +++ b/lib/implementations/transport.go @@ -0,0 +1,657 @@ +package implementations + +import ( + "context" + "io" + "sync" + "time" + + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-api-go/hash" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/transport" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +/* + File source code includes implementation of unified objects container handler. + Implementation provides the opportunity to perform any logic over object container distributed in network. + Implementation holds placement and object transport implementations in a black box. + Any special logic could be tuned through passing handle parameters. + NOTE: Although the implementation of the other interfaces via OCH is the same, they are still separated in order to avoid mess. +*/ + +type ( + // SelectiveContainerExecutor is an interface the tool that performs + // object operations in container with preconditions. + SelectiveContainerExecutor interface { + Put(context.Context, *PutParams) error + Get(context.Context, *GetParams) error + Head(context.Context, *HeadParams) error + Search(context.Context, *SearchParams) error + RangeHash(context.Context, *RangeHashParams) error + } + + // PutParams groups the parameters + // of selective object Put. 
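+	//
+	// An illustrative call through SelectiveContainerExecutor (executor, ctx,
+	// cid and obj are assumed to be provided by the caller; Handler receives
+	// the success flag of the operation on each node):
+	//
+	//	err := executor.Put(ctx, &PutParams{
+	//		SelectiveParams: SelectiveParams{
+	//			CID:    cid,
+	//			TTL:    service.SingleForwardingTTL,
+	//			IDList: []refs.ObjectID{obj.SystemHeader.ID},
+	//		},
+	//		Object: obj,
+	//		Handler: func(node multiaddr.Multiaddr, ok bool) {
+	//			// ok reports whether Put succeeded on node
+	//		},
+	//	})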
+ PutParams struct { + SelectiveParams + Object *object.Object + Handler func(multiaddr.Multiaddr, bool) + + CopiesNumber uint32 + } + + // GetParams groups the parameters + // of selective object Get. + GetParams struct { + SelectiveParams + Handler func(multiaddr.Multiaddr, *object.Object) + } + + // HeadParams groups the parameters + // of selective object Head. + HeadParams struct { + GetParams + FullHeaders bool + } + + // SearchParams groups the parameters + // of selective object Search. + SearchParams struct { + SelectiveParams + SearchCID refs.CID + SearchQuery []byte + Handler func(multiaddr.Multiaddr, []refs.Address) + } + + // RangeHashParams groups the parameters + // of selective object GetRangeHash. + RangeHashParams struct { + SelectiveParams + Ranges []object.Range + Salt []byte + Handler func(multiaddr.Multiaddr, []hash.Hash) + } + + // SelectiveParams groups the parameters of + // the execution of selective container operation. + SelectiveParams struct { + /* Should be set to true only if service under object transport implementations is served on localhost. */ + ServeLocal bool + + /* Raw option of the request */ + Raw bool + + /* TTL for object transport. All transport operations inherit same value. */ + TTL uint32 + + /* Required ID of processing container. If empty or not set, an error is returned. */ + CID + + /* List of nodes selected for processing. If not specified => nodes will be selected during. */ + Nodes []multiaddr.Multiaddr + + /* + Next two parameters provide the opportunity to process selective objects in container. + At least on of non-empty IDList or Query is required, an error is returned otherwise. + */ + + /* List of objects to process (overlaps query). */ + IDList []refs.ObjectID + /* If no objects is indicated, query is used for selection. */ + Query []byte + + /* + If function provided, it is called after every successful operation. + True result breaks operation performing. + */ + Breaker func(refs.Address) ProgressControlFlag + + /* Public session token */ + Token service.SessionToken + + /* Bearer token */ + Bearer service.BearerToken + + /* Extended headers */ + ExtendedHeaders []service.ExtendedHeader + } + + // ProgressControlFlag is an enumeration of progress control flags. + ProgressControlFlag int + + // ObjectContainerHandlerParams grops the parameters of SelectiveContainerExecutor constructor. + ObjectContainerHandlerParams struct { + NodeLister ContainerNodesLister + Executor ContainerTraverseExecutor + *zap.Logger + } + + simpleTraverser struct { + *sync.Once + list []multiaddr.Multiaddr + } + + selectiveCnrExec struct { + cnl ContainerNodesLister + Executor ContainerTraverseExecutor + log *zap.Logger + } + + metaInfo struct { + ttl uint32 + raw bool + rt object.RequestType + + token service.SessionToken + + bearer service.BearerToken + + extHdrs []service.ExtendedHeader + } + + putInfo struct { + metaInfo + obj *object.Object + cn uint32 + } + + getInfo struct { + metaInfo + addr Address + raw bool + } + + headInfo struct { + getInfo + fullHdr bool + } + + searchInfo struct { + metaInfo + cid CID + query []byte + } + + rangeHashInfo struct { + metaInfo + addr Address + ranges []object.Range + salt []byte + } + + execItems struct { + params SelectiveParams + metaConstructor func(addr Address) transport.MetaInfo + handler transport.ResultHandler + } + + searchTarget struct { + list []refs.Address + } + + // ContainerTraverseExecutor is an interface of + // object operation executor with container traversing. 
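+	//
+	// An illustrative call sketch (exec is assumed to be built with
+	// NewContainerTraverseExecutor; meta, handler and traverser come from the
+	// caller; WorkerPool defaults to a single-routine pool when omitted):
+	//
+	//	exec.Execute(ctx, TraverseParams{
+	//		TransportInfo: meta,      // transport.MetaInfo of the request
+	//		Handler:       handler,   // transport.ResultHandler for responses
+	//		Traverser:     traverser, // yields the next portion of nodes
+	//	})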
+ ContainerTraverseExecutor interface { + Execute(context.Context, TraverseParams) + } + + // TraverseParams groups the parameters of container traversing. + TraverseParams struct { + TransportInfo transport.MetaInfo + Handler transport.ResultHandler + Traverser Traverser + WorkerPool WorkerPool + ExecutionInterceptor func(context.Context, multiaddr.Multiaddr) bool + } + + // WorkerPool is an interface of go-routine pool + WorkerPool interface { + Submit(func()) error + } + + // Traverser is an interface of container traverser. + Traverser interface { + Next(context.Context) []multiaddr.Multiaddr + } + + cnrTraverseExec struct { + transport transport.ObjectTransport + } + + singleRoutinePool struct{} + + emptyReader struct{} +) + +const ( + _ ProgressControlFlag = iota + + // NextAddress is a ProgressControlFlag of to go to the next address of the object. + NextAddress + + // NextNode is a ProgressControlFlag of to go to the next node. + NextNode + + // BreakProgress is a ProgressControlFlag to interrupt the execution. + BreakProgress +) + +const ( + instanceFailMsg = "could not create container objects collector" + errEmptyLogger = internal.Error("empty logger") + errEmptyNodeLister = internal.Error("empty container node lister") + errEmptyTraverseExecutor = internal.Error("empty container traverse executor") + + errSelectiveParams = internal.Error("neither ID list nor query provided") +) + +var errNilObjectTransport = errors.New("object transport is nil") + +func (s *selectiveCnrExec) Put(ctx context.Context, p *PutParams) error { + meta := &putInfo{ + metaInfo: metaInfo{ + ttl: p.TTL, + rt: object.RequestPut, + raw: p.Raw, + + token: p.Token, + + bearer: p.Bearer, + + extHdrs: p.ExtendedHeaders, + }, + obj: p.Object, + cn: p.CopiesNumber, + } + + return s.exec(ctx, &execItems{ + params: p.SelectiveParams, + metaConstructor: func(Address) transport.MetaInfo { return meta }, + handler: p, + }) +} + +func (s *selectiveCnrExec) Get(ctx context.Context, p *GetParams) error { + return s.exec(ctx, &execItems{ + params: p.SelectiveParams, + metaConstructor: func(addr Address) transport.MetaInfo { + return &getInfo{ + metaInfo: metaInfo{ + ttl: p.TTL, + rt: object.RequestGet, + raw: p.Raw, + + token: p.Token, + + bearer: p.Bearer, + + extHdrs: p.ExtendedHeaders, + }, + addr: addr, + raw: p.Raw, + } + }, + handler: p, + }) +} + +func (s *selectiveCnrExec) Head(ctx context.Context, p *HeadParams) error { + return s.exec(ctx, &execItems{ + params: p.SelectiveParams, + metaConstructor: func(addr Address) transport.MetaInfo { + return &headInfo{ + getInfo: getInfo{ + metaInfo: metaInfo{ + ttl: p.TTL, + rt: object.RequestHead, + raw: p.Raw, + + token: p.Token, + + bearer: p.Bearer, + + extHdrs: p.ExtendedHeaders, + }, + addr: addr, + raw: p.Raw, + }, + fullHdr: p.FullHeaders, + } + }, + handler: p, + }) +} + +func (s *selectiveCnrExec) Search(ctx context.Context, p *SearchParams) error { + return s.exec(ctx, &execItems{ + params: p.SelectiveParams, + metaConstructor: func(Address) transport.MetaInfo { + return &searchInfo{ + metaInfo: metaInfo{ + ttl: p.TTL, + rt: object.RequestSearch, + raw: p.Raw, + + token: p.Token, + + bearer: p.Bearer, + + extHdrs: p.ExtendedHeaders, + }, + cid: p.SearchCID, + query: p.SearchQuery, + } + }, + handler: p, + }) +} + +func (s *selectiveCnrExec) RangeHash(ctx context.Context, p *RangeHashParams) error { + return s.exec(ctx, &execItems{ + params: p.SelectiveParams, + metaConstructor: func(addr Address) transport.MetaInfo { + return &rangeHashInfo{ + metaInfo: 
metaInfo{ + ttl: p.TTL, + rt: object.RequestRangeHash, + raw: p.Raw, + + token: p.Token, + + bearer: p.Bearer, + + extHdrs: p.ExtendedHeaders, + }, + addr: addr, + ranges: p.Ranges, + salt: p.Salt, + } + }, + handler: p, + }) +} + +func (s *selectiveCnrExec) exec(ctx context.Context, p *execItems) error { + if err := p.params.validate(); err != nil { + return err + } + + nodes, err := s.prepareNodes(ctx, &p.params) + if err != nil { + return err + } + +loop: + for i := range nodes { + addrList := s.prepareAddrList(ctx, &p.params, nodes[i]) + if len(addrList) == 0 { + continue + } + + for j := range addrList { + if p.params.Breaker != nil { + switch cFlag := p.params.Breaker(addrList[j]); cFlag { + case NextAddress: + continue + case NextNode: + continue loop + case BreakProgress: + break loop + } + } + + s.Executor.Execute(ctx, TraverseParams{ + TransportInfo: p.metaConstructor(addrList[j]), + Handler: p.handler, + Traverser: newSimpleTraverser(nodes[i]), + }) + } + } + + return nil +} + +func (s *SelectiveParams) validate() error { + switch { + case len(s.IDList) == 0 && len(s.Query) == 0: + return errSelectiveParams + default: + return nil + } +} + +func (s *selectiveCnrExec) prepareNodes(ctx context.Context, p *SelectiveParams) ([]multiaddr.Multiaddr, error) { + if len(p.Nodes) > 0 { + return p.Nodes, nil + } + + // If node serves Object transport service on localhost => pass single empty node + if p.ServeLocal { + // all transport implementations will use localhost by default + return []multiaddr.Multiaddr{nil}, nil + } + + // Otherwise use container nodes + return s.cnl.ContainerNodes(ctx, p.CID) +} + +func (s *selectiveCnrExec) prepareAddrList(ctx context.Context, p *SelectiveParams, node multiaddr.Multiaddr) []refs.Address { + var ( + addrList []Address + l = len(p.IDList) + ) + + if l > 0 { + addrList = make([]Address, 0, l) + for i := range p.IDList { + addrList = append(addrList, Address{CID: p.CID, ObjectID: p.IDList[i]}) + } + + return addrList + } + + handler := new(searchTarget) + + s.Executor.Execute(ctx, TraverseParams{ + TransportInfo: &searchInfo{ + metaInfo: metaInfo{ + ttl: p.TTL, + rt: object.RequestSearch, + raw: p.Raw, + + token: p.Token, + + bearer: p.Bearer, + + extHdrs: p.ExtendedHeaders, + }, + cid: p.CID, + query: p.Query, + }, + Handler: handler, + Traverser: newSimpleTraverser(node), + }) + + return handler.list +} + +func newSimpleTraverser(list ...multiaddr.Multiaddr) Traverser { + return &simpleTraverser{ + Once: new(sync.Once), + list: list, + } +} + +func (s *simpleTraverser) Next(context.Context) (res []multiaddr.Multiaddr) { + s.Do(func() { + res = s.list + }) + + return +} + +func (s metaInfo) GetTTL() uint32 { return s.ttl } + +func (s metaInfo) GetTimeout() time.Duration { return 0 } + +func (s metaInfo) GetRaw() bool { return s.raw } + +func (s metaInfo) Type() object.RequestType { return s.rt } + +func (s metaInfo) GetSessionToken() service.SessionToken { return s.token } + +func (s metaInfo) GetBearerToken() service.BearerToken { return s.bearer } + +func (s metaInfo) ExtendedHeaders() []service.ExtendedHeader { return s.extHdrs } + +func (s *putInfo) GetHead() *object.Object { return s.obj } + +func (s *putInfo) Payload() io.Reader { return new(emptyReader) } + +func (*emptyReader) Read(p []byte) (int, error) { return 0, io.EOF } + +func (s *putInfo) CopiesNumber() uint32 { + return s.cn +} + +func (s *getInfo) GetAddress() refs.Address { return s.addr } + +func (s *getInfo) Raw() bool { return s.raw } + +func (s *headInfo) GetFullHeaders() bool 
{ return s.fullHdr } + +func (s *searchInfo) GetCID() refs.CID { return s.cid } + +func (s *searchInfo) GetQuery() []byte { return s.query } + +func (s *rangeHashInfo) GetAddress() refs.Address { return s.addr } + +func (s *rangeHashInfo) GetRanges() []object.Range { return s.ranges } + +func (s *rangeHashInfo) GetSalt() []byte { return s.salt } + +func (s *searchTarget) HandleResult(_ context.Context, _ multiaddr.Multiaddr, r interface{}, e error) { + if e == nil { + s.list = append(s.list, r.([]refs.Address)...) + } +} + +// HandleResult calls Handler with: +// - Multiaddr with argument value; +// - error equality to nil. +func (s *PutParams) HandleResult(_ context.Context, node multiaddr.Multiaddr, _ interface{}, e error) { + s.Handler(node, e == nil) +} + +// HandleResult calls Handler if error argument is nil with: +// - Multiaddr with argument value; +// - result casted to an Object pointer. +func (s *GetParams) HandleResult(_ context.Context, node multiaddr.Multiaddr, r interface{}, e error) { + if e == nil { + s.Handler(node, r.(*object.Object)) + } +} + +// HandleResult calls Handler if error argument is nil with: +// - Multiaddr with argument value; +// - result casted to Address slice. +func (s *SearchParams) HandleResult(_ context.Context, node multiaddr.Multiaddr, r interface{}, e error) { + if e == nil { + s.Handler(node, r.([]refs.Address)) + } +} + +// HandleResult calls Handler if error argument is nil with: +// - Multiaddr with argument value; +// - result casted to Hash slice. +func (s *RangeHashParams) HandleResult(_ context.Context, node multiaddr.Multiaddr, r interface{}, e error) { + if e == nil { + s.Handler(node, r.([]hash.Hash)) + } +} + +func (s *cnrTraverseExec) Execute(ctx context.Context, p TraverseParams) { + if p.WorkerPool == nil { + p.WorkerPool = new(singleRoutinePool) + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + wg := new(sync.WaitGroup) + + for { + select { + case <-ctx.Done(): + return + default: + } + + nodes := p.Traverser.Next(ctx) + if len(nodes) == 0 { + break + } + + for i := range nodes { + node := nodes[i] + + wg.Add(1) + + if err := p.WorkerPool.Submit(func() { + defer wg.Done() + + if p.ExecutionInterceptor != nil && p.ExecutionInterceptor(ctx, node) { + return + } + + s.transport.Transport(ctx, transport.ObjectTransportParams{ + TransportInfo: p.TransportInfo, + TargetNode: node, + ResultHandler: p.Handler, + }) + }); err != nil { + wg.Done() + } + } + + wg.Wait() + } +} + +func (*singleRoutinePool) Submit(fn func()) error { + fn() + return nil +} + +// NewObjectContainerHandler is a SelectiveContainerExecutor constructor. +func NewObjectContainerHandler(p ObjectContainerHandlerParams) (SelectiveContainerExecutor, error) { + switch { + case p.Executor == nil: + return nil, errors.Wrap(errEmptyTraverseExecutor, instanceFailMsg) + case p.Logger == nil: + return nil, errors.Wrap(errEmptyLogger, instanceFailMsg) + case p.NodeLister == nil: + return nil, errors.Wrap(errEmptyNodeLister, instanceFailMsg) + } + + return &selectiveCnrExec{ + cnl: p.NodeLister, + Executor: p.Executor, + log: p.Logger, + }, nil +} + +// NewContainerTraverseExecutor is a ContainerTraverseExecutor executor. 
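+//
+// A minimal wiring sketch (tr is assumed to implement transport.ObjectTransport,
+// lister is assumed to implement ContainerNodesLister):
+//
+//	exec, err := NewContainerTraverseExecutor(tr)
+//	if err != nil {
+//		// nil object transport
+//	}
+//
+//	executor, err := NewObjectContainerHandler(ObjectContainerHandlerParams{
+//		NodeLister: lister,
+//		Executor:   exec,
+//		Logger:     logger,
+//	})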
+func NewContainerTraverseExecutor(t transport.ObjectTransport) (ContainerTraverseExecutor, error) { + if t == nil { + return nil, errNilObjectTransport + } + + return &cnrTraverseExec{transport: t}, nil +} diff --git a/lib/implementations/validation.go b/lib/implementations/validation.go new file mode 100644 index 000000000..4ab858a3d --- /dev/null +++ b/lib/implementations/validation.go @@ -0,0 +1,405 @@ +package implementations + +import ( + "bytes" + "context" + "crypto/ecdsa" + "crypto/sha256" + + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-api-go/hash" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-api-go/service" + crypto "github.com/nspcc-dev/neofs-crypto" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/nspcc-dev/neofs-node/lib/localstore" + "github.com/nspcc-dev/neofs-node/lib/objutil" + "github.com/nspcc-dev/neofs-node/lib/rand" + "github.com/nspcc-dev/neofs-node/lib/replication" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +type ( + objectValidator struct { + as AddressStore + ls localstore.Localstore + executor SelectiveContainerExecutor + log *zap.Logger + + saltSize int + maxRngSize uint64 + rangeCount int + sltr Salitor + verifier objutil.Verifier + } + + // Salitor is a salting data function. + Salitor func(data, salt []byte) []byte + + // ObjectValidatorParams groups th + ObjectValidatorParams struct { + AddressStore AddressStore + Localstore localstore.Localstore + SelectiveContainerExecutor SelectiveContainerExecutor + Logger *zap.Logger + + Salitor Salitor + SaltSize int + MaxPayloadRangeSize uint64 + PayloadRangeCount int + + Verifier objutil.Verifier + } + + localHeadIntegrityVerifier struct { + keyVerifier core.OwnerKeyVerifier + } + + payloadVerifier struct { + } + + localIntegrityVerifier struct { + headVerifier objutil.Verifier + payloadVerifier objutil.Verifier + } +) + +const ( + objectValidatorInstanceFailMsg = "could not create object validator" + errEmptyLocalstore = internal.Error("empty local storage") + errEmptyObjectVerifier = internal.Error("empty object verifier") + + defaultSaltSize = 64 // bytes + defaultPayloadRangeCount = 3 + defaultMaxPayloadRangeSize = 64 +) + +const ( + errBrokenHeaderStructure = internal.Error("broken header structure") + + errMissingPayloadChecksumHeader = internal.Error("missing payload checksum header") + errWrongPayloadChecksum = internal.Error("wrong payload checksum") +) + +func (s *objectValidator) Verify(ctx context.Context, params *replication.ObjectVerificationParams) bool { + selfAddr, err := s.as.SelfAddr() + if err != nil { + s.log.Debug("receive self address failure", zap.Error(err)) + return false + } + + if params.Node == nil || params.Node.Equal(selfAddr) { + return s.verifyLocal(ctx, params.Address) + } + + return s.verifyRemote(ctx, params) +} + +func (s *objectValidator) verifyLocal(ctx context.Context, addr Address) bool { + var ( + err error + obj *Object + ) + + if obj, err = s.ls.Get(addr); err != nil { + s.log.Debug("get local meta information failure", zap.Error(err)) + return false + } else if err = s.verifier.Verify(ctx, obj); err != nil { + s.log.Debug("integrity check failure", zap.Error(err)) + } + + return err == nil +} + +func (s *objectValidator) verifyRemote(ctx context.Context, params *replication.ObjectVerificationParams) bool { + var ( + receivedObj *Object + valid bool + ) + + defer func() { + if params.Handler != nil && receivedObj != 
nil { + params.Handler(valid, receivedObj) + } + }() + + p := &HeadParams{ + GetParams: GetParams{ + SelectiveParams: SelectiveParams{ + CID: params.CID, + Nodes: []multiaddr.Multiaddr{params.Node}, + TTL: service.NonForwardingTTL, + IDList: []ObjectID{params.ObjectID}, + Raw: true, + }, + Handler: func(_ multiaddr.Multiaddr, obj *object.Object) { + receivedObj = obj + valid = s.verifier.Verify(ctx, obj) == nil + }, + }, + FullHeaders: true, + } + + if err := s.executor.Head(ctx, p); err != nil || !valid { + return false + } else if receivedObj.SystemHeader.PayloadLength <= 0 || receivedObj.IsLinking() { + return true + } + + if !params.LocalInvalid { + has, err := s.ls.Has(params.Address) + if err == nil && has { + obj, err := s.ls.Get(params.Address) + if err == nil { + return s.verifyThroughHashes(ctx, obj, params.Node) + } + } + } + + valid = false + _ = s.executor.Get(ctx, &p.GetParams) + + return valid +} + +func (s *objectValidator) verifyThroughHashes(ctx context.Context, obj *Object, node multiaddr.Multiaddr) (valid bool) { + var ( + salt = generateSalt(s.saltSize) + rngs = generateRanges(obj.SystemHeader.PayloadLength, s.maxRngSize, s.rangeCount) + ) + + _ = s.executor.RangeHash(ctx, &RangeHashParams{ + SelectiveParams: SelectiveParams{ + CID: obj.SystemHeader.CID, + Nodes: []multiaddr.Multiaddr{node}, + TTL: service.NonForwardingTTL, + IDList: []ObjectID{obj.SystemHeader.ID}, + }, + Ranges: rngs, + Salt: salt, + Handler: func(node multiaddr.Multiaddr, hashes []hash.Hash) { + valid = compareHashes(s.sltr, obj.Payload, salt, rngs, hashes) + }, + }) + + return +} + +func compareHashes(sltr Salitor, payload, salt []byte, rngs []object.Range, hashes []hash.Hash) bool { + if len(rngs) != len(hashes) { + return false + } + + for i := range rngs { + saltPayloadPart := sltr(payload[rngs[i].Offset:rngs[i].Offset+rngs[i].Length], salt) + if !hashes[i].Equal(hash.Sum(saltPayloadPart)) { + return false + } + } + + return true +} + +func generateRanges(payloadSize, maxRangeSize uint64, count int) []object.Range { + res := make([]object.Range, count) + + l := min(payloadSize, maxRangeSize) + + for i := 0; i < count; i++ { + res[i].Length = l + res[i].Offset = rand.Uint64(rand.New(), int64(payloadSize-l)) + } + + return res +} + +func min(a, b uint64) uint64 { + if a < b { + return a + } + + return b +} + +func generateSalt(saltSize int) []byte { + salt := make([]byte, saltSize) + if _, err := rand.Read(salt); err != nil { + return nil + } + + return salt +} + +// NewObjectValidator constructs universal replication.ObjectVerifier. 
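+//
+// Non-positive SaltSize, PayloadRangeCount and MaxPayloadRangeSize fall back
+// to the package defaults. A minimal construction sketch (argument values are
+// assumed to be prepared by the caller):
+//
+//	verifier, err := NewObjectValidator(&ObjectValidatorParams{
+//		AddressStore:               as,
+//		Localstore:                 store,
+//		SelectiveContainerExecutor: executor,
+//		Logger:                     logger,
+//		Salitor:                    saltFn, // func(data, salt []byte) []byte
+//		Verifier:                   objVerifier,
+//	})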
+func NewObjectValidator(p *ObjectValidatorParams) (replication.ObjectVerifier, error) { + switch { + case p.Logger == nil: + return nil, errors.Wrap(errEmptyLogger, objectValidatorInstanceFailMsg) + case p.AddressStore == nil: + return nil, errors.Wrap(errEmptyAddressStore, objectValidatorInstanceFailMsg) + case p.Localstore == nil: + return nil, errors.Wrap(errEmptyLocalstore, objectValidatorInstanceFailMsg) + case p.Verifier == nil: + return nil, errors.Wrap(errEmptyObjectVerifier, objectValidatorInstanceFailMsg) + } + + if p.SaltSize <= 0 { + p.SaltSize = defaultSaltSize + } + + if p.PayloadRangeCount <= 0 { + p.PayloadRangeCount = defaultPayloadRangeCount + } + + if p.MaxPayloadRangeSize <= 0 { + p.MaxPayloadRangeSize = defaultMaxPayloadRangeSize + } + + return &objectValidator{ + as: p.AddressStore, + ls: p.Localstore, + executor: p.SelectiveContainerExecutor, + log: p.Logger, + saltSize: p.SaltSize, + maxRngSize: p.MaxPayloadRangeSize, + rangeCount: p.PayloadRangeCount, + sltr: p.Salitor, + verifier: p.Verifier, + }, nil +} + +// NewLocalHeadIntegrityVerifier constructs local object head verifier and returns objutil.Verifier interface. +func NewLocalHeadIntegrityVerifier(keyVerifier core.OwnerKeyVerifier) (objutil.Verifier, error) { + if keyVerifier == nil { + return nil, core.ErrNilOwnerKeyVerifier + } + + return &localHeadIntegrityVerifier{ + keyVerifier: keyVerifier, + }, nil +} + +// NewLocalIntegrityVerifier constructs local object verifier and returns objutil.Verifier interface. +func NewLocalIntegrityVerifier(keyVerifier core.OwnerKeyVerifier) (objutil.Verifier, error) { + if keyVerifier == nil { + return nil, core.ErrNilOwnerKeyVerifier + } + + return &localIntegrityVerifier{ + headVerifier: &localHeadIntegrityVerifier{ + keyVerifier: keyVerifier, + }, + payloadVerifier: new(payloadVerifier), + }, nil +} + +// NewPayloadVerifier constructs object payload verifier and returns objutil.Verifier. +func NewPayloadVerifier() objutil.Verifier { + return new(payloadVerifier) +} + +type hdrOwnerKeyContainer struct { + owner refs.OwnerID + key []byte +} + +func (s hdrOwnerKeyContainer) GetOwnerID() refs.OwnerID { + return s.owner +} + +func (s hdrOwnerKeyContainer) GetOwnerKey() []byte { + return s.key +} + +func (s *localHeadIntegrityVerifier) Verify(ctx context.Context, obj *Object) error { + var ( + checkKey *ecdsa.PublicKey + ownerKeyCnr core.OwnerKeyContainer + ) + + if _, h := obj.LastHeader(object.HeaderType(object.TokenHdr)); h != nil { + token := h.GetValue().(*object.Header_Token).Token + + if err := service.VerifySignatureWithKey( + crypto.UnmarshalPublicKey(token.GetOwnerKey()), + service.NewVerifiedSessionToken(token), + ); err != nil { + return err + } + + ownerKeyCnr = token + + checkKey = crypto.UnmarshalPublicKey(token.GetSessionKey()) + } else if _, h := obj.LastHeader(object.HeaderType(object.PublicKeyHdr)); h != nil { + pkHdr := h.GetValue().(*object.Header_PublicKey) + if pkHdr != nil && pkHdr.PublicKey != nil { + val := pkHdr.PublicKey.GetValue() + + ownerKeyCnr = &hdrOwnerKeyContainer{ + owner: obj.GetSystemHeader().OwnerID, + key: val, + } + + checkKey = crypto.UnmarshalPublicKey(val) + } + } + + if ownerKeyCnr == nil { + return core.ErrNilOwnerKeyContainer + } else if err := s.keyVerifier.VerifyKey(ctx, ownerKeyCnr); err != nil { + return err + } + + return verifyObjectIntegrity(obj, checkKey) +} + +// verifyObjectIntegrity verifies integrity of object header. 
+// Returns error if object +// - does not contains integrity header; +// - integrity header is not a last header in object; +// - integrity header signature is broken. +func verifyObjectIntegrity(obj *Object, key *ecdsa.PublicKey) error { + n, h := obj.LastHeader(object.HeaderType(object.IntegrityHdr)) + + if l := len(obj.Headers); l <= 0 || n != l-1 { + return errBrokenHeaderStructure + } + + integrityHdr := h.Value.(*object.Header_Integrity).Integrity + if integrityHdr == nil { + return errBrokenHeaderStructure + } + + data, err := objutil.MarshalHeaders(obj, n) + if err != nil { + return err + } + + hdrChecksum := sha256.Sum256(data) + + return crypto.Verify(key, hdrChecksum[:], integrityHdr.ChecksumSignature) +} + +func (s *payloadVerifier) Verify(_ context.Context, obj *Object) error { + if _, h := obj.LastHeader(object.HeaderType(object.PayloadChecksumHdr)); h == nil { + return errMissingPayloadChecksumHeader + } else if checksum := sha256.Sum256(obj.Payload); !bytes.Equal( + checksum[:], + h.Value.(*object.Header_PayloadChecksum).PayloadChecksum, + ) { + return errWrongPayloadChecksum + } + + return nil +} + +func (s *localIntegrityVerifier) Verify(ctx context.Context, obj *Object) error { + if err := s.headVerifier.Verify(ctx, obj); err != nil { + return err + } + + return s.payloadVerifier.Verify(ctx, obj) +} diff --git a/lib/implementations/validation_test.go b/lib/implementations/validation_test.go new file mode 100644 index 000000000..f795ebd4b --- /dev/null +++ b/lib/implementations/validation_test.go @@ -0,0 +1,273 @@ +package implementations + +import ( + "context" + "crypto/ecdsa" + "crypto/sha256" + "math/rand" + "testing" + + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-api-go/service" + crypto "github.com/nspcc-dev/neofs-crypto" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/nspcc-dev/neofs-node/lib/localstore" + "github.com/nspcc-dev/neofs-node/lib/objutil" + "github.com/nspcc-dev/neofs-node/lib/test" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +type testEntity struct { + err error +} + +func (s *testEntity) Verify(context.Context, *object.Object) error { return s.err } + +func (s *testEntity) SelfAddr() (multiaddr.Multiaddr, error) { panic("implement me") } +func (s *testEntity) Put(context.Context, *localstore.Object) error { panic("implement me") } +func (s *testEntity) Get(localstore.Address) (*localstore.Object, error) { panic("implement me") } +func (s *testEntity) Del(localstore.Address) error { panic("implement me") } +func (s *testEntity) Meta(localstore.Address) (*localstore.ObjectMeta, error) { panic("implement me") } +func (s *testEntity) Has(localstore.Address) (bool, error) { panic("implement me") } +func (s *testEntity) ObjectsCount() (uint64, error) { panic("implement me") } +func (s *testEntity) Size() int64 { panic("implement me") } +func (s *testEntity) Iterate(localstore.FilterPipeline, localstore.MetaHandler) error { + panic("implement me") +} + +func (s *testEntity) PRead(ctx context.Context, addr refs.Address, rng object.Range) ([]byte, error) { + panic("implement me") +} + +func (s *testEntity) VerifyKey(context.Context, core.OwnerKeyContainer) error { + return s.err +} + +func TestNewObjectValidator(t *testing.T) { + validParams := ObjectValidatorParams{ + Logger: zap.L(), + AddressStore: new(testEntity), + Localstore: 
new(testEntity), + Verifier: new(testEntity), + } + + t.Run("valid params", func(t *testing.T) { + s, err := NewObjectValidator(&validParams) + require.NoError(t, err) + require.NotNil(t, s) + }) + t.Run("fail on empty local storage", func(t *testing.T) { + p := validParams + p.Localstore = nil + _, err := NewObjectValidator(&p) + require.EqualError(t, err, errors.Wrap(errEmptyLocalstore, objectValidatorInstanceFailMsg).Error()) + }) + t.Run("fail on empty logger", func(t *testing.T) { + p := validParams + p.Logger = nil + _, err := NewObjectValidator(&p) + require.EqualError(t, err, errors.Wrap(errEmptyLogger, objectValidatorInstanceFailMsg).Error()) + }) +} + +func TestNewLocalIntegrityVerifier(t *testing.T) { + var ( + err error + verifier objutil.Verifier + keyVerifier = new(testEntity) + ) + + _, err = NewLocalHeadIntegrityVerifier(nil) + require.EqualError(t, err, core.ErrNilOwnerKeyVerifier.Error()) + + _, err = NewLocalIntegrityVerifier(nil) + require.EqualError(t, err, core.ErrNilOwnerKeyVerifier.Error()) + + verifier, err = NewLocalHeadIntegrityVerifier(keyVerifier) + require.NoError(t, err) + require.NotNil(t, verifier) + + verifier, err = NewLocalIntegrityVerifier(keyVerifier) + require.NoError(t, err) + require.NotNil(t, verifier) +} + +func TestLocalHeadIntegrityVerifier_Verify(t *testing.T) { + var ( + ctx = context.TODO() + ownerPrivateKey = test.DecodeKey(0) + ownerPublicKey = &ownerPrivateKey.PublicKey + sessionPrivateKey = test.DecodeKey(1) + sessionPublicKey = &sessionPrivateKey.PublicKey + ) + + ownerID, err := refs.NewOwnerID(ownerPublicKey) + require.NoError(t, err) + + s, err := NewLocalIntegrityVerifier(core.NewNeoKeyVerifier()) + require.NoError(t, err) + + okItems := []func() *Object{ + // correct object w/ session token + func() *Object { + token := new(service.Token) + token.SetOwnerID(ownerID) + token.SetSessionKey(crypto.MarshalPublicKey(sessionPublicKey)) + + require.NoError(t, + service.AddSignatureWithKey( + ownerPrivateKey, + service.NewSignedSessionToken(token), + ), + ) + + obj := new(Object) + obj.AddHeader(&object.Header{ + Value: &object.Header_Token{ + Token: token, + }, + }) + + obj.SetPayload([]byte{1, 2, 3}) + addPayloadChecksum(obj) + + addHeadersChecksum(t, obj, sessionPrivateKey) + + return obj + }, + // correct object w/o session token + func() *Object { + obj := new(Object) + obj.SystemHeader.OwnerID = ownerID + obj.SetPayload([]byte{1, 2, 3}) + + addPayloadChecksum(obj) + + obj.AddHeader(&object.Header{ + Value: &object.Header_PublicKey{ + PublicKey: &object.PublicKey{ + Value: crypto.MarshalPublicKey(ownerPublicKey), + }, + }, + }) + + addHeadersChecksum(t, obj, ownerPrivateKey) + + return obj + }, + } + + failItems := []func() *Object{} + + for _, item := range okItems { + require.NoError(t, s.Verify(ctx, item())) + } + + for _, item := range failItems { + require.Error(t, s.Verify(ctx, item())) + } +} + +func addPayloadChecksum(obj *Object) { + payloadChecksum := sha256.Sum256(obj.GetPayload()) + + obj.AddHeader(&object.Header{ + Value: &object.Header_PayloadChecksum{ + PayloadChecksum: payloadChecksum[:], + }, + }) +} + +func addHeadersChecksum(t *testing.T, obj *Object, key *ecdsa.PrivateKey) { + headersData, err := objutil.MarshalHeaders(obj, len(obj.Headers)) + require.NoError(t, err) + + headersChecksum := sha256.Sum256(headersData) + + integrityHdr := new(object.IntegrityHeader) + integrityHdr.SetHeadersChecksum(headersChecksum[:]) + + require.NoError(t, service.AddSignatureWithKey(key, integrityHdr)) + + 
obj.AddHeader(&object.Header{ + Value: &object.Header_Integrity{ + Integrity: integrityHdr, + }, + }) +} + +func TestPayloadVerifier_Verify(t *testing.T) { + ctx := context.TODO() + verifier := new(payloadVerifier) + + t.Run("missing header", func(t *testing.T) { + obj := new(Object) + require.EqualError(t, verifier.Verify(ctx, obj), errMissingPayloadChecksumHeader.Error()) + }) + + t.Run("correct result", func(t *testing.T) { + payload := testData(t, 10) + + cs := sha256.Sum256(payload) + hdr := &object.Header_PayloadChecksum{PayloadChecksum: cs[:]} + + obj := &Object{ + Headers: []object.Header{{Value: hdr}}, + Payload: payload, + } + + require.NoError(t, verifier.Verify(ctx, obj)) + + hdr.PayloadChecksum[0]++ + require.EqualError(t, verifier.Verify(ctx, obj), errWrongPayloadChecksum.Error()) + + hdr.PayloadChecksum[0]-- + obj.Payload[0]++ + require.EqualError(t, verifier.Verify(ctx, obj), errWrongPayloadChecksum.Error()) + }) +} + +func TestLocalIntegrityVerifier_Verify(t *testing.T) { + ctx := context.TODO() + obj := new(Object) + + t.Run("head verification failure", func(t *testing.T) { + hErr := internal.Error("test error for head verifier") + + s := &localIntegrityVerifier{ + headVerifier: &testEntity{ + err: hErr, // force head verifier to return hErr + }, + } + + require.EqualError(t, s.Verify(ctx, obj), hErr.Error()) + }) + + t.Run("correct result", func(t *testing.T) { + pErr := internal.Error("test error for payload verifier") + + s := &localIntegrityVerifier{ + headVerifier: new(testEntity), + payloadVerifier: &testEntity{ + err: pErr, // force payload verifier to return hErr + }, + } + + require.EqualError(t, s.Verify(ctx, obj), pErr.Error()) + }) +} + +// testData returns size bytes of random data. +func testData(t *testing.T, size int) []byte { + res := make([]byte, size) + _, err := rand.Read(res) + require.NoError(t, err) + return res +} + +// TODO: write functionality tests diff --git a/lib/ir/info.go b/lib/ir/info.go new file mode 100644 index 000000000..991a1efad --- /dev/null +++ b/lib/ir/info.go @@ -0,0 +1,17 @@ +package ir + +// Info is a structure that groups the information +// about inner ring. +type Info struct { + nodes []Node +} + +// SetNodes is an IR node list setter. +func (s *Info) SetNodes(v []Node) { + s.nodes = v +} + +// Nodes is an IR node list getter. +func (s Info) Nodes() []Node { + return s.nodes +} diff --git a/lib/ir/info_test.go b/lib/ir/info_test.go new file mode 100644 index 000000000..6b1f3df4b --- /dev/null +++ b/lib/ir/info_test.go @@ -0,0 +1,25 @@ +package ir + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestInfo(t *testing.T) { + s := Info{} + + n1 := Node{} + n1.SetKey([]byte{1, 2, 3}) + + n2 := Node{} + n2.SetKey([]byte{4, 5, 6}) + + nodes := []Node{ + n1, + n2, + } + s.SetNodes(nodes) + + require.Equal(t, nodes, s.Nodes()) +} diff --git a/lib/ir/node.go b/lib/ir/node.go new file mode 100644 index 000000000..c1a765b5d --- /dev/null +++ b/lib/ir/node.go @@ -0,0 +1,17 @@ +package ir + +// Node is a structure that groups +// the information about IR node. +type Node struct { + key []byte +} + +// SetKey is an IR node public key setter. +func (s *Node) SetKey(v []byte) { + s.key = v +} + +// Key is an IR node public key getter. 
+func (s Node) Key() []byte { + return s.key +} diff --git a/lib/ir/node_test.go b/lib/ir/node_test.go new file mode 100644 index 000000000..9663caf9c --- /dev/null +++ b/lib/ir/node_test.go @@ -0,0 +1,16 @@ +package ir + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNode(t *testing.T) { + s := Node{} + + key := []byte{1, 2, 3} + s.SetKey(key) + + require.Equal(t, key, s.Key()) +} diff --git a/lib/ir/storage.go b/lib/ir/storage.go new file mode 100644 index 000000000..8df21933d --- /dev/null +++ b/lib/ir/storage.go @@ -0,0 +1,94 @@ +package ir + +import ( + "bytes" + + crypto "github.com/nspcc-dev/neofs-crypto" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/pkg/errors" +) + +// Storage is an interface of the storage of info about NeoFS IR. +type Storage interface { + GetIRInfo(GetInfoParams) (*GetInfoResult, error) +} + +// GetInfoParams is a structure that groups the parameters +// for IR info receiving operation. +type GetInfoParams struct { +} + +// GetInfoResult is a structure that groups +// values returned by IR info receiving operation. +type GetInfoResult struct { + info Info +} + +// ErrNilStorage is returned by functions that expect +// a non-nil Storage, but received nil. +const ErrNilStorage = internal.Error("inner ring storage is nil") + +// SetInfo is an IR info setter. +func (s *GetInfoResult) SetInfo(v Info) { + s.info = v +} + +// Info is an IR info getter. +func (s GetInfoResult) Info() Info { + return s.info +} + +// BinaryKeyList returns the list of binary public key of IR nodes. +// +// If passed Storage is nil, ErrNilStorage returns. +func BinaryKeyList(storage Storage) ([][]byte, error) { + if storage == nil { + return nil, ErrNilStorage + } + + // get IR info + getRes, err := storage.GetIRInfo(GetInfoParams{}) + if err != nil { + return nil, errors.Wrap( + err, + "could not get information about IR", + ) + } + + nodes := getRes.Info().Nodes() + + keys := make([][]byte, 0, len(nodes)) + + for i := range nodes { + keys = append(keys, nodes[i].Key()) + } + + return keys, nil +} + +// IsInnerRingKey checks if the passed argument is the +// key of one of IR nodes. +// +// Uses BinaryKeyList function to receive the key list of IR nodes internally. +// +// If passed key slice is empty, crypto.ErrEmptyPublicKey returns immediately. +func IsInnerRingKey(storage Storage, key []byte) (bool, error) { + // check key emptiness + // TODO: summarize the void check to a full IR key-format check. 
+ if len(key) == 0 { + return false, crypto.ErrEmptyPublicKey + } + + irKeys, err := BinaryKeyList(storage) + if err != nil { + return false, err + } + + for i := range irKeys { + if bytes.Equal(irKeys[i], key) { + return true, nil + } + } + + return false, nil +} diff --git a/lib/ir/storage_test.go b/lib/ir/storage_test.go new file mode 100644 index 000000000..71a654847 --- /dev/null +++ b/lib/ir/storage_test.go @@ -0,0 +1,101 @@ +package ir + +import ( + "testing" + + crypto "github.com/nspcc-dev/neofs-crypto" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" +) + +type testInfoReceiver struct { + keys [][]byte + + err error +} + +func (s testInfoReceiver) GetIRInfo(GetInfoParams) (*GetInfoResult, error) { + if s.err != nil { + return nil, s.err + } + + nodes := make([]Node, 0, len(s.keys)) + + for i := range s.keys { + node := Node{} + node.SetKey(s.keys[i]) + + nodes = append(nodes, node) + } + + info := Info{} + info.SetNodes(nodes) + + res := new(GetInfoResult) + res.SetInfo(info) + + return res, nil +} + +func (s *testInfoReceiver) addKey(key []byte) { + s.keys = append(s.keys, key) +} + +func TestGetInfoResult(t *testing.T) { + s := GetInfoResult{} + + info := Info{} + + n := Node{} + n.SetKey([]byte{1, 2, 3}) + + info.SetNodes([]Node{ + n, + }) + + s.SetInfo(info) + + require.Equal(t, info, s.Info()) +} + +func TestIsInnerRingKey(t *testing.T) { + var ( + res bool + err error + s = new(testInfoReceiver) + ) + + // empty public key + res, err = IsInnerRingKey(nil, nil) + require.EqualError(t, err, crypto.ErrEmptyPublicKey.Error()) + + key := []byte{1, 2, 3} + + // nil Storage + res, err = IsInnerRingKey(nil, key) + require.EqualError(t, err, ErrNilStorage.Error()) + + // force Storage to return an error + s.err = errors.New("some error") + + // Storage error + res, err = IsInnerRingKey(s, key) + require.EqualError(t, errors.Cause(err), s.err.Error()) + + // reset Storage error + s.err = nil + + // IR keys don't contain key + s.addKey(append(key, 1)) + + res, err = IsInnerRingKey(s, key) + require.NoError(t, err) + require.False(t, res) + + // IR keys contain key + s.addKey(key) + + res, err = IsInnerRingKey(s, key) + require.NoError(t, err) + require.True(t, res) +} diff --git a/lib/localstore/alias.go b/lib/localstore/alias.go new file mode 100644 index 000000000..03053f48b --- /dev/null +++ b/lib/localstore/alias.go @@ -0,0 +1,35 @@ +package localstore + +import ( + "github.com/nspcc-dev/neofs-api-go/hash" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/refs" +) + +// CID is a type alias of +// CID from refs package of neofs-api-go. +type CID = refs.CID + +// SGID is a type alias of +// SGID from refs package of neofs-api-go. +type SGID = refs.ObjectID + +// Header is a type alias of +// Header from object package of neofs-api-go. +type Header = object.Header + +// Object is a type alias of +// Object from object package of neofs-api-go. +type Object = object.Object + +// ObjectID is a type alias of +// ObjectID from refs package of neofs-api-go. +type ObjectID = refs.ObjectID + +// Address is a type alias of +// Address from refs package of neofs-api-go. +type Address = refs.Address + +// Hash is a type alias of +// Hash from hash package of neofs-api-go. 
+type Hash = hash.Hash diff --git a/lib/localstore/del.go b/lib/localstore/del.go new file mode 100644 index 000000000..f09f40868 --- /dev/null +++ b/lib/localstore/del.go @@ -0,0 +1,38 @@ +package localstore + +import ( + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-node/lib/metrics" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +func (l *localstore) Del(key refs.Address) error { + k, err := key.Hash() + if err != nil { + return errors.Wrap(err, "Localstore Del failed on key.Marshal") + } + + // try to fetch object for metrics + obj, err := l.Get(key) + if err != nil { + l.log.Warn("localstore Del failed on localstore.Get", zap.Error(err)) + } + + if err := l.blobBucket.Del(k); err != nil { + l.log.Warn("Localstore Del failed on BlobBucket.Del", zap.Error(err)) + } + + if err := l.metaBucket.Del(k); err != nil { + return errors.Wrap(err, "Localstore Del failed on MetaBucket.Del") + } + + if obj != nil { + l.col.UpdateContainer( + key.CID, + obj.SystemHeader.PayloadLength, + metrics.RemSpace) + } + + return nil +} diff --git a/lib/localstore/filter.go b/lib/localstore/filter.go new file mode 100644 index 000000000..a568e7d9b --- /dev/null +++ b/lib/localstore/filter.go @@ -0,0 +1,306 @@ +package localstore + +import ( + "context" + "math" + "sort" + "sync" + + "github.com/nspcc-dev/neofs-node/internal" + "github.com/pkg/errors" +) + +type ( + // FilterCode is an enumeration of filter return codes. + FilterCode int + + // PriorityFlag is an enumeration of priority flags. + PriorityFlag int + + filterPipelineSet []FilterPipeline + + // FilterFunc is a function that checks whether an ObjectMeta matches a specific criterion. + FilterFunc func(ctx context.Context, meta *ObjectMeta) *FilterResult + + // FilterResult groups of ObjectMeta filter result values. + FilterResult struct { + c FilterCode + + e error + } + + // FilterPipeline is an interface of ObjectMeta filtering tool with sub-filters and priorities. + FilterPipeline interface { + Pass(ctx context.Context, meta *ObjectMeta) *FilterResult + PutSubFilter(params SubFilterParams) error + GetPriority() uint64 + SetPriority(uint64) + GetName() string + } + + // FilterParams groups the parameters of FilterPipeline constructor. + FilterParams struct { + Name string + Priority uint64 + FilterFunc FilterFunc + } + + // SubFilterParams groups the parameters of sub-filter registration. + SubFilterParams struct { + PriorityFlag + FilterPipeline + OnIgnore FilterCode + OnPass FilterCode + OnFail FilterCode + } + + filterPipeline struct { + *sync.RWMutex + + name string + pri uint64 + filterFn FilterFunc + + maxSubPri uint64 + mSubResult map[string]map[FilterCode]FilterCode + subFilters []FilterPipeline + } +) + +const ( + // PriorityValue is a PriorityFlag of the sub-filter registration with GetPriority() value. + PriorityValue PriorityFlag = iota + + // PriorityMax is a PriorityFlag of the sub-filter registration with maximum priority. + PriorityMax + + // PriorityMin is a PriorityFlag of the sub-filter registration with minimum priority. + PriorityMin +) + +const ( + // CodeUndefined is a undefined FilterCode. + CodeUndefined FilterCode = iota + + // CodePass is a FilterCode of filter passage. + CodePass + + // CodeFail is a FilterCode of filter failure. + CodeFail + + // CodeIgnore is a FilterCode of filter ignoring. 
+ CodeIgnore +) + +var ( + rPass = &FilterResult{ + c: CodePass, + } + + rFail = &FilterResult{ + c: CodeFail, + } + + rIgnore = &FilterResult{ + c: CodeIgnore, + } + + rUndefined = &FilterResult{ + c: CodeUndefined, + } +) + +// ResultPass returns the FilterResult with CodePass code and nil error. +func ResultPass() *FilterResult { + return rPass +} + +// ResultFail returns the FilterResult with CodeFail code and nil error. +func ResultFail() *FilterResult { + return rFail +} + +// ResultIgnore returns the FilterResult with CodeIgnore code and nil error. +func ResultIgnore() *FilterResult { + return rIgnore +} + +// ResultUndefined returns the FilterResult with CodeUndefined code and nil error. +func ResultUndefined() *FilterResult { + return rUndefined +} + +// ResultWithError returns the FilterResult with passed code and error. +func ResultWithError(c FilterCode, e error) *FilterResult { + return &FilterResult{ + e: e, + c: c, + } +} + +// Code returns the filter result code. +func (s *FilterResult) Code() FilterCode { + return s.c +} + +// Err returns the filter result error. +func (s *FilterResult) Err() error { + return s.e +} + +func (f filterPipelineSet) Len() int { return len(f) } +func (f filterPipelineSet) Less(i, j int) bool { return f[i].GetPriority() > f[j].GetPriority() } +func (f filterPipelineSet) Swap(i, j int) { f[i], f[j] = f[j], f[i] } + +func (r FilterCode) String() string { + switch r { + case CodePass: + return "PASSED" + case CodeFail: + return "FAILED" + case CodeIgnore: + return "IGNORED" + default: + return "UNDEFINED" + } +} + +// NewFilter is a FilterPipeline constructor. +func NewFilter(p *FilterParams) FilterPipeline { + return &filterPipeline{ + RWMutex: new(sync.RWMutex), + name: p.Name, + pri: p.Priority, + filterFn: p.FilterFunc, + mSubResult: make(map[string]map[FilterCode]FilterCode), + } +} + +// AllPassIncludingFilter returns FilterPipeline with sub-filters composed from parameters. +// Result filter fails with CodeFail code if any of the sub-filters returns not a CodePass code. 
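+//
+// A minimal usage sketch (the filter names are illustrative; cidList and epoch
+// are assumed to be provided by the caller):
+//
+//	fp, err := AllPassIncludingFilter("STORAGE_FILTER",
+//		&FilterParams{Name: "CID_FILTER", FilterFunc: ContainerFilterFunc(cidList)},
+//		&FilterParams{Name: "EPOCH_FILTER", FilterFunc: StoredEarlierThanFilterFunc(epoch)},
+//	)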
+func AllPassIncludingFilter(name string, params ...*FilterParams) (FilterPipeline, error) { + res := NewFilter(&FilterParams{ + Name: name, + FilterFunc: SkippingFilterFunc, + }) + + for i := range params { + if err := res.PutSubFilter(SubFilterParams{ + FilterPipeline: NewFilter(params[i]), + OnIgnore: CodeFail, + OnFail: CodeFail, + }); err != nil { + return nil, errors.Wrap(err, "could not create all pass including filter") + } + } + + return res, nil +} + +func (p *filterPipeline) Pass(ctx context.Context, meta *ObjectMeta) *FilterResult { + p.RLock() + defer p.RUnlock() + + for i := range p.subFilters { + subResult := p.subFilters[i].Pass(ctx, meta) + subName := p.subFilters[i].GetName() + + cSub := subResult.Code() + + if cSub <= CodeUndefined { + return ResultUndefined() + } + + if cFin := p.mSubResult[subName][cSub]; cFin != CodeIgnore { + return ResultWithError(cFin, subResult.Err()) + } + } + + if p.filterFn == nil { + return ResultUndefined() + } + + return p.filterFn(ctx, meta) +} + +func (p *filterPipeline) PutSubFilter(params SubFilterParams) error { + p.Lock() + defer p.Unlock() + + if params.FilterPipeline == nil { + return internal.Error("could not put sub filter: empty filter pipeline") + } + + name := params.FilterPipeline.GetName() + if _, ok := p.mSubResult[name]; ok { + return errors.Errorf("filter %s is already in pipeline %s", name, p.GetName()) + } + + if params.PriorityFlag != PriorityMin { + if pri := params.FilterPipeline.GetPriority(); pri < math.MaxUint64 { + params.FilterPipeline.SetPriority(pri + 1) + } + } else { + params.FilterPipeline.SetPriority(0) + } + + switch pri := params.FilterPipeline.GetPriority(); params.PriorityFlag { + case PriorityMax: + if p.maxSubPri < math.MaxUint64 { + p.maxSubPri++ + } + + params.FilterPipeline.SetPriority(p.maxSubPri) + case PriorityValue: + if pri > p.maxSubPri { + p.maxSubPri = pri + } + } + + if params.OnFail <= 0 { + params.OnFail = CodeIgnore + } + + if params.OnIgnore <= 0 { + params.OnIgnore = CodeIgnore + } + + if params.OnPass <= 0 { + params.OnPass = CodeIgnore + } + + p.mSubResult[name] = map[FilterCode]FilterCode{ + CodePass: params.OnPass, + CodeIgnore: params.OnIgnore, + CodeFail: params.OnFail, + } + + p.subFilters = append(p.subFilters, params.FilterPipeline) + + sort.Sort(filterPipelineSet(p.subFilters)) + + return nil +} + +func (p *filterPipeline) GetPriority() uint64 { + p.RLock() + defer p.RUnlock() + + return p.pri +} +func (p *filterPipeline) SetPriority(pri uint64) { + p.Lock() + p.pri = pri + p.Unlock() +} + +func (p *filterPipeline) GetName() string { + p.RLock() + defer p.RUnlock() + + if p.name == "" { + return "FILTER_UNNAMED" + } + + return p.name +} diff --git a/lib/localstore/filter_funcs.go b/lib/localstore/filter_funcs.go new file mode 100644 index 000000000..c92610c20 --- /dev/null +++ b/lib/localstore/filter_funcs.go @@ -0,0 +1,39 @@ +package localstore + +import ( + "context" +) + +// SkippingFilterFunc is a FilterFunc that always returns result with +// CodePass code and nil error. +func SkippingFilterFunc(_ context.Context, _ *ObjectMeta) *FilterResult { + return ResultPass() +} + +// ContainerFilterFunc returns a FilterFunc that returns: +// - result with CodePass code and nil error if CID of ObjectMeta if from the CID list; +// - result with CodeFail code an nil error otherwise. 
+func ContainerFilterFunc(cidList []CID) FilterFunc { + return func(_ context.Context, meta *ObjectMeta) *FilterResult { + for i := range cidList { + if meta.Object.SystemHeader.CID.Equal(cidList[i]) { + return ResultPass() + } + } + + return ResultFail() + } +} + +// StoredEarlierThanFilterFunc returns a FilterFunc that returns: +// - result with CodePass code and nil error if StoreEpoch is less that argument; +// - result with CodeFail code and nil error otherwise. +func StoredEarlierThanFilterFunc(epoch uint64) FilterFunc { + return func(_ context.Context, meta *ObjectMeta) *FilterResult { + if meta.StoreEpoch < epoch { + return ResultPass() + } + + return ResultFail() + } +} diff --git a/lib/localstore/filter_test.go b/lib/localstore/filter_test.go new file mode 100644 index 000000000..c07b9fe0c --- /dev/null +++ b/lib/localstore/filter_test.go @@ -0,0 +1,38 @@ +package localstore + +import ( + "context" + "testing" + + "github.com/nspcc-dev/neofs-node/internal" + "github.com/stretchr/testify/require" +) + +func TestSkippingFilterFunc(t *testing.T) { + res := SkippingFilterFunc(context.TODO(), &ObjectMeta{}) + require.Equal(t, CodePass, res.Code()) +} + +func TestFilterResult(t *testing.T) { + var ( + r *FilterResult + c = CodePass + e = internal.Error("test error") + ) + + r = ResultPass() + require.Equal(t, CodePass, r.Code()) + require.NoError(t, r.Err()) + + r = ResultFail() + require.Equal(t, CodeFail, r.Code()) + require.NoError(t, r.Err()) + + r = ResultIgnore() + require.Equal(t, CodeIgnore, r.Code()) + require.NoError(t, r.Err()) + + r = ResultWithError(c, e) + require.Equal(t, c, r.Code()) + require.EqualError(t, r.Err(), e.Error()) +} diff --git a/lib/localstore/get.go b/lib/localstore/get.go new file mode 100644 index 000000000..4e4090f48 --- /dev/null +++ b/lib/localstore/get.go @@ -0,0 +1,30 @@ +package localstore + +import ( + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/pkg/errors" +) + +func (l *localstore) Get(key refs.Address) (*Object, error) { + var ( + err error + k, v []byte + o = new(Object) + ) + + k, err = key.Hash() + if err != nil { + return nil, errors.Wrap(err, "Localstore Get failed on key.Marshal") + } + + v, err = l.blobBucket.Get(k) + if err != nil { + return nil, errors.Wrap(err, "Localstore Get failed on blobBucket.Get") + } + + if err = o.Unmarshal(v); err != nil { + return nil, errors.Wrap(err, "Localstore Get failed on Object.Unmarshal") + } + + return o, nil +} diff --git a/lib/localstore/has.go b/lib/localstore/has.go new file mode 100644 index 000000000..831e77def --- /dev/null +++ b/lib/localstore/has.go @@ -0,0 +1,20 @@ +package localstore + +import ( + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/pkg/errors" +) + +func (l *localstore) Has(key refs.Address) (bool, error) { + var ( + err error + k []byte + ) + + k, err = key.Hash() + if err != nil { + return false, errors.Wrap(err, "localstore.Has failed on key.Marshal") + } + + return l.metaBucket.Has(k) && l.blobBucket.Has(k), nil +} diff --git a/lib/localstore/interface.go b/lib/localstore/interface.go new file mode 100644 index 000000000..b1b14b4d0 --- /dev/null +++ b/lib/localstore/interface.go @@ -0,0 +1,102 @@ +package localstore + +import ( + "context" + + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/nspcc-dev/neofs-node/lib/metrics" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +type ( + // Localstore is an interface of local object storage. 
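+	//
+	// A minimal construction sketch (bucket, logger and collector values are
+	// assumed to come from the node configuration):
+	//
+	//	store, err := New(Params{
+	//		BlobBucket: blobBucket,
+	//		MetaBucket: metaBucket,
+	//		Logger:     logger,
+	//		Collector:  collector,
+	//	})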
+ Localstore interface { + Put(context.Context, *Object) error + Get(Address) (*Object, error) + Del(Address) error + Meta(Address) (*ObjectMeta, error) + Iterator + Has(Address) (bool, error) + ObjectsCount() (uint64, error) + + object.PositionReader + Size() int64 + } + + // MetaHandler is a function that handles ObjectMeta. + MetaHandler func(*ObjectMeta) bool + + // Iterator is an interface of the iterator over local object storage. + Iterator interface { + Iterate(FilterPipeline, MetaHandler) error + } + + // ListItem is an ObjectMeta wrapper. + ListItem struct { + ObjectMeta + } + + // Params groups the parameters of + // local object storage constructor. + Params struct { + BlobBucket core.Bucket + MetaBucket core.Bucket + Logger *zap.Logger + Collector metrics.Collector + } + + localstore struct { + metaBucket core.Bucket + blobBucket core.Bucket + + log *zap.Logger + col metrics.Collector + } +) + +// ErrOutOfRange is returned when requested object payload range is +// out of object payload bounds. +var ErrOutOfRange = errors.New("range is out of payload bounds") + +// ErrEmptyMetaHandler is returned by functions that expect +// a non-nil MetaHandler, but received nil. +var ErrEmptyMetaHandler = errors.New("meta handler is nil") + +var errNilLogger = errors.New("logger is nil") + +var errNilCollector = errors.New("metrics collector is nil") + +// New is a local object storage constructor. +func New(p Params) (Localstore, error) { + switch { + case p.MetaBucket == nil: + return nil, errors.Errorf("%s bucket is nil", core.MetaStore) + case p.BlobBucket == nil: + return nil, errors.Errorf("%s bucket is nil", core.BlobStore) + case p.Logger == nil: + return nil, errNilLogger + case p.Collector == nil: + return nil, errNilCollector + } + + return &localstore{ + metaBucket: p.MetaBucket, + blobBucket: p.BlobBucket, + log: p.Logger, + col: p.Collector, + }, nil +} + +func (l localstore) Size() int64 { return l.blobBucket.Size() } + +// TODO: implement less costly method of counting. +func (l localstore) ObjectsCount() (uint64, error) { + items, err := l.metaBucket.List() + if err != nil { + return 0, err + } + + return uint64(len(items)), nil +} diff --git a/lib/localstore/list.go b/lib/localstore/list.go new file mode 100644 index 000000000..c4e1ec62c --- /dev/null +++ b/lib/localstore/list.go @@ -0,0 +1,41 @@ +package localstore + +import ( + "context" + + "go.uber.org/zap" +) + +func (l *localstore) Iterate(filter FilterPipeline, handler MetaHandler) error { + if handler == nil { + return ErrEmptyMetaHandler + } else if filter == nil { + filter = NewFilter(&FilterParams{ + Name: "SKIPPING_FILTER", + FilterFunc: SkippingFilterFunc, + }) + } + + return l.metaBucket.Iterate(func(_, v []byte) bool { + meta := new(ObjectMeta) + if err := meta.Unmarshal(v); err != nil { + l.log.Error("unmarshal meta bucket item failure", zap.Error(err)) + } else if filter.Pass(context.TODO(), meta).Code() == CodePass { + return !handler(meta) + } + return true + }) +} + +// ListItems iterates over Iterator with FilterPipeline and returns all passed items. 
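// A self-contained sketch (hypothetical iterate helper, not core.Bucket) of the
// MetaHandler contract used by localstore.Iterate above: the bucket callback
// keeps going while it returns true, so Iterate negates the handler's result.
// A handler that returns true stops the walk; ListItems' handler always
// returns false and therefore collects every passed item.
package main

import "fmt"

// iterate calls cb for each item until cb returns false.
func iterate(items []int, cb func(int) bool) {
	for _, it := range items {
		if !cb(it) {
			break
		}
	}
}

func main() {
	var collected []int
	handler := func(v int) (stop bool) { // analogue of MetaHandler
		collected = append(collected, v)
		return v == 3 // stop once 3 has been handled
	}

	iterate([]int{1, 2, 3, 4, 5}, func(v int) bool { return !handler(v) })
	fmt.Println(collected) // [1 2 3]
}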
+func ListItems(it Iterator, f FilterPipeline) ([]ListItem, error) { + res := make([]ListItem, 0) + err := it.Iterate(f, func(meta *ObjectMeta) (stop bool) { + res = append(res, ListItem{ + ObjectMeta: *meta, + }) + return + }) + + return res, err +} diff --git a/lib/localstore/localstore.pb.go b/lib/localstore/localstore.pb.go new file mode 100644 index 000000000..e6c13b373 --- /dev/null +++ b/lib/localstore/localstore.pb.go @@ -0,0 +1,462 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: lib/localstore/localstore.proto + +package localstore + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/golang/protobuf/proto" + object "github.com/nspcc-dev/neofs-api-go/object" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type ObjectMeta struct { + Object *object.Object `protobuf:"bytes,1,opt,name=Object,proto3" json:"Object,omitempty"` + PayloadHash Hash `protobuf:"bytes,2,opt,name=PayloadHash,proto3,customtype=Hash" json:"PayloadHash"` + PayloadSize uint64 `protobuf:"varint,3,opt,name=PayloadSize,proto3" json:"PayloadSize,omitempty"` + StoreEpoch uint64 `protobuf:"varint,4,opt,name=StoreEpoch,proto3" json:"StoreEpoch,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } +func (m *ObjectMeta) String() string { return proto.CompactTextString(m) } +func (*ObjectMeta) ProtoMessage() {} +func (*ObjectMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_3236d71280f5b180, []int{0} +} +func (m *ObjectMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ObjectMeta.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ObjectMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMeta.Merge(m, src) +} +func (m *ObjectMeta) XXX_Size() int { + return m.Size() +} +func (m *ObjectMeta) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMeta proto.InternalMessageInfo + +func (m *ObjectMeta) GetObject() *object.Object { + if m != nil { + return m.Object + } + return nil +} + +func (m *ObjectMeta) GetPayloadSize() uint64 { + if m != nil { + return m.PayloadSize + } + return 0 +} + +func (m *ObjectMeta) GetStoreEpoch() uint64 { + if m != nil { + return m.StoreEpoch + } + return 0 +} + +func init() { + proto.RegisterType((*ObjectMeta)(nil), "localstore.ObjectMeta") +} + +func init() { proto.RegisterFile("lib/localstore/localstore.proto", fileDescriptor_3236d71280f5b180) } + +var fileDescriptor_3236d71280f5b180 = []byte{ + // 257 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcf, 0xc9, 0x4c, 0xd2, + 0xcf, 0xc9, 0x4f, 0x4e, 0xcc, 0x29, 0x2e, 0xc9, 0x2f, 0x4a, 0x45, 0x62, 0xea, 0x15, 0x14, 0xe5, + 
0x97, 0xe4, 0x0b, 0x71, 0x21, 0x44, 0xa4, 0x84, 0xf2, 0x93, 0xb2, 0x52, 0x93, 0x4b, 0xf4, 0x4b, + 0x2a, 0x0b, 0x52, 0x8b, 0x21, 0xf2, 0x52, 0xba, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, + 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0xe9, 0xf9, 0xfa, 0x60, 0xe1, 0xa4, 0xd2, 0x34, 0x30, 0x0f, 0xcc, + 0x01, 0xb3, 0x20, 0xca, 0x95, 0x96, 0x31, 0x72, 0x71, 0xf9, 0x83, 0x4d, 0xf1, 0x4d, 0x2d, 0x49, + 0x14, 0x52, 0xe3, 0x62, 0x83, 0xf0, 0x24, 0x18, 0x15, 0x18, 0x35, 0xb8, 0x8d, 0xf8, 0xf4, 0x20, + 0x56, 0xe8, 0x41, 0x44, 0x83, 0xa0, 0xb2, 0x42, 0x7a, 0x5c, 0xdc, 0x01, 0x89, 0x95, 0x39, 0xf9, + 0x89, 0x29, 0x1e, 0x89, 0xc5, 0x19, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0x3c, 0x4e, 0x3c, 0x27, 0xee, + 0xc9, 0x33, 0xdc, 0xba, 0x27, 0xcf, 0x02, 0x12, 0x0b, 0x42, 0x56, 0x20, 0xa4, 0x00, 0x57, 0x1f, + 0x9c, 0x59, 0x95, 0x2a, 0xc1, 0xac, 0xc0, 0xa8, 0xc1, 0x12, 0x84, 0x2c, 0x24, 0x24, 0xc7, 0xc5, + 0x15, 0x0c, 0xf2, 0x94, 0x6b, 0x41, 0x7e, 0x72, 0x86, 0x04, 0x0b, 0x58, 0x01, 0x92, 0x88, 0x93, + 0xc3, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe3, 0xb1, + 0x1c, 0x43, 0x94, 0x1e, 0x92, 0x4f, 0xf3, 0x8a, 0x0b, 0x92, 0x93, 0x75, 0x53, 0x52, 0xcb, 0xf4, + 0xf3, 0x52, 0xf3, 0xd3, 0x8a, 0x75, 0xf3, 0xf2, 0x53, 0x52, 0xf5, 0x51, 0x83, 0x32, 0x89, 0x0d, + 0xec, 0x63, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x20, 0xb3, 0xa4, 0x63, 0x01, 0x00, + 0x00, +} + +func (m *ObjectMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.StoreEpoch != 0 { + i = encodeVarintLocalstore(dAtA, i, uint64(m.StoreEpoch)) + i-- + dAtA[i] = 0x20 + } + if m.PayloadSize != 0 { + i = encodeVarintLocalstore(dAtA, i, uint64(m.PayloadSize)) + i-- + dAtA[i] = 0x18 + } + { + size := m.PayloadHash.Size() + i -= size + if _, err := m.PayloadHash.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintLocalstore(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintLocalstore(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintLocalstore(dAtA []byte, offset int, v uint64) int { + offset -= sovLocalstore(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ObjectMeta) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovLocalstore(uint64(l)) + } + l = m.PayloadHash.Size() + n += 1 + l + sovLocalstore(uint64(l)) + if m.PayloadSize != 0 { + n += 1 + sovLocalstore(uint64(m.PayloadSize)) + } + if m.StoreEpoch != 0 { + n += 1 + sovLocalstore(uint64(m.StoreEpoch)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovLocalstore(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozLocalstore(x uint64) (n int) { + return sovLocalstore(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} 
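// The generated sovLocalstore above computes the protobuf varint length of a
// uint64 as (bits.Len64(x|1)+6)/7, i.e. the number of 7-bit groups needed
// (x|1 makes zero occupy one byte). A small self-contained check against the
// standard library's varint encoder, just to make the formula concrete:
package main

import (
	"encoding/binary"
	"fmt"
	"math"
	"math/bits"
)

func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

func main() {
	buf := make([]byte, binary.MaxVarintLen64)
	for _, x := range []uint64{0, 1, 127, 128, 16383, 16384, math.MaxUint64} {
		got := binary.PutUvarint(buf, x) // bytes actually written by the stdlib encoder
		fmt.Printf("x=%d sov=%d stdlib=%d\n", x, sov(x), got)
	}
	// Every line prints matching values: 1, 1, 1, 2, 2, 3 and 10 bytes.
}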
+func (m *ObjectMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLocalstore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLocalstore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLocalstore + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLocalstore + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &object.Object{} + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PayloadHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLocalstore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthLocalstore + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthLocalstore + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PayloadHash.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PayloadSize", wireType) + } + m.PayloadSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLocalstore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PayloadSize |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StoreEpoch", wireType) + } + m.StoreEpoch = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLocalstore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StoreEpoch |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipLocalstore(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLocalstore + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLocalstore + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipLocalstore(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLocalstore + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLocalstore + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLocalstore + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthLocalstore + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupLocalstore + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthLocalstore + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthLocalstore = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowLocalstore = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupLocalstore = fmt.Errorf("proto: unexpected end of group") +) diff --git a/lib/localstore/localstore.proto b/lib/localstore/localstore.proto new file mode 100644 index 000000000..db5ec83e9 --- /dev/null +++ b/lib/localstore/localstore.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; +option go_package = "github.com/nspcc-dev/neofs-node/lib/localstore"; + +package localstore; + +import "object/types.proto"; +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +message ObjectMeta { + object.Object Object = 1; + bytes PayloadHash = 2 [(gogoproto.nullable) = false, (gogoproto.customtype) = "Hash"]; + uint64 PayloadSize = 3; + uint64 StoreEpoch = 4; +} diff --git a/lib/localstore/localstore_test.go b/lib/localstore/localstore_test.go new file mode 100644 index 000000000..06925e6bd --- /dev/null +++ b/lib/localstore/localstore_test.go @@ -0,0 +1,501 @@ +package localstore + +import ( + "context" + "sync" + "testing" + + "github.com/google/uuid" + "github.com/nspcc-dev/neofs-api-go/container" + "github.com/nspcc-dev/neofs-api-go/hash" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-node/lib/meta" + "github.com/nspcc-dev/neofs-node/lib/metrics" + "github.com/nspcc-dev/neofs-node/lib/test" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +type ( + testBucket struct { + sync.RWMutex + items map[string][]byte + } + + fakeCollector struct { + sync.Mutex + items map[refs.CID]uint64 + } +) + +func (f *fakeCollector) Start(_ context.Context) { panic("implement me") } +func (f *fakeCollector) UpdateSpaceUsage() { panic("implement me") } +func (f *fakeCollector) SetIterator(_ meta.Iterator) { panic("implement me") } +func (f *fakeCollector) SetCounter(counter metrics.ObjectCounter) { panic("implement me") } + +func (f *fakeCollector) UpdateContainer(cid refs.CID, size uint64, op 
metrics.SpaceOp) { + f.Lock() + defer f.Unlock() + + switch op { + case metrics.AddSpace: + f.items[cid] += size + case metrics.RemSpace: + if val, ok := f.items[cid]; !ok || val < size { + return + } + + f.items[cid] -= size + default: + return + } +} + +func newCollector() metrics.Collector { + return &fakeCollector{ + items: make(map[refs.CID]uint64), + } +} + +func newTestBucket() *testBucket { + return &testBucket{ + items: make(map[string][]byte), + } +} + +// +// func (t *testBucket) Get(key []byte) ([]byte, error) { +// t.Lock() +// defer t.Unlock() +// +// val, ok := t.items[base58.Encode(key)] +// if !ok { +// return nil, errors.New("item not found") +// } +// +// return val, nil +// } +// +// func (t *testBucket) Set(key, value []byte) error { +// t.Lock() +// defer t.Unlock() +// +// t.items[base58.Encode(key)] = value +// +// return nil +// } +// +// func (t *testBucket) Del(key []byte) error { +// t.RLock() +// defer t.RUnlock() +// +// delete(t.items, base58.Encode(key)) +// +// return nil +// } +// +// func (t *testBucket) Has(key []byte) bool { +// panic("implement me") +// } +// +// func (t *testBucket) Size() int64 { +// panic("implement me") +// } +// +// func (t *testBucket) List() ([]core.BucketItem, error) { +// t.Lock() +// defer t.Unlock() +// +// res := make([]core.BucketItem, 0) +// +// for k, v := range t.items { +// sk, err := base58.Decode(k) +// if err != nil { +// return nil, err +// } +// +// res = append(res, core.BucketItem{ +// Key: sk, +// Val: v, +// }) +// } +// +// return res, nil +// } +// +// func (t *testBucket) Filter(core.FilterHandler) ([]core.BucketItem, error) { +// panic("implement me") +// } +// +// func (t *testBucket) Close() error { +// panic("implement me") +// } +// +// func (t *testBucket) PRead(key []byte, rng object.Range) ([]byte, error) { +// panic("implement me") +// } + +func testObject(t *testing.T) *Object { + var ( + uid refs.UUID + cid CID + ) + + t.Run("Prepare object", func(t *testing.T) { + cnr, err := container.NewTestContainer() + require.NoError(t, err) + + cid, err = cnr.ID() + require.NoError(t, err) + + id, err := uuid.NewRandom() + uid = refs.UUID(id) + require.NoError(t, err) + }) + + obj := &Object{ + SystemHeader: object.SystemHeader{ + Version: 1, + ID: uid, + CID: cid, + OwnerID: refs.OwnerID([refs.OwnerIDSize]byte{}), // TODO: avoid hardcode + }, + Headers: []Header{ + { + Value: &object.Header_UserHeader{ + UserHeader: &object.UserHeader{ + Key: "Profession", + Value: "Developer", + }, + }, + }, + { + Value: &object.Header_UserHeader{ + UserHeader: &object.UserHeader{ + Key: "Language", + Value: "GO", + }, + }, + }, + }, + } + + return obj +} + +func newLocalstore(t *testing.T) Localstore { + ls, err := New(Params{ + BlobBucket: test.Bucket(), + MetaBucket: test.Bucket(), + Logger: zap.L(), + Collector: newCollector(), + }) + require.NoError(t, err) + + return ls +} + +func TestNew(t *testing.T) { + t.Run("New localstore", func(t *testing.T) { + var err error + + _, err = New(Params{}) + require.Error(t, err) + + _, err = New(Params{ + BlobBucket: test.Bucket(), + MetaBucket: test.Bucket(), + Logger: zap.L(), + Collector: newCollector(), + }) + require.NoError(t, err) + }) +} + +func TestLocalstore_Del(t *testing.T) { + t.Run("Del method", func(t *testing.T) { + var ( + err error + ls Localstore + obj *Object + ) + + ls = newLocalstore(t) + + obj = testObject(t) + obj.SetPayload([]byte("Hello, world")) + + k := *obj.Address() + + store, ok := ls.(*localstore) + require.True(t, ok) + require.NotNil(t, store) 
+ + metric, ok := store.col.(*fakeCollector) + require.True(t, ok) + require.NotNil(t, metric) + + err = ls.Put(context.Background(), obj) + require.NoError(t, err) + require.NotEmpty(t, obj.Payload) + require.Contains(t, metric.items, obj.SystemHeader.CID) + require.Equal(t, obj.SystemHeader.PayloadLength, metric.items[obj.SystemHeader.CID]) + + err = ls.Del(k) + require.NoError(t, err) + require.Contains(t, metric.items, obj.SystemHeader.CID) + require.Equal(t, uint64(0), metric.items[obj.SystemHeader.CID]) + + _, err = ls.Get(k) + require.Error(t, err) + }) +} + +func TestLocalstore_Get(t *testing.T) { + t.Run("Get method (default)", func(t *testing.T) { + var ( + err error + ls Localstore + obj *Object + ) + + ls = newLocalstore(t) + + obj = testObject(t) + + err = ls.Put(context.Background(), obj) + require.NoError(t, err) + + k := *obj.Address() + + o, err := ls.Get(k) + require.NoError(t, err) + require.Equal(t, obj, o) + }) +} + +func TestLocalstore_Put(t *testing.T) { + t.Run("Put method", func(t *testing.T) { + var ( + err error + ls Localstore + obj *Object + ) + + ls = newLocalstore(t) + store, ok := ls.(*localstore) + require.True(t, ok) + require.NotNil(t, store) + + metric, ok := store.col.(*fakeCollector) + require.True(t, ok) + require.NotNil(t, metric) + + obj = testObject(t) + + err = ls.Put(context.Background(), obj) + require.NoError(t, err) + require.Contains(t, metric.items, obj.SystemHeader.CID) + require.Equal(t, obj.SystemHeader.PayloadLength, metric.items[obj.SystemHeader.CID]) + + o, err := ls.Get(*obj.Address()) + require.NoError(t, err) + require.Equal(t, obj, o) + }) +} + +func TestLocalstore_List(t *testing.T) { + t.Run("List method (no filters)", func(t *testing.T) { + var ( + err error + ls Localstore + objCount = 10 + objs = make([]Object, objCount) + ) + + for i := range objs { + objs[i] = *testObject(t) + } + + ls = newLocalstore(t) + + for i := range objs { + err = ls.Put(context.Background(), &objs[i]) + require.NoError(t, err) + } + + items, err := ListItems(ls, nil) + require.NoError(t, err) + require.Len(t, items, objCount) + + for i := range items { + require.Contains(t, objs, *items[i].Object) + } + }) + + t.Run("List method ('bad' filter)", func(t *testing.T) { + var ( + err error + ls Localstore + objCount = 10 + objs = make([]Object, objCount) + ) + + for i := range objs { + objs[i] = *testObject(t) + } + + ls = newLocalstore(t) + + for i := range objs { + err = ls.Put(context.Background(), &objs[i]) + require.NoError(t, err) + } + + items, err := ListItems(ls, NewFilter(&FilterParams{ + FilterFunc: ContainerFilterFunc([]CID{}), + })) + require.NoError(t, err) + require.Len(t, items, 0) + }) + + t.Run("List method (filter by cid)", func(t *testing.T) { + var ( + err error + ls Localstore + objCount = 10 + objs = make([]Object, objCount) + ) + + for i := range objs { + objs[i] = *testObject(t) + } + + ls = newLocalstore(t) + + for i := range objs { + err = ls.Put(context.Background(), &objs[i]) + require.NoError(t, err) + } + + cidVals := []CID{objs[0].SystemHeader.CID} + + items, err := ListItems(ls, NewFilter(&FilterParams{ + FilterFunc: ContainerFilterFunc(cidVals), + })) + require.NoError(t, err) + require.Len(t, items, 1) + + for i := range items { + require.Contains(t, objs, *items[i].Object) + } + }) + + t.Run("Filter stored earlier", func(t *testing.T) { + var ( + err error + ls Localstore + objCount = 10 + objs = make([]Object, objCount) + epoch uint64 = 100 + list []ListItem + ) + + for i := range objs { + objs[i] = *testObject(t) + } 
+ + ls = newLocalstore(t) + + ctx := context.WithValue(context.Background(), StoreEpochValue, epoch) + + for i := range objs { + err = ls.Put(ctx, &objs[i]) + require.NoError(t, err) + } + + list, err = ListItems(ls, NewFilter(&FilterParams{ + FilterFunc: StoredEarlierThanFilterFunc(epoch - 1), + })) + require.NoError(t, err) + require.Empty(t, list) + + list, err = ListItems(ls, NewFilter(&FilterParams{ + FilterFunc: StoredEarlierThanFilterFunc(epoch), + })) + require.NoError(t, err) + require.Empty(t, list) + + list, err = ListItems(ls, NewFilter(&FilterParams{ + FilterFunc: StoredEarlierThanFilterFunc(epoch + 1), + })) + require.NoError(t, err) + require.Len(t, list, objCount) + }) + + t.Run("Filter with complex filter", func(t *testing.T) { + var ( + err error + ls Localstore + objCount = 10 + objs = make([]Object, objCount) + ) + + for i := range objs { + objs[i] = *testObject(t) + } + + ls = newLocalstore(t) + + for i := range objs { + err = ls.Put(context.Background(), &objs[i]) + require.NoError(t, err) + } + + cidVals := []CID{objs[0].SystemHeader.CID} + + mainF, err := AllPassIncludingFilter("TEST_FILTER", &FilterParams{ + Name: "CID_LIST", + FilterFunc: ContainerFilterFunc(cidVals), + }) + + items, err := ListItems(ls, mainF) + require.NoError(t, err) + require.Len(t, items, 1) + }) + + t.Run("Meta info", func(t *testing.T) { + var ( + err error + ls Localstore + objCount = 10 + objs = make([]Object, objCount) + epoch uint64 = 100 + ) + + for i := range objs { + objs[i] = *testObject(t) + } + + ls = newLocalstore(t) + + ctx := context.WithValue(context.Background(), StoreEpochValue, epoch) + + for i := range objs { + err = ls.Put(ctx, &objs[i]) + require.NoError(t, err) + + meta, err := ls.Meta(*objs[i].Address()) + require.NoError(t, err) + + noPayload := objs[i] + noPayload.Payload = nil + + require.Equal(t, *meta.Object, noPayload) + require.Equal(t, meta.PayloadHash, hash.Sum(objs[i].Payload)) + require.Equal(t, meta.PayloadSize, uint64(len(objs[i].Payload))) + require.Equal(t, epoch, meta.StoreEpoch) + } + }) +} diff --git a/lib/localstore/meta.go b/lib/localstore/meta.go new file mode 100644 index 000000000..ba1acd14b --- /dev/null +++ b/lib/localstore/meta.go @@ -0,0 +1,52 @@ +package localstore + +import ( + "context" + + "github.com/nspcc-dev/neofs-api-go/hash" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/pkg/errors" +) + +// StoreEpochValue is a context key of object storing epoch number. 
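// A minimal, runnable sketch of the context-value handoff that Put and
// metaFromObject rely on: the caller attaches the current epoch under the
// StoreEpochValue key, and the store retrieves it with a checked type
// assertion, falling back to zero when the value is absent or has the wrong
// type. The plain string key and uint64 value mirror the declarations in this
// file.
package main

import (
	"context"
	"fmt"
)

const storeEpochKey = "store epoch" // same plain-string key style as StoreEpochValue

func storeEpochFrom(ctx context.Context) uint64 {
	epoch, ok := ctx.Value(storeEpochKey).(uint64)
	if !ok {
		return 0 // no epoch attached: StoreEpoch stays zero, as in metaFromObject
	}
	return epoch
}

func main() {
	ctx := context.WithValue(context.Background(), storeEpochKey, uint64(100))
	fmt.Println(storeEpochFrom(ctx))                  // 100
	fmt.Println(storeEpochFrom(context.Background())) // 0
}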
+const StoreEpochValue = "store epoch" + +func (l *localstore) Meta(key refs.Address) (*ObjectMeta, error) { + var ( + err error + meta ObjectMeta + k, v []byte + ) + + k, err = key.Hash() + if err != nil { + return nil, errors.Wrap(err, "Localstore Meta failed on key.Marshal") + } + + v, err = l.metaBucket.Get(k) + if err != nil { + return nil, errors.Wrap(err, "Localstore Meta failed on metaBucket.Get") + } + + if err := meta.Unmarshal(v); err != nil { + return nil, errors.Wrap(err, "Localstore Metafailed on ObjectMeta.Unmarshal") + } + + return &meta, nil +} + +func metaFromObject(ctx context.Context, obj *Object) *ObjectMeta { + meta := new(ObjectMeta) + o := *obj + meta.Object = &o + meta.Object.Payload = nil + meta.PayloadSize = uint64(len(obj.Payload)) + meta.PayloadHash = hash.Sum(obj.Payload) + + storeEpoch, ok := ctx.Value(StoreEpochValue).(uint64) + if ok { + meta.StoreEpoch = storeEpoch + } + + return meta +} diff --git a/lib/localstore/put.go b/lib/localstore/put.go new file mode 100644 index 000000000..6f0421429 --- /dev/null +++ b/lib/localstore/put.go @@ -0,0 +1,47 @@ +package localstore + +import ( + "context" + + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-node/lib/metrics" + "github.com/pkg/errors" +) + +func (l *localstore) Put(ctx context.Context, obj *Object) error { + var ( + oa refs.Address + k, v []byte + err error + ) + + oa = *obj.Address() + k, err = oa.Hash() + + if err != nil { + return errors.Wrap(err, "Localstore Put failed on StorageKey.marshal") + } + + if v, err = obj.Marshal(); err != nil { + return errors.Wrap(err, "Localstore Put failed on blobValue") + } + + if err = l.blobBucket.Set(k, v); err != nil { + return errors.Wrap(err, "Localstore Put failed on BlobBucket.Set") + } + + if v, err = metaFromObject(ctx, obj).Marshal(); err != nil { + return errors.Wrap(err, "Localstore Put failed on metaValue") + } + + if err = l.metaBucket.Set(k, v); err != nil { + return errors.Wrap(err, "Localstore Put failed on MetaBucket.Set") + } + + l.col.UpdateContainer( + obj.SystemHeader.CID, + obj.SystemHeader.PayloadLength, + metrics.AddSpace) + + return nil +} diff --git a/lib/localstore/range.go b/lib/localstore/range.go new file mode 100644 index 000000000..05e43f531 --- /dev/null +++ b/lib/localstore/range.go @@ -0,0 +1,36 @@ +package localstore + +import ( + "context" + + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/pkg/errors" +) + +func (l *localstore) PRead(ctx context.Context, key Address, rng object.Range) ([]byte, error) { + var ( + err error + k, v []byte + obj Object + ) + + k, err = key.Hash() + if err != nil { + return nil, errors.Wrap(err, "Localstore Get failed on key.Marshal") + } + + v, err = l.blobBucket.Get(k) + if err != nil { + return nil, errors.Wrap(err, "Localstore Get failed on blobBucket.Get") + } + + if err := obj.Unmarshal(v); err != nil { + return nil, errors.Wrap(err, "Localstore Get failed on object.Unmarshal") + } + + if rng.Offset+rng.Length > uint64(len(obj.Payload)) { + return nil, ErrOutOfRange + } + + return obj.Payload[rng.Offset : rng.Offset+rng.Length], nil +} diff --git a/lib/meta/iterator.go b/lib/meta/iterator.go new file mode 100644 index 000000000..f5d3642ff --- /dev/null +++ b/lib/meta/iterator.go @@ -0,0 +1,15 @@ +package meta + +import ( + "github.com/nspcc-dev/neofs-api-go/object" +) + +type ( + // Iterator is an interface of the iterator over object storage. 
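// A self-contained sketch of what metaFromObject above records: the object is
// shallow-copied, the payload is stripped from the copy, and the payload
// itself is replaced by its size and hash in the meta record. SHA-256 stands
// in here for the NeoFS hash.Sum helper, and the object type is a
// hypothetical simplification.
package main

import (
	"crypto/sha256"
	"fmt"
)

type obj struct {
	ID      string
	Payload []byte
}

type objMeta struct {
	Object      obj
	PayloadSize uint64
	PayloadHash [sha256.Size]byte
}

func metaFrom(o obj) objMeta {
	m := objMeta{
		Object:      o, // copy of the header part
		PayloadSize: uint64(len(o.Payload)),
		PayloadHash: sha256.Sum256(o.Payload),
	}
	m.Object.Payload = nil // meta never carries the payload itself
	return m
}

func main() {
	m := metaFrom(obj{ID: "obj-1", Payload: []byte("hello")})
	fmt.Println(m.Object.Payload == nil, m.PayloadSize) // true 5
	fmt.Printf("%x\n", m.PayloadHash[:4])               // first bytes of the payload hash
}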
+ Iterator interface { + Iterate(IterateFunc) error + } + + // IterateFunc is a function that checks whether an object matches a specific criterion. + IterateFunc func(*object.Object) error +) diff --git a/lib/metrics/meta.go b/lib/metrics/meta.go new file mode 100644 index 000000000..d11685a53 --- /dev/null +++ b/lib/metrics/meta.go @@ -0,0 +1,33 @@ +package metrics + +import ( + "sync" + + "github.com/nspcc-dev/neofs-node/lib/meta" +) + +type metaWrapper struct { + sync.Mutex + iter meta.Iterator +} + +func newMetaWrapper() *metaWrapper { + return &metaWrapper{} +} + +func (m *metaWrapper) changeIter(iter meta.Iterator) { + m.Lock() + m.iter = iter + m.Unlock() +} + +func (m *metaWrapper) Iterate(h meta.IterateFunc) error { + m.Lock() + defer m.Unlock() + + if m.iter == nil { + return errEmptyMetaStore + } + + return m.iter.Iterate(h) +} diff --git a/lib/metrics/metrics.go b/lib/metrics/metrics.go new file mode 100644 index 000000000..143c66ac8 --- /dev/null +++ b/lib/metrics/metrics.go @@ -0,0 +1,175 @@ +package metrics + +import ( + "context" + "sync" + "time" + + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/nspcc-dev/neofs-node/lib/meta" + "go.uber.org/zap" +) + +type ( + // Collector is an interface of the metrics collector. + Collector interface { + Start(ctx context.Context) + UpdateSpaceUsage() + + SetCounter(ObjectCounter) + SetIterator(iter meta.Iterator) + UpdateContainer(cid refs.CID, size uint64, op SpaceOp) + } + + collector struct { + log *zap.Logger + interval time.Duration + counter *counterWrapper + + sizes *syncStore + metas *metaWrapper + + updateSpaceSize func() + updateObjectCount func() + } + + // Params groups the parameters of metrics collector's constructor. + Params struct { + Options []string + Logger *zap.Logger + Interval time.Duration + MetricsStore core.Bucket + } + + // ObjectCounter is an interface of object number storage. + ObjectCounter interface { + ObjectsCount() (uint64, error) + } + + // CounterSetter is an interface of ObjectCounter container. + CounterSetter interface { + SetCounter(ObjectCounter) + } + + counterWrapper struct { + sync.Mutex + counter ObjectCounter + } +) + +const ( + errEmptyCounter = internal.Error("empty object counter") + errEmptyLogger = internal.Error("empty logger") + errEmptyMetaStore = internal.Error("empty meta store") + errEmptyMetricsStore = internal.Error("empty metrics store") + + defaultMetricsInterval = 5 * time.Second +) + +// New constructs metrics collector and returns Collector interface. 
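// A stripped-down, self-contained sketch of the polling loop that
// collector.Start runs below: a time.Ticker drives periodic collection, the
// context cancels the loop, and a failed poll only skips that tick instead of
// stopping the collector. The poll function is a hypothetical stand-in for
// ObjectsCount.
package main

import (
	"context"
	"fmt"
	"time"
)

func run(ctx context.Context, interval time.Duration, poll func() (uint64, error)) {
	t := time.NewTicker(interval)
	defer t.Stop()

	for {
		select {
		case <-ctx.Done():
			fmt.Println("stop collecting:", ctx.Err())
			return
		case <-t.C:
			n, err := poll()
			if err != nil {
				fmt.Println("poll failed, skipping tick:", err)
				continue
			}
			fmt.Println("objects:", n)
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()

	run(ctx, 100*time.Millisecond, func() (uint64, error) { return 8, nil })
}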
+func New(p Params) (Collector, error) { + switch { + case p.Logger == nil: + return nil, errEmptyLogger + case p.MetricsStore == nil: + return nil, errEmptyMetricsStore + } + + if p.Interval <= 0 { + p.Interval = defaultMetricsInterval + } + + metas := newMetaWrapper() + sizes := newSyncStore(p.Logger, p.MetricsStore) + + sizes.Load() + + return &collector{ + log: p.Logger, + interval: p.Interval, + counter: new(counterWrapper), + + metas: metas, + sizes: sizes, + + updateSpaceSize: spaceUpdater(sizes), + updateObjectCount: metricsUpdater(p.Options), + }, nil +} + +func (c *counterWrapper) SetCounter(counter ObjectCounter) { + c.Lock() + defer c.Unlock() + + c.counter = counter +} + +func (c *counterWrapper) ObjectsCount() (uint64, error) { + c.Lock() + defer c.Unlock() + + if c.counter == nil { + return 0, errEmptyCounter + } + + return c.counter.ObjectsCount() +} + +func (c *collector) SetCounter(counter ObjectCounter) { + c.counter.SetCounter(counter) +} + +func (c *collector) SetIterator(iter meta.Iterator) { + c.metas.changeIter(iter) +} + +func (c *collector) UpdateContainer(cid refs.CID, size uint64, op SpaceOp) { + c.sizes.Update(cid, size, op) + c.updateSpaceSize() +} + +func (c *collector) UpdateSpaceUsage() { + sizes := make(map[refs.CID]uint64) + + err := c.metas.Iterate(func(obj *object.Object) error { + if !obj.IsTombstone() { + cid := obj.SystemHeader.CID + sizes[cid] += obj.SystemHeader.PayloadLength + } + + return nil + }) + + if err != nil { + c.log.Error("could not update space metrics", zap.Error(err)) + } + + c.sizes.Reset(sizes) + c.updateSpaceSize() +} + +func (c *collector) Start(ctx context.Context) { + t := time.NewTicker(c.interval) + +loop: + for { + select { + case <-ctx.Done(): + c.log.Warn("stop collecting metrics", zap.Error(ctx.Err())) + break loop + case <-t.C: + count, err := c.counter.ObjectsCount() + if err != nil { + c.log.Warn("get object count failure", zap.Error(err)) + continue loop + } + counter.Store(float64(count)) + c.updateObjectCount() + } + } + + t.Stop() +} diff --git a/lib/metrics/metrics_test.go b/lib/metrics/metrics_test.go new file mode 100644 index 000000000..7e2b585d5 --- /dev/null +++ b/lib/metrics/metrics_test.go @@ -0,0 +1,275 @@ +package metrics + +import ( + "context" + "encoding/binary" + "sync" + "testing" + "time" + + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-node/lib/meta" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +type ( + fakeCounter int + fakeIterator string + fakeMetaStore []*object.Object +) + +var ( + _ ObjectCounter = (*fakeCounter)(nil) + _ meta.Iterator = (*fakeIterator)(nil) +) + +func (f fakeCounter) ObjectsCount() (uint64, error) { + return uint64(f), nil +} + +func (f fakeIterator) Iterate(_ meta.IterateFunc) error { + if f == "" { + return nil + } + + return errors.New(string(f)) +} + +func (f fakeMetaStore) Iterate(cb meta.IterateFunc) error { + if cb == nil { + return nil + } + + for i := range f { + if err := cb(f[i]); err != nil { + return err + } + } + + return nil +} + +func TestCollector(t *testing.T) { + buck := &fakeBucket{items: make(map[uint64]int)} + + t.Run("check errors", func(t *testing.T) { + t.Run("empty logger", func(t *testing.T) { + svc, err := New(Params{MetricsStore: buck}) + require.Nil(t, svc) + require.EqualError(t, err, errEmptyLogger.Error()) + }) + + t.Run("empty metrics store", func(t *testing.T) { + svc, err := New(Params{Logger: zap.L()}) + require.Nil(t, svc) 
+ require.EqualError(t, err, errEmptyMetricsStore.Error()) + }) + }) + + svc, err := New(Params{ + Logger: zap.L(), + MetricsStore: buck, + Options: []string{ + "/Location:Europe/Country:Russia/City:Moscow", + "/Some:Another/Key:Value", + }, + }) + + require.NoError(t, err) + require.NotNil(t, svc) + + coll, ok := svc.(*collector) + require.True(t, ok) + require.NotNil(t, coll) + + t.Run("check start", func(t *testing.T) { + coll.interval = time.Second + + t.Run("stop by context", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + wg := new(sync.WaitGroup) + wg.Add(1) + + counter.Store(-1) + + go func() { + svc.Start(ctx) + wg.Done() + }() + + cancel() + wg.Wait() + + require.Equal(t, float64(-1), counter.Load()) + }) + + t.Run("should fail on empty counter", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + wg := new(sync.WaitGroup) + wg.Add(1) + + counter.Store(0) + + go func() { + svc.Start(ctx) + wg.Done() + }() + + time.Sleep(2 * time.Second) + cancel() + wg.Wait() + + require.Equal(t, float64(0), counter.Load()) + }) + + t.Run("should success on fakeCounter", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + wg := new(sync.WaitGroup) + wg.Add(1) + + coll.SetCounter(fakeCounter(8)) + counter.Store(0) + + go func() { + svc.Start(ctx) + wg.Done() + }() + + time.Sleep(2 * time.Second) + cancel() + wg.Wait() + + require.Equal(t, float64(8), counter.Load()) + }) + }) + + t.Run("iterator", func(t *testing.T) { + { + coll.SetIterator(nil) + require.Nil(t, coll.metas.iter) + require.EqualError(t, coll.metas.Iterate(nil), errEmptyMetaStore.Error()) + } + + { + iter := fakeIterator("") + coll.SetIterator(iter) + require.Equal(t, iter, coll.metas.iter) + require.NoError(t, coll.metas.Iterate(nil)) + } + + { + iter := fakeIterator("test") + coll.SetIterator(iter) + require.Equal(t, iter, coll.metas.iter) + require.EqualError(t, coll.metas.Iterate(nil), string(iter)) + } + }) + + t.Run("add-rem space", func(t *testing.T) { + cid := refs.CID{1, 2, 3, 4, 5} + buf := make([]byte, 8) + key := keyFromBytes(cid.Bytes()) + + zero := make([]byte, 8) + size := uint64(100) + + binary.BigEndian.PutUint64(buf, size) + + { + coll.UpdateContainer(cid, size, AddSpace) + require.Len(t, coll.sizes.items, 1) + require.Len(t, buck.items, 1) + require.Contains(t, buck.items, key) + require.Contains(t, buck.kv, fakeKV{key: cid.Bytes(), val: buf}) + } + + { + coll.UpdateContainer(cid, size, RemSpace) + require.Len(t, coll.sizes.items, 1) + require.Len(t, buck.items, 1) + require.Contains(t, buck.items, key) + require.Contains(t, buck.kv, fakeKV{key: cid.Bytes(), val: zero}) + } + + { + coll.UpdateContainer(cid, size, RemSpace) + require.Len(t, coll.sizes.items, 1) + require.Len(t, buck.items, 1) + require.Contains(t, buck.kv, fakeKV{key: cid.Bytes(), val: zero}) + } + }) + + t.Run("add-rem multi thread", func(t *testing.T) { + wg := new(sync.WaitGroup) + wg.Add(10) + + size := uint64(100) + zero := make([]byte, 8) + + // reset + coll.UpdateSpaceUsage() + + for i := 0; i < 10; i++ { + cid := refs.CID{1, 2, 3, 4, byte(i)} + coll.UpdateContainer(cid, size, AddSpace) + + go func() { + coll.UpdateContainer(cid, size, RemSpace) + wg.Done() + }() + } + + wg.Wait() + + require.Len(t, coll.sizes.items, 10) + require.Len(t, buck.items, 10) + + for i := 0; i < 10; i++ { + cid := refs.CID{1, 2, 3, 4, byte(i)} + require.Contains(t, buck.kv, fakeKV{key: cid.Bytes(), val: zero}) + } + }) + + 
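// A small, self-contained sketch of how the collector's Options strings (the
// test parameters above, handled by metricsUpdater in prometeus.go) are turned
// into label values: each option is a /Key:Value/Key:Value path, keys are
// matched case-insensitively, and only location, country and city are kept.
package main

import (
	"fmt"
	"strings"
)

func parseLabels(opts []string) map[string]string {
	labels := map[string]string{}
	for _, opt := range opts {
		for _, seg := range strings.Split(opt, "/") {
			parts := strings.SplitN(seg, ":", 2)
			if len(parts) != 2 {
				continue // skip empty or malformed segments
			}
			switch key := strings.ToLower(parts[0]); key {
			case "location", "country", "city":
				labels[key] = parts[1]
			}
		}
	}
	return labels
}

func main() {
	labels := parseLabels([]string{
		"/Location:Europe/Country:Russia/City:Moscow",
		"/Some:Another/Key:Value", // unknown keys are ignored
	})
	fmt.Println(labels) // map[city:Moscow country:Russia location:Europe]
}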
t.Run("reset buckets", func(t *testing.T) { + coll.UpdateSpaceUsage() + require.Len(t, coll.sizes.items, 0) + require.Len(t, buck.items, 0) + }) + + t.Run("reset from metaStore", func(t *testing.T) { + cid := refs.CID{1, 2, 3, 4, 5} + buf := make([]byte, 8) + key := keyFromBytes(cid.Bytes()) + size := uint64(100) + binary.BigEndian.PutUint64(buf, size) + + iter := fakeMetaStore{ + { + SystemHeader: object.SystemHeader{ + PayloadLength: size, + CID: cid, + }, + }, + + { + Headers: []object.Header{ + { + Value: &object.Header_Tombstone{Tombstone: &object.Tombstone{}}, + }, + }, + }, + } + + coll.SetIterator(iter) + + coll.UpdateSpaceUsage() + require.Len(t, coll.sizes.items, 1) + require.Len(t, buck.items, 1) + + require.Contains(t, buck.items, key) + require.Contains(t, buck.kv, fakeKV{key: cid.Bytes(), val: buf}) + }) +} diff --git a/lib/metrics/prometeus.go b/lib/metrics/prometeus.go new file mode 100644 index 000000000..438e85f56 --- /dev/null +++ b/lib/metrics/prometeus.go @@ -0,0 +1,83 @@ +package metrics + +import ( + "strings" + + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/atomic" +) + +const ( + locationLabel = "location" + countryLabel = "country" + cityLabel = "city" + + containerLabel = "cid" +) + +var ( + objectsCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "neofs", + Name: "count_objects_on_node", + Help: "Number of objects stored on this node", + }, []string{locationLabel, countryLabel, cityLabel}) + + spaceCounter = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "neofs", + Name: "container_space_sizes", + Help: "Space allocated by ContainerID", + }, []string{containerLabel}) + + counter = atomic.NewFloat64(0) +) + +func init() { + prometheus.MustRegister( + objectsCount, + spaceCounter, + ) +} + +func spaceUpdater(m *syncStore) func() { + return func() { + m.mutex.RLock() + for cid := range m.items { + spaceCounter. + With(prometheus.Labels{ + containerLabel: cid.String(), + }). + Set(float64(m.items[cid])) + } + m.mutex.RUnlock() + } +} + +func metricsUpdater(opts []string) func() { + var ( + locationCode string + countryCode string + cityCode string + ) + + for i := range opts { + ss := strings.Split(opts[i], "/") + for j := range ss { + switch s := strings.SplitN(ss[j], ":", 2); strings.ToLower(s[0]) { + case locationLabel: + locationCode = s[1] + case countryLabel: + countryCode = s[1] + case cityLabel: + cityCode = s[1] + } + } + } + + return func() { + objectsCount.With(prometheus.Labels{ + locationLabel: locationCode, + countryLabel: countryCode, + cityLabel: cityCode, + }).Set(counter.Load()) + } +} diff --git a/lib/metrics/store.go b/lib/metrics/store.go new file mode 100644 index 000000000..85f72434c --- /dev/null +++ b/lib/metrics/store.go @@ -0,0 +1,122 @@ +package metrics + +import ( + "encoding/binary" + "encoding/hex" + "sync" + + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-node/lib/core" + "go.uber.org/zap" +) + +type ( + syncStore struct { + log *zap.Logger + store core.Bucket + mutex sync.RWMutex + items map[refs.CID]uint64 + } + + // SpaceOp is an enumeration of space size operations. + SpaceOp int +) + +const ( + _ SpaceOp = iota + + // AddSpace is a SpaceOp of space size increasing. + AddSpace + + // RemSpace is a SpaceOp of space size decreasing. 
+ RemSpace +) + +func newSyncStore(log *zap.Logger, store core.Bucket) *syncStore { + return &syncStore{ + log: log, + store: store, + items: make(map[refs.CID]uint64), + } +} + +func (m *syncStore) Load() { + m.mutex.Lock() + defer m.mutex.Unlock() + + _ = m.store.Iterate(func(key, val []byte) bool { + cid, err := refs.CIDFromBytes(key) + if err != nil { + m.log.Error("could not load space value", zap.Error(err)) + return true + } + + m.items[cid] += binary.BigEndian.Uint64(val) + return true + }) +} + +func (m *syncStore) Reset(items map[refs.CID]uint64) { + m.mutex.Lock() + defer m.mutex.Unlock() + + m.items = items + if items == nil { + m.items = make(map[refs.CID]uint64) + } + + keys, err := m.store.List() + if err != nil { + m.log.Error("could not fetch keys space metrics", zap.Error(err)) + return + } + + // cleanup metrics store + for i := range keys { + if err := m.store.Del(keys[i]); err != nil { + cid := hex.EncodeToString(keys[i]) + m.log.Error("could not remove key", + zap.String("cid", cid), + zap.Error(err)) + } + } + + buf := make([]byte, 8) + + for cid := range items { + binary.BigEndian.PutUint64(buf, items[cid]) + + if err := m.store.Set(cid.Bytes(), buf); err != nil { + m.log.Error("could not store space value", + zap.Stringer("cid", cid), + zap.Error(err)) + } + } +} + +func (m *syncStore) Update(cid refs.CID, size uint64, op SpaceOp) { + m.mutex.Lock() + defer m.mutex.Unlock() + + switch op { + case RemSpace: + if m.items[cid] < size { + m.log.Error("space could not be negative") + return + } + + m.items[cid] -= size + case AddSpace: + m.items[cid] += size + default: + m.log.Error("unknown space operation", zap.Int("op", int(op))) + return + } + + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, m.items[cid]) + + if err := m.store.Set(cid.Bytes(), buf); err != nil { + m.log.Error("could not update space size", zap.Int("op", int(op))) + } +} diff --git a/lib/metrics/store_test.go b/lib/metrics/store_test.go new file mode 100644 index 000000000..2827308ec --- /dev/null +++ b/lib/metrics/store_test.go @@ -0,0 +1,156 @@ +package metrics + +import ( + "sync" + "testing" + + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/spaolacci/murmur3" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +type ( + fakeKV struct { + key []byte + val []byte + } + + fakeBucket struct { + sync.RWMutex + kv []fakeKV + items map[uint64]int + } +) + +var _ core.Bucket = (*fakeBucket)(nil) + +func keyFromBytes(b []byte) uint64 { + return murmur3.Sum64(b) +} + +func (f *fakeBucket) Set(key, value []byte) error { + f.Lock() + defer f.Unlock() + + var ( + id int + ok bool + uid = keyFromBytes(key) + ) + + if id, ok = f.items[uid]; !ok || id >= len(f.kv) { + id = len(f.kv) + f.items[uid] = id + f.kv = append(f.kv, fakeKV{ + key: key, + val: value, + }) + + return nil + } + + f.kv[id] = fakeKV{ + key: key, + val: value, + } + + return nil +} + +func (f *fakeBucket) Del(key []byte) error { + f.Lock() + defer f.Unlock() + + delete(f.items, keyFromBytes(key)) + + return nil +} + +func (f *fakeBucket) List() ([][]byte, error) { + f.RLock() + defer f.RUnlock() + + items := make([][]byte, 0, len(f.items)) + for _, id := range f.items { + // ignore unknown KV + if id >= len(f.kv) { + continue + } + + items = append(items, f.kv[id].key) + } + + return items, nil +} + +func (f *fakeBucket) Iterate(handler core.FilterHandler) error { + f.Lock() + defer f.Unlock() + + for _, id := range f.items { + // ignore unknown KV + if id >= 
len(f.kv) { + continue + } + + kv := f.kv[id] + + if !handler(kv.key, kv.val) { + break + } + } + + return nil +} + +func (f *fakeBucket) Get(_ []byte) ([]byte, error) { panic("implement me") } +func (f *fakeBucket) Has(_ []byte) bool { panic("implement me") } +func (f *fakeBucket) Size() int64 { panic("implement me") } +func (f *fakeBucket) Close() error { panic("implement me") } + +func TestSyncStore(t *testing.T) { + buck := &fakeBucket{items: make(map[uint64]int)} + sizes := newSyncStore(zap.L(), buck) + + for i := 0; i < 10; i++ { + cid := refs.CID{0, 0, 0, byte(i)} + require.NoError(t, buck.Set(cid.Bytes(), []byte{1, 2, 3, 4, 5, 6, 7, byte(i)})) + } + + t.Run("load", func(t *testing.T) { + sizes.Load() + require.Len(t, sizes.items, len(buck.items)) + }) + + t.Run("reset", func(t *testing.T) { + sizes.Reset(nil) + require.Len(t, sizes.items, 0) + }) + + t.Run("update", func(t *testing.T) { + cid := refs.CID{1, 2, 3, 4, 5} + + { // add space + sizes.Update(cid, 8, AddSpace) + val, ok := sizes.items[cid] + require.True(t, ok) + require.Equal(t, uint64(8), val) + } + + { // rem space + sizes.Update(cid, 8, RemSpace) + val, ok := sizes.items[cid] + require.True(t, ok) + require.Zero(t, val) + } + + { // rem space (zero - val) + sizes.Update(cid, 8, RemSpace) + val, ok := sizes.items[cid] + require.True(t, ok) + require.Zero(t, val) + } + }) +} diff --git a/lib/muxer/listener.go b/lib/muxer/listener.go new file mode 100644 index 000000000..9ba669951 --- /dev/null +++ b/lib/muxer/listener.go @@ -0,0 +1,51 @@ +package muxer + +import ( + "net" + + manet "github.com/multiformats/go-multiaddr-net" + "github.com/pkg/errors" +) + +type netListenerAdapter struct { + manet.Listener +} + +var errNothingAccept = errors.New("nothing to accept") + +// Accept waits for and returns the next connection to the listener. +func (l *netListenerAdapter) Accept() (net.Conn, error) { + if l.Listener == nil { + return nil, errNothingAccept + } + + return l.Listener.Accept() +} + +// Close closes the listener. +// Any blocked Accept operations will be unblocked and return errors. +func (l *netListenerAdapter) Close() error { + if l.Listener == nil { + return nil + } + + return l.Listener.Close() +} + +// Addr returns the net.Listener's network address. +func (l *netListenerAdapter) Addr() net.Addr { + if l.Listener == nil { + return (*net.TCPAddr)(nil) + } + + return l.Listener.Addr() +} + +// NetListener turns this Listener into a net.Listener. +// +// * Connections returned from Accept implement multiaddr-net Conn. +// * Calling WrapNetListener on the net.Listener returned by this function will +// return the original (underlying) multiaddr-net Listener. +func NetListener(l manet.Listener) net.Listener { + return &netListenerAdapter{Listener: l} +} diff --git a/lib/muxer/muxer.go b/lib/muxer/muxer.go new file mode 100644 index 000000000..9aff7cbb0 --- /dev/null +++ b/lib/muxer/muxer.go @@ -0,0 +1,247 @@ +package muxer + +import ( + "context" + "net" + "strings" + "sync/atomic" + "time" + + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-node/lib/peers" + "github.com/soheilhy/cmux" + "github.com/valyala/fasthttp" + "go.uber.org/zap" + "google.golang.org/grpc" +) + +type ( + // StoreParams groups the parameters of network connections muxer constructor. + Params struct { + Logger *zap.Logger + API *fasthttp.Server + Address multiaddr.Multiaddr + ShutdownTTL time.Duration + P2P *grpc.Server + Peers peers.Interface + } + + // Mux is an interface of network connections muxer. 
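// A compact, self-contained sketch of the start/stop discipline the muxer
// below follows: an atomic run flag makes Start and Stop idempotent, serving
// happens in a goroutine, and Stop waits on a done channel until serving has
// actually finished. Hypothetical simplified type, standard library only.
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type server struct {
	run  int32
	done chan struct{}
}

func (s *server) Start() {
	if !atomic.CompareAndSwapInt32(&s.run, 0, 1) {
		fmt.Println("already started")
		return
	}

	s.done = make(chan struct{})
	go func() {
		defer close(s.done)
		time.Sleep(50 * time.Millisecond) // stand-in for mux.Serve()
	}()
}

func (s *server) Stop() {
	if !atomic.CompareAndSwapInt32(&s.run, 1, 0) {
		fmt.Println("already stopped")
		return
	}

	// The real muxer first closes its listener so mux.Serve returns,
	// then waits on the done channel exactly like this.
	<-s.done
	fmt.Println("stopped")
}

func main() {
	s := new(server)
	s.Start()
	s.Start() // already started
	s.Stop()  // stopped
	s.Stop()  // already stopped
}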
+ Mux interface { + Start(ctx context.Context) + Stop() + } + + muxer struct { + peers peers.Interface + maddr multiaddr.Multiaddr + run *int32 + lis net.Listener + log *zap.Logger + ttl time.Duration + + p2p *grpc.Server + api *fasthttp.Server + + done chan struct{} + } +) + +const ( + // we close listener, that's why we ignore this errors + errClosedConnection = "use of closed network connection" + errMuxListenerClose = "mux: listener closed" + errHTTPServerClosed = "http: Server closed" +) + +var ( + ignoredErrors = []string{ + errClosedConnection, + errMuxListenerClose, + errHTTPServerClosed, + } +) + +// New constructs network connections muxer and returns Mux interface. +func New(p Params) Mux { + return &muxer{ + maddr: p.Address, + ttl: p.ShutdownTTL, + run: new(int32), + api: p.API, + p2p: p.P2P, + log: p.Logger, + peers: p.Peers, + done: make(chan struct{}), + } +} + +func needCatch(err error) bool { + if err == nil || containsErr(err) { + return false + } + + return true +} + +func containsErr(err error) bool { + for _, msg := range ignoredErrors { + if strings.Contains(err.Error(), msg) { + return true + } + } + + return false +} + +func (m *muxer) Start(ctx context.Context) { + var err error + + // if already started - ignore + if !atomic.CompareAndSwapInt32(m.run, 0, 1) { + m.log.Warn("already started") + return + } else if m.lis != nil { + m.log.Info("try close old listener") + if err = m.lis.Close(); err != nil { + m.log.Fatal("could not close old listener", + zap.Error(err)) + } + } + + lis, err := m.peers.Listen(m.maddr) + if err != nil { + m.log.Fatal("could not close old listener", + zap.Error(err)) + } + + m.lis = NetListener(lis) + + m.log.Info("create mux-listener", + zap.String("bind-address", m.lis.Addr().String())) + + mux := cmux.New(m.lis) + mux.HandleError(func(e error) bool { + if needCatch(e) { + m.log.Error("error-handler: something went wrong", + zap.Error(e)) + } + return true + }) + + // trpcL := mux.Match(cmux.Any()) // Any means anything that is not yet matched. 
+ hLis := mux.Match(cmux.HTTP1Fast()) + gLis := mux.Match(cmux.HTTP2()) + pLis := mux.Match(cmux.Any()) + + m.log.Debug("delay context worker") + + go func() { + <-ctx.Done() + m.Stop() + }() + + m.log.Debug("delay tcp") + + go func() { + m.log.Debug("tcp: serve") + loop: + for { + select { + case <-ctx.Done(): + break loop + default: + } + + con, err := pLis.Accept() + if err != nil { + break loop + } + + _ = con.Close() + } + + m.log.Debug("tcp: stopped") + }() + + m.log.Debug("delay p2p") + + go func() { + if m.p2p == nil { + m.log.Info("p2p: service is empty") + return + } + + m.log.Debug("p2p: serve") + + if err := m.p2p.Serve(gLis); needCatch(err) { + m.log.Error("p2p: something went wrong", + zap.Error(err)) + } + + m.log.Debug("p2p: stopped") + }() + + m.log.Debug("delay api") + + go func() { + if m.api == nil { + m.log.Info("api: service is empty") + return + } + + m.log.Debug("api: serve") + + if err := m.api.Serve(hLis); needCatch(err) { + m.log.Error("rpc: something went wrong", + zap.Error(err)) + } + + m.log.Debug("rpc: stopped") + }() + + m.log.Debug("delay serve") + + go func() { + defer func() { close(m.done) }() + + m.log.Debug("mux: serve") + + if err := mux.Serve(); needCatch(err) { + m.log.Fatal("mux: something went wrong", + zap.Error(err)) + } + + m.log.Debug("mux: stopped") + }() +} + +func (m *muxer) Stop() { + if !atomic.CompareAndSwapInt32(m.run, 1, 0) { + m.log.Warn("already stopped") + return + } + + if err := m.lis.Close(); err != nil { + m.log.Error("could not close connection", + zap.Error(err)) + } + + m.log.Debug("lis: close ok") + + <-m.done // muxer stopped + + if m.api != nil { + if err := m.api.Shutdown(); needCatch(err) { + m.log.Error("api: could not shutdown", + zap.Error(err)) + } + + m.log.Debug("api: shutdown ok") + } + + if m.p2p != nil { + m.p2p.GracefulStop() + m.log.Debug("p2p: shutdown ok") + } +} diff --git a/lib/muxer/muxer_test.go b/lib/muxer/muxer_test.go new file mode 100644 index 000000000..fc728d3c0 --- /dev/null +++ b/lib/muxer/muxer_test.go @@ -0,0 +1,415 @@ +package muxer + +import ( + "context" + "net" + "net/http" + "os" + "reflect" + "strings" + "sync" + "testing" + "time" + + "bou.ke/monkey" + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-node/lib/peers" + "github.com/nspcc-dev/neofs-node/lib/test" + "github.com/nspcc-dev/neofs-node/lib/transport" + "github.com/pkg/errors" + "github.com/soheilhy/cmux" + "github.com/spf13/viper" + "github.com/stretchr/testify/require" + "github.com/valyala/fasthttp" + "go.uber.org/atomic" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "google.golang.org/grpc" +) + +type ( + errListener struct { + net.TCPListener + } + + syncListener struct { + sync.Mutex + net.Listener + } + + errMuxer struct { + handleError func(error) bool + } + + testWriter struct{} + + // service is used to implement GreaterServer. 
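// A self-contained usage sketch of the needCatch/containsErr logic from
// muxer.go: expected shutdown errors ("use of closed network connection" and
// friends) are matched by substring and silently dropped, while anything else
// is treated as a real failure worth logging.
package main

import (
	"errors"
	"fmt"
	"strings"
)

var ignoredErrors = []string{
	"use of closed network connection",
	"mux: listener closed",
	"http: Server closed",
}

func needCatch(err error) bool {
	if err == nil {
		return false
	}
	for _, msg := range ignoredErrors {
		if strings.Contains(err.Error(), msg) {
			return false // expected during shutdown, ignore
		}
	}
	return true
}

func main() {
	fmt.Println(needCatch(nil))                                                        // false
	fmt.Println(needCatch(errors.New("accept tcp: use of closed network connection"))) // false
	fmt.Println(needCatch(errors.New("connection reset by peer")))                     // true
}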
+ service struct{} +) + +const MIMEApplicationJSON = "application/json" + +// Hello is simple handler +func (*service) Hello(ctx context.Context, req *HelloRequest) (*HelloResponse, error) { + return &HelloResponse{ + Message: "Hello " + req.Name, + }, nil +} + +func (testWriter) Sync() error { return nil } +func (testWriter) Write(p []byte) (n int, err error) { return len(p), nil } + +func (errMuxer) Match(...cmux.Matcher) net.Listener { + return &errListener{} +} + +func (errMuxer) MatchWithWriters(...cmux.MatchWriter) net.Listener { + return &errListener{} +} + +func (errMuxer) Serve() error { + return errors.New("cmux.Serve error") +} + +func (e *errMuxer) HandleError(h cmux.ErrorHandler) { + e.handleError = h +} + +func (errMuxer) SetReadTimeout(time.Duration) { + panic("implement me") +} + +func (l *syncListener) Close() error { + l.Lock() + err := l.Listener.Close() + l.Unlock() + return err +} + +func (errListener) Close() error { return errors.New("close error") } + +func testMultiAddr(is *require.Assertions) multiaddr.Multiaddr { + mAddr, err := multiaddr.NewMultiaddr("/ip4/0.0.0.0/tcp/0") + is.NoError(err) + return mAddr +} + +func testPeers(is *require.Assertions, a multiaddr.Multiaddr) peers.Interface { + s, err := peers.New(peers.Params{ + Address: a, + Transport: transport.New(5, time.Second), + Logger: test.NewTestLogger(false), + }) + is.NoError(err) + return s +} + +func testLogger() *zap.Logger { + encoderCfg := zapcore.EncoderConfig{ + MessageKey: "msg", + LevelKey: "level", + NameKey: "logger", + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + } + core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderCfg), testWriter{}, zap.DPanicLevel) + return zap.New(core).WithOptions() +} + +func testHTTPServer() *fasthttp.Server { + return &fasthttp.Server{Handler: func(ctx *fasthttp.RequestCtx) {}} +} + +func TestSuite(t *testing.T) { + t.Run("it should run, stop and not panic", func(t *testing.T) { + var ( + is = require.New(t) + v = viper.New() + g = grpc.NewServer() + l = testLogger() + a = testMultiAddr(is) + s = time.Second + err error + ) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + v.SetDefault("api.address", "/ip4/0.0.0.0/tcp/0") + v.SetDefault("api.shutdown_timeout", time.Second) + + m := New(Params{ + Logger: l, + Address: a, + ShutdownTTL: s, + API: testHTTPServer(), + P2P: g, + Peers: testPeers(is, a), + }) + + is.NotPanics(func() { + m.Start(ctx) + }) + + res, err := http.Post("http://"+m.(*muxer).lis.Addr().String(), MIMEApplicationJSON, strings.NewReader(`{ + "jsonrpc": "2.0", + "id": 1 + "method": "get_version", + "params": [], + }`)) + is.NoError(err) + defer res.Body.Close() + + time.Sleep(100 * time.Millisecond) + + is.NotPanics(m.Stop) + }) + + t.Run("it should work with gRPC", func(t *testing.T) { + var ( + is = require.New(t) + g = grpc.NewServer() + l = testLogger() + s = time.Second + err error + ) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + addr, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/63090") + is.NoError(err) + + ps := testPeers(is, addr) + + RegisterGreeterServer(g, &service{}) + + m := New(Params{ + Logger: l, + Address: addr, + ShutdownTTL: s, + P2P: g, + Peers: ps, + }) + + is.NotPanics(func() { + m.Start(ctx) + }) + + con, err := ps.GRPCConnection(ctx, addr, false) + is.NoError(err) + + res, err := NewGreeterClient(con).Hello(ctx, &HelloRequest{Name: "test"}) + is.NoError(err) + 
is.Contains(res.Message, "test") + + time.Sleep(100 * time.Millisecond) + + is.NotPanics(m.Stop) + }) + + t.Run("it should not start if already started", func(t *testing.T) { + var ( + is = require.New(t) + g = grpc.NewServer() + l = testLogger() + a = testMultiAddr(is) + s = time.Second + ) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m := New(Params{ + Logger: l, + Address: a, + ShutdownTTL: s, + API: testHTTPServer(), + P2P: g, + Peers: testPeers(is, a), + }) + is.NotNil(m) + + mux, ok := m.(*muxer) + is.True(ok) + is.NotNil(mux) + + *mux.run = 1 + + is.NotPanics(func() { + mux.Start(ctx) + }) + + *mux.run = 0 + + is.NotPanics(mux.Stop) + }) + + t.Run("it should fail on close listener", func(t *testing.T) { + var ( + is = require.New(t) + g = grpc.NewServer() + l = testLogger() + a = testMultiAddr(is) + s = time.Second + ) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m := New(Params{ + Logger: l, + Address: a, + ShutdownTTL: s, + API: testHTTPServer(), + P2P: g, + Peers: testPeers(is, a), + }) + is.NotNil(m) + + mux, ok := m.(*muxer) + is.True(ok) + is.NotNil(mux) + + mux.lis = &errListener{} + + exit := atomic.NewInt32(0) + + monkey.Patch(os.Exit, func(v int) { exit.Store(int32(v)) }) + + is.NotPanics(func() { + mux.Start(ctx) + }) + is.Equal(int32(1), exit.Load()) + }) + + t.Run("it should fail on create/close Listener without handlers", func(t *testing.T) { + var ( + is = require.New(t) + l = testLogger() + a = testMultiAddr(is) + err error + ) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mux := new(muxer) + mux.log = l + mux.peers = testPeers(is, a) + mux.run = new(int32) + mux.done = make(chan struct{}) + mux.maddr, err = multiaddr.NewMultiaddr("/ip4/1.1.1.1/tcp/2") + is.NoError(err) + + mux.lis, err = net.ListenTCP("tcp", nil) + is.NoError(err) + + exit := atomic.NewInt32(0) + monkey.Patch(os.Exit, func(v int) { + exit.Store(int32(v)) + }) + + m := &errMuxer{handleError: func(e error) bool { return true }} + monkey.Patch(cmux.New, func(net.Listener) cmux.CMux { + // prevent panic: + mux.lis, err = net.ListenTCP("tcp", nil) + return m + }) + + mux.Start(ctx) + // c.So(mux.Start, ShouldNotPanic) + + m.handleError(errors.New("test")) + + is.Equal(int32(1), exit.Load()) + + mux.lis = &errListener{} + *mux.run = 1 + + is.NotPanics(mux.Stop) + }) + + t.Run("it should fail on create/close Listener with handlers", func(t *testing.T) { + var ( + is = require.New(t) + g = grpc.NewServer() + l = testLogger() + a = testMultiAddr(is) + err error + ) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mux := new(muxer) + mux.api = testHTTPServer() + mux.p2p = g + mux.log = l + mux.peers = testPeers(is, a) + mux.run = new(int32) + mux.done = make(chan struct{}) + mux.maddr, err = multiaddr.NewMultiaddr("/ip4/1.1.1.1/tcp/2") + is.NoError(err) + + mu := new(sync.Mutex) + + exit := atomic.NewInt32(0) + monkey.Patch(os.Exit, func(v int) { + exit.Store(int32(v)) + + mu.Lock() + if l, ok := mux.lis.(*syncListener); ok { + l.Lock() + l.Listener, _ = net.ListenTCP("tcp", nil) + l.Unlock() + } + mu.Unlock() + }) + + m := &errMuxer{handleError: func(e error) bool { return true }} + monkey.Patch(cmux.New, func(net.Listener) cmux.CMux { + // prevent panic: + return m + }) + + is.NotPanics(func() { + mux.Start(ctx) + }) + + m.handleError(errors.New("test")) + + is.Equal(int32(1), exit.Load()) + + mu.Lock() + mux.lis = &syncListener{Listener: &errListener{}} + mu.Unlock() + 
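+		// simulate a started muxer so that Stop exercises the failing HTTP shutdown and listener close below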
*mux.run = 1 + + monkey.PatchInstanceMethod(reflect.TypeOf(&http.Server{}), "Shutdown", func(*http.Server, context.Context) error { + return errors.New("http.Shutdown error") + }) + + is.NotPanics(mux.Stop) + }) + + t.Run("should not panic when work with nil listener", func(t *testing.T) { + var ( + is = require.New(t) + err error + ) + + lis := NetListener(nil) + is.NotPanics(func() { + is.NoError(lis.Close()) + }) + is.NotPanics(func() { + lis.Addr() + }) + is.NotPanics(func() { + _, err = lis.Accept() + is.EqualError(err, errNothingAccept.Error()) + }) + }) +} diff --git a/lib/muxer/muxer_test.pb.go b/lib/muxer/muxer_test.pb.go new file mode 100644 index 000000000..f998ce85b --- /dev/null +++ b/lib/muxer/muxer_test.pb.go @@ -0,0 +1,600 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: lib/muxer/muxer_test.proto + +package muxer + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Request message example +type HelloRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HelloRequest) Reset() { *m = HelloRequest{} } +func (m *HelloRequest) String() string { return proto.CompactTextString(m) } +func (*HelloRequest) ProtoMessage() {} +func (*HelloRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_df873e89a9eb361d, []int{0} +} +func (m *HelloRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HelloRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HelloRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HelloRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_HelloRequest.Merge(m, src) +} +func (m *HelloRequest) XXX_Size() int { + return m.Size() +} +func (m *HelloRequest) XXX_DiscardUnknown() { + xxx_messageInfo_HelloRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_HelloRequest proto.InternalMessageInfo + +func (m *HelloRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type HelloResponse struct { + Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HelloResponse) Reset() { *m = HelloResponse{} } +func (m *HelloResponse) String() string { return proto.CompactTextString(m) } +func (*HelloResponse) ProtoMessage() {} +func (*HelloResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_df873e89a9eb361d, []int{1} +} +func (m *HelloResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HelloResponse) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HelloResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HelloResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_HelloResponse.Merge(m, src) +} +func (m *HelloResponse) XXX_Size() int { + return m.Size() +} +func (m *HelloResponse) XXX_DiscardUnknown() { + xxx_messageInfo_HelloResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_HelloResponse proto.InternalMessageInfo + +func (m *HelloResponse) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func init() { + proto.RegisterType((*HelloRequest)(nil), "muxer.HelloRequest") + proto.RegisterType((*HelloResponse)(nil), "muxer.HelloResponse") +} + +func init() { proto.RegisterFile("lib/muxer/muxer_test.proto", fileDescriptor_df873e89a9eb361d) } + +var fileDescriptor_df873e89a9eb361d = []byte{ + // 202 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xca, 0xc9, 0x4c, 0xd2, + 0xcf, 0x2d, 0xad, 0x48, 0x2d, 0x82, 0x90, 0xf1, 0x25, 0xa9, 0xc5, 0x25, 0x7a, 0x05, 0x45, 0xf9, + 0x25, 0xf9, 0x42, 0xac, 0x60, 0x11, 0x25, 0x25, 0x2e, 0x1e, 0x8f, 0xd4, 0x9c, 0x9c, 0xfc, 0xa0, + 0xd4, 0xc2, 0xd2, 0xd4, 0xe2, 0x12, 0x21, 0x21, 0x2e, 0x96, 0xbc, 0xc4, 0xdc, 0x54, 0x09, 0x46, + 0x05, 0x46, 0x0d, 0xce, 0x20, 0x30, 0x5b, 0x49, 0x93, 0x8b, 0x17, 0xaa, 0xa6, 0xb8, 0x20, 0x3f, + 0xaf, 0x38, 0x55, 0x48, 0x82, 0x8b, 0x3d, 0x37, 0xb5, 0xb8, 0x38, 0x31, 0x1d, 0xa6, 0x0e, 0xc6, + 0x35, 0xb2, 0xe5, 0x62, 0x77, 0x2f, 0x4a, 0x4d, 0x2d, 0x49, 0x2d, 0x12, 0x32, 0xe2, 0x62, 0x05, + 0xeb, 0x12, 0x12, 0xd6, 0x03, 0x5b, 0xa5, 0x87, 0x6c, 0x8f, 0x94, 0x08, 0xaa, 0x20, 0xc4, 0x60, + 0x27, 0xeb, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc6, + 0x63, 0x39, 0x86, 0x28, 0xcd, 0xf4, 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0xfd, + 0xbc, 0xe2, 0x82, 0xe4, 0x64, 0xdd, 0x94, 0xd4, 0x32, 0xfd, 0xbc, 0xd4, 0xfc, 0xb4, 0x62, 0xdd, + 0xbc, 0xfc, 0x94, 0x54, 0x7d, 0xb8, 0x17, 0x93, 0xd8, 0xc0, 0x1e, 0x33, 0x06, 0x04, 0x00, 0x00, + 0xff, 0xff, 0xbb, 0x65, 0x62, 0xb2, 0xf6, 0x00, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GreeterClient is the client API for Greeter service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GreeterClient interface { + Hello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloResponse, error) +} + +type greeterClient struct { + cc *grpc.ClientConn +} + +func NewGreeterClient(cc *grpc.ClientConn) GreeterClient { + return &greeterClient{cc} +} + +func (c *greeterClient) Hello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloResponse, error) { + out := new(HelloResponse) + err := c.cc.Invoke(ctx, "/muxer.Greeter/Hello", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// GreeterServer is the server API for Greeter service. 
+type GreeterServer interface { + Hello(context.Context, *HelloRequest) (*HelloResponse, error) +} + +// UnimplementedGreeterServer can be embedded to have forward compatible implementations. +type UnimplementedGreeterServer struct { +} + +func (*UnimplementedGreeterServer) Hello(ctx context.Context, req *HelloRequest) (*HelloResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Hello not implemented") +} + +func RegisterGreeterServer(s *grpc.Server, srv GreeterServer) { + s.RegisterService(&_Greeter_serviceDesc, srv) +} + +func _Greeter_Hello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HelloRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GreeterServer).Hello(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/muxer.Greeter/Hello", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GreeterServer).Hello(ctx, req.(*HelloRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Greeter_serviceDesc = grpc.ServiceDesc{ + ServiceName: "muxer.Greeter", + HandlerType: (*GreeterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Hello", + Handler: _Greeter_Hello_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "lib/muxer/muxer_test.proto", +} + +func (m *HelloRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HelloRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HelloRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintMuxerTest(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HelloResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HelloResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HelloResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintMuxerTest(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintMuxerTest(dAtA []byte, offset int, v uint64) int { + offset -= sovMuxerTest(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *HelloRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovMuxerTest(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *HelloResponse) Size() (n int) { + if m == nil { + return 0 + } + var 
l int + _ = l + l = len(m.Message) + if l > 0 { + n += 1 + l + sovMuxerTest(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovMuxerTest(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMuxerTest(x uint64) (n int) { + return sovMuxerTest(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *HelloRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMuxerTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HelloRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HelloRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMuxerTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMuxerTest + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMuxerTest + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMuxerTest(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMuxerTest + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMuxerTest + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HelloResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMuxerTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HelloResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HelloResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMuxerTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMuxerTest + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMuxerTest + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMuxerTest(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMuxerTest + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMuxerTest + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMuxerTest(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMuxerTest + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMuxerTest + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMuxerTest + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMuxerTest + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMuxerTest + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMuxerTest + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthMuxerTest = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMuxerTest = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMuxerTest = fmt.Errorf("proto: unexpected end of group") +) diff --git a/lib/muxer/muxer_test.proto b/lib/muxer/muxer_test.proto new file mode 100644 index 000000000..b3a723f98 --- /dev/null +++ b/lib/muxer/muxer_test.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; +option go_package = "github.com/nspcc-dev/neofs-node/lib/muxer"; + +package muxer; + +// The Greater service definition. 
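+// It exposes a single unary Hello RPC used by the muxer test suite.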
+service Greeter { + rpc Hello(HelloRequest) returns (HelloResponse); +} + +// Request message example +message HelloRequest { + string name = 1; +} + +message HelloResponse { + string message = 1; +} diff --git a/lib/netmap/netmap.go b/lib/netmap/netmap.go new file mode 100644 index 000000000..e339d0f9b --- /dev/null +++ b/lib/netmap/netmap.go @@ -0,0 +1,392 @@ +package netmap + +import ( + "crypto/sha256" + "encoding/json" + "reflect" + "sort" + "sync" + + "github.com/nspcc-dev/neofs-api-go/bootstrap" + "github.com/nspcc-dev/netmap" + "github.com/pkg/errors" + "github.com/spaolacci/murmur3" +) + +type ( + // Bucket is an alias for github.com/nspcc-dev/netmap.Bucket + Bucket = netmap.Bucket + // SFGroup is an alias for github.com/nspcc-dev/netmap.SFGroup + SFGroup = netmap.SFGroup + // Select is an alias for github.com/nspcc-dev/netmap.Select + Select = netmap.Select + // Filter is an alias for github.com/nspcc-dev/netmap.Filter + Filter = netmap.Filter + // SimpleFilter is an alias for github.com/nspcc-dev/netmap.Filter + SimpleFilter = netmap.SimpleFilter + // PlacementRule is an alias for github.com/nspcc-dev/netmap.Filter + PlacementRule = netmap.PlacementRule + + // NetMap is a general network map structure for NeoFS + NetMap struct { + mu *sync.RWMutex + root Bucket + items Nodes + } + + // Nodes is an alias for slice of NodeInfo which is structure that describes every host + Nodes []bootstrap.NodeInfo +) + +const ( + // Separator separates key:value pairs in string representation of options. + Separator = netmap.Separator + + // NodesBucket is the name for optionless bucket containing only nodes. + NodesBucket = netmap.NodesBucket +) + +var ( + // FilterIn returns filter, which checks if value is in specified list. + FilterIn = netmap.FilterIn + // FilterNotIn returns filter, which checks if value is not in specified list. + FilterNotIn = netmap.FilterNotIn + // FilterOR returns OR combination of filters. + FilterOR = netmap.FilterOR + // FilterAND returns AND combination of filters. + FilterAND = netmap.FilterAND + // FilterEQ returns filter, which checks if value is equal to v. + FilterEQ = netmap.FilterEQ + // FilterNE returns filter, which checks if value is not equal to v. + FilterNE = netmap.FilterNE + // FilterGT returns filter, which checks if value is greater than v. + FilterGT = netmap.FilterGT + // FilterGE returns filter, which checks if value is greater or equal than v. + FilterGE = netmap.FilterGE + // FilterLT returns filter, which checks if value is less than v. + FilterLT = netmap.FilterLT + // FilterLE returns filter, which checks if value is less or equal than v. + FilterLE = netmap.FilterLE +) + +var errNetMapsConflict = errors.New("netmaps are in conflict") + +// Copy creates new slice of copied nodes. +func (n Nodes) Copy() Nodes { + res := make(Nodes, len(n)) + for i := range n { + res[i].Address = n[i].Address + res[i].Status = n[i].Status + + if n[i].PubKey != nil { + res[i].PubKey = make([]byte, len(n[i].PubKey)) + copy(res[i].PubKey, n[i].PubKey) + } + + if n[i].Options != nil { + res[i].Options = make([]string, len(n[i].Options)) + copy(res[i].Options, n[i].Options) + } + } + + return res +} + +// NewNetmap is an constructor. +func NewNetmap() *NetMap { + return &NetMap{ + items: make([]bootstrap.NodeInfo, 0), + mu: new(sync.RWMutex), + } +} + +// Equals return whether two netmap are identical. 
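+// Only the receiver's mutex is taken; the argument is read without locking.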
+func (n *NetMap) Equals(nm *NetMap) bool { + n.mu.RLock() + defer n.mu.RUnlock() + + return len(n.items) == len(nm.items) && + n.root.Equals(nm.root) && + reflect.DeepEqual(n.items, nm.items) +} + +// Root returns netmap root-bucket. +func (n *NetMap) Root() *Bucket { + n.mu.RLock() + cp := n.root.Copy() + n.mu.RUnlock() + + return &cp +} + +// Copy creates and returns full copy of target netmap. +func (n *NetMap) Copy() *NetMap { + n.mu.RLock() + defer n.mu.RUnlock() + + nm := NewNetmap() + nm.items = n.items.Copy() + nm.root = n.root.Copy() + + return nm +} + +type hashedItem struct { + h uint32 + info *bootstrap.NodeInfo +} + +// Normalise reorders netmap items into some canonical order. +func (n *NetMap) Normalise() *NetMap { + nm := NewNetmap() + items := n.items.Copy() + + if len(items) == 0 { + return nm + } + + itemsH := make([]hashedItem, len(n.items)) + for i := range itemsH { + itemsH[i].h = murmur3.Sum32(n.items[i].PubKey) + itemsH[i].info = &items[i] + } + + sort.Slice(itemsH, func(i, j int) bool { + if itemsH[i].h == itemsH[j].h { + return itemsH[i].info.Address < itemsH[j].info.Address + } + return itemsH[i].h < itemsH[j].h + }) + + lastHash := ^itemsH[0].h + lastAddr := "" + + for i := range itemsH { + if itemsH[i].h != lastHash || itemsH[i].info.Address != lastAddr { + _ = nm.AddNode(itemsH[i].info) + lastHash = itemsH[i].h + } + } + + return nm +} + +// Hash returns hash of n. +func (n *NetMap) Hash() (sum [32]byte) { + items := n.Normalise().Items() + w := sha256.New() + + for i := range items { + data, _ := items[i].Marshal() + _, _ = w.Write(data) + } + + s := w.Sum(nil) + copy(sum[:], s) + + return +} + +// InheritWeights calculates average capacity and minimal price, then provides buckets with IQR weight. +func (n *NetMap) InheritWeights() *NetMap { + nm := n.Copy() + + // find average capacity in the network map + meanCap := nm.root.Traverse(netmap.NewMeanAgg(), netmap.CapWeightFunc).Compute() + capNorm := netmap.NewSigmoidNorm(meanCap) + + // find minimal price in the network map + minPrice := nm.root.Traverse(netmap.NewMinAgg(), netmap.PriceWeightFunc).Compute() + priceNorm := netmap.NewReverseMinNorm(minPrice) + + // provide all buckets with + wf := netmap.NewWeightFunc(capNorm, priceNorm) + meanAF := netmap.AggregatorFactory{New: netmap.NewMeanIQRAgg} + nm.root.TraverseTree(meanAF, wf) + + return nm +} + +// Merge checks if merge is possible and then add new elements from given netmap. +func (n *NetMap) Merge(n1 *NetMap) error { + n.mu.Lock() + defer n.mu.Unlock() + + var ( + tr = make(map[uint32]netmap.Node, len(n1.items)) + items = n.items + ) + +loop: + for j := range n1.items { + for i := range n.items { + if n.items[i].Equals(n1.items[j]) { + tr[uint32(j)] = netmap.Node{ + N: uint32(i), + C: n.items[i].Capacity(), + P: n.items[i].Price(), + } + continue loop + } + } + tr[uint32(j)] = netmap.Node{ + N: uint32(len(items)), + C: n1.items[j].Capacity(), + P: n1.items[j].Price(), + } + items = append(items, n1.items[j]) + } + + root := n1.root.UpdateIndices(tr) + if n.root.CheckConflicts(root) { + return errNetMapsConflict + } + + n.items = items + n.root.Merge(root) + + return nil +} + +// FindGraph finds sub-graph filtered by given SFGroup. +func (n *NetMap) FindGraph(pivot []byte, ss ...SFGroup) (c *Bucket) { + n.mu.RLock() + defer n.mu.RUnlock() + + return n.root.FindGraph(pivot, ss...) +} + +// FindNodes finds sub-graph filtered by given SFGroup and returns all sub-graph items. 
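+// The returned values are indices of nodes in the netmap item list.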
+func (n *NetMap) FindNodes(pivot []byte, ss ...SFGroup) (nodes []uint32) { + n.mu.RLock() + defer n.mu.RUnlock() + + return n.root.FindNodes(pivot, ss...).Nodes() +} + +// Items return slice of all NodeInfo in netmap. +func (n *NetMap) Items() []bootstrap.NodeInfo { + n.mu.RLock() + defer n.mu.RUnlock() + + return n.items +} + +// ItemsCopy return copied slice of all NodeInfo in netmap (is it useful?). +func (n *NetMap) ItemsCopy() Nodes { + n.mu.RLock() + defer n.mu.RUnlock() + + return n.items.Copy() +} + +// Add adds node with given address and given options. +func (n *NetMap) Add(addr string, pk []byte, st bootstrap.NodeStatus, opts ...string) error { + return n.AddNode(&bootstrap.NodeInfo{Address: addr, PubKey: pk, Status: st, Options: opts}) +} + +// Update replaces netmap with given netmap. +func (n *NetMap) Update(nxt *NetMap) { + n.mu.Lock() + defer n.mu.Unlock() + + n.root = nxt.root + n.items = nxt.items +} + +// GetMaxSelection returns 'maximal container' -- subgraph which contains +// any other subgraph satisfying specified selects and filters. +func (n *NetMap) GetMaxSelection(ss []Select, fs []Filter) (r *Bucket) { + return n.root.GetMaxSelection(netmap.SFGroup{Selectors: ss, Filters: fs}) +} + +// AddNode adds to exited or new node slice of given options. +func (n *NetMap) AddNode(nodeInfo *bootstrap.NodeInfo, opts ...string) error { + n.mu.Lock() + defer n.mu.Unlock() + + info := *nodeInfo + + info.Options = append(info.Options, opts...) + + num := -1 + + // looking for existed node info item + for i := range n.items { + if n.items[i].Equals(info) { + num = i + break + } + } + // if item is not existed - add it + if num < 0 { + num = len(n.items) + n.items = append(n.items, info) + } + + return n.root.AddStrawNode(netmap.Node{ + N: uint32(num), + C: n.items[num].Capacity(), + P: n.items[num].Price(), + }, info.Options...) +} + +// GetNodesByOption returns slice of NodeInfo that has given option. +func (n *NetMap) GetNodesByOption(opts ...string) []bootstrap.NodeInfo { + n.mu.RLock() + defer n.mu.RUnlock() + + ns := n.root.GetNodesByOption(opts...) + nodes := make([]bootstrap.NodeInfo, 0, len(ns)) + + for _, info := range ns { + nodes = append(nodes, n.items[info.N]) + } + + return nodes +} + +// MarshalJSON custom marshaller. +func (n *NetMap) MarshalJSON() ([]byte, error) { + n.mu.RLock() + defer n.mu.RUnlock() + + return json.Marshal(n.items) +} + +// UnmarshalJSON custom unmarshaller. +func (n *NetMap) UnmarshalJSON(data []byte) error { + var ( + nm = NewNetmap() + items []bootstrap.NodeInfo + ) + + if err := json.Unmarshal(data, &items); err != nil { + return err + } + + for i := range items { + if err := nm.Add(items[i].Address, items[i].PubKey, items[i].Status, items[i].Options...); err != nil { + return err + } + } + + if n.mu == nil { + n.mu = new(sync.RWMutex) + } + + n.mu.Lock() + n.root = nm.root + n.items = nm.items + n.mu.Unlock() + + return nil +} + +// Size returns number of nodes in network map. 
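+// It is safe for concurrent use with the other read operations.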
+func (n *NetMap) Size() int { + n.mu.RLock() + defer n.mu.RUnlock() + + return len(n.items) +} diff --git a/lib/netmap/netmap_test.go b/lib/netmap/netmap_test.go new file mode 100644 index 000000000..1cd579b61 --- /dev/null +++ b/lib/netmap/netmap_test.go @@ -0,0 +1,261 @@ +package netmap + +import ( + "bytes" + "encoding/json" + "math/rand" + "sync" + "testing" + "time" + + "github.com/nspcc-dev/neofs-api-go/bootstrap" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/netmap" + "github.com/stretchr/testify/require" +) + +func TestNetMap_DataRace(t *testing.T) { + var ( + nm = NewNetmap() + wg = new(sync.WaitGroup) + nodes = []bootstrap.NodeInfo{ + {Address: "SPB1", Options: []string{"/Location:Europe/Country:USA"}}, + {Address: "SPB2", Options: []string{"/Location:Europe/Country:Italy"}}, + {Address: "MSK1", Options: []string{"/Location:Europe/Country:Germany"}}, + {Address: "MSK2", Options: []string{"/Location:Europe/Country:Russia"}}, + } + ) + + wg.Add(10) + for i := 0; i < 10; i++ { + go func(n int) { + for _, node := range nodes { + require.NoError(t, nm.Add(node.Address, node.PubKey, 0, node.Options...)) + // t.Logf("%02d: add node %q", n, node.Address) + } + + wg.Done() + }(i) + } + + wg.Add(3 * 10) + for i := 0; i < 10; i++ { + go func(n int) { + nm.Copy() + // t.Logf("%02d: Copy", n) + wg.Done() + }(i) + go func(n int) { + nm.Items() + // t.Logf("%02d: Items", n) + wg.Done() + }(i) + go func(n int) { + nm.Root() + // t.Logf("%02d: Root", n) + wg.Done() + }(i) + } + + wg.Wait() +} + +func TestNetMapSuite(t *testing.T) { + var ( + err error + nm1 = NewNetmap() + nodes = []bootstrap.NodeInfo{ + {Address: "SPB1", Options: []string{"/Location:Europe/Country:USA"}, Status: 1}, + {Address: "SPB2", Options: []string{"/Location:Europe/Country:Italy"}, Status: 2}, + {Address: "MSK1", Options: []string{"/Location:Europe/Country:Germany"}, Status: 3}, + {Address: "MSK2", Options: []string{"/Location:Europe/Country:Russia"}, Status: 4}, + } + ) + + for _, node := range nodes { + err = nm1.Add(node.Address, nil, node.Status, node.Options...) 
+ require.NoError(t, err) + } + + t.Run("copy should work like expected", func(t *testing.T) { + nm2 := nm1.Copy() + require.Equal(t, nm1.root, nm2.root) + require.Equal(t, nm1.items, nm2.items) + }) + + t.Run("add node should not ignore options", func(t *testing.T) { + items := nm1.ItemsCopy() + + nm2 := NewNetmap() + err = nm2.AddNode(&items[0], "/New/Option") + require.NoError(t, err) + require.Len(t, nm2.items, 1) + require.Equal(t, append(items[0].Options, "/New/Option"), nm2.items[0].Options) + }) + + t.Run("copyItems should work like expected", func(t *testing.T) { + require.Equal(t, nm1.items, nm1.ItemsCopy()) + }) + + t.Run("marshal / unmarshal should be identical on same data", func(t *testing.T) { + var nm2 *NetMap + want, err := json.Marshal(nodes) + require.NoError(t, err) + + actual, err := json.Marshal(nm1) + require.NoError(t, err) + + require.Equal(t, want, actual) + + err = json.Unmarshal(actual, &nm2) + require.NoError(t, err) + require.Equal(t, nm1.root, nm2.root) + require.Equal(t, nm1.items, nm2.items) + }) + + t.Run("unmarshal should override existing data", func(t *testing.T) { + var nm2 *NetMap + + want, err := json.Marshal(nodes) + require.NoError(t, err) + + actual, err := json.Marshal(nm1) + require.NoError(t, err) + + require.Equal(t, want, actual) + + nm2 = nm1.Copy() + err = nm2.Add("SOMEADDR", nil, 0, "/Location:Europe/Country:USA") + require.NoError(t, err) + + err = json.Unmarshal(actual, &nm2) + require.NoError(t, err) + require.Equal(t, nm1.root, nm2.root) + require.Equal(t, nm1.items, nm2.items) + }) + + t.Run("unmarshal should fail on bad data", func(t *testing.T) { + var nm2 *NetMap + require.Error(t, json.Unmarshal([]byte(`"some bad data"`), &nm2)) + }) + + t.Run("unmarshal should fail on add nodes", func(t *testing.T) { + var nm2 *NetMap + require.Error(t, json.Unmarshal([]byte(`[{"address": "SPB1","options":["1-2-3-4"]}]`), &nm2)) + }) + + t.Run("merge two netmaps", func(t *testing.T) { + newNodes := []bootstrap.NodeInfo{ + {Address: "SPB3", Options: []string{"/Location:Europe/Country:France"}}, + } + nm2 := NewNetmap() + for _, node := range newNodes { + err = nm2.Add(node.Address, nil, 0, node.Options...) + require.NoError(t, err) + } + + err = nm2.Merge(nm1) + require.NoError(t, err) + require.Len(t, nm2.items, len(nodes)+len(newNodes)) + + ns := nm2.FindNodes([]byte("pivot"), netmap.SFGroup{ + Filters: []Filter{{Key: "Country", F: FilterEQ("Germany")}}, + Selectors: []Select{{Count: 1, Key: NodesBucket}}, + }) + require.Len(t, ns, 1) + }) + + t.Run("weighted netmaps", func(t *testing.T) { + strawNodes := []bootstrap.NodeInfo{ + {Address: "SPB2", Options: []string{"/Location:Europe/Country:Italy", "/Capacity:10", "/Price:100"}}, + {Address: "MSK1", Options: []string{"/Location:Europe/Country:Germany", "/Capacity:10", "/Price:1"}}, + {Address: "MSK2", Options: []string{"/Location:Europe/Country:Russia", "/Capacity:5", "/Price:10"}}, + {Address: "SPB1", Options: []string{"/Location:Europe/Country:France", "/Capacity:20", "/Price:2"}}, + } + nm2 := NewNetmap() + for _, node := range strawNodes { + err = nm2.Add(node.Address, nil, 0, node.Options...) 
+ require.NoError(t, err) + } + + ns1 := nm1.FindNodes([]byte("pivot"), netmap.SFGroup{ + Selectors: []Select{{Count: 2, Key: NodesBucket}}, + }) + require.Len(t, ns1, 2) + + ns2 := nm2.FindNodes([]byte("pivot"), netmap.SFGroup{ + Selectors: []Select{{Count: 2, Key: NodesBucket}}, + }) + require.Len(t, ns2, 2) + require.NotEqual(t, ns1, ns2) + require.Equal(t, []uint32{1, 3}, ns2) + }) +} + +func TestNetMap_Normalise(t *testing.T) { + const testCount = 5 + + nodes := []bootstrap.NodeInfo{ + {Address: "SPB2", PubKey: []byte{4}, Options: []string{"/Location:Europe/Country:Italy", "/Capacity:10", "/Price:100"}}, + {Address: "MSK1", PubKey: []byte{2}, Options: []string{"/Location:Europe/Country:Germany", "/Capacity:10", "/Price:1"}}, + {Address: "MSK2", PubKey: []byte{3}, Options: []string{"/Location:Europe/Country:Russia", "/Capacity:5", "/Price:10"}}, + {Address: "SPB1", PubKey: []byte{1}, Options: []string{"/Location:Europe/Country:France", "/Capacity:20", "/Price:2"}}, + } + + add := func(nm *NetMap, indices ...int) { + for _, i := range indices { + err := nm.Add(nodes[i].Address, nodes[i].PubKey, 0, nodes[i].Options...) + require.NoError(t, err) + } + } + + indices := []int{0, 1, 2, 3} + + nm1 := NewNetmap() + add(nm1, indices...) + norm := nm1.Normalise() + + for i := 0; i < testCount; i++ { + rand.Seed(time.Now().UnixNano()) + rand.Shuffle(len(indices), func(i, j int) { indices[i], indices[j] = indices[j], indices[i] }) + + nm := NewNetmap() + add(nm, indices...) + require.Equal(t, norm, nm.Normalise()) + } + + t.Run("normalise removes duplicates", func(t *testing.T) { + before := NewNetmap() + add(before, indices...) + before.items = append(before.items, before.items...) + + nm := before.Normalise() + require.Len(t, nm.items, len(indices)) + + loop: + for i := range nodes { + for j := range nm.items { + if bytes.Equal(nm.items[j].PubKey, nodes[i].PubKey) { + continue loop + } + } + require.Fail(t, "normalized netmap does not contain '%s' node", nodes[i].Address) + } + }) +} + +func TestNodeInfo_Price(t *testing.T) { + var info bootstrap.NodeInfo + + // too small value + info = bootstrap.NodeInfo{Options: []string{"/Price:0.01048575"}} + require.Equal(t, uint64(0), info.Price()) + + // min value + info = bootstrap.NodeInfo{Options: []string{"/Price:0.01048576"}} + require.Equal(t, uint64(1), info.Price()) + + // big value + info = bootstrap.NodeInfo{Options: []string{"/Price:1000000000.666"}} + require.Equal(t, uint64(1000000000.666*1e8/object.UnitsMB), info.Price()) +} diff --git a/lib/netmap/storage.go b/lib/netmap/storage.go new file mode 100644 index 000000000..fc26bb555 --- /dev/null +++ b/lib/netmap/storage.go @@ -0,0 +1,27 @@ +package netmap + +// GetParams is a group of parameters +// for network map receiving operation. +type GetParams struct { +} + +// GetResult is a group of values +// returned by container receiving operation. +type GetResult struct { + nm *NetMap +} + +// Storage is an interface of the storage of NeoFS network map. +type Storage interface { + GetNetMap(GetParams) (*GetResult, error) +} + +// NetMap is a network map getter. +func (s GetResult) NetMap() *NetMap { + return s.nm +} + +// SetNetMap is a network map setter. 
+func (s *GetResult) SetNetMap(v *NetMap) { + s.nm = v +} diff --git a/lib/netmap/storage_test.go b/lib/netmap/storage_test.go new file mode 100644 index 000000000..27315f8b5 --- /dev/null +++ b/lib/netmap/storage_test.go @@ -0,0 +1,23 @@ +package netmap + +import ( + "testing" + + "github.com/nspcc-dev/neofs-api-go/bootstrap" + "github.com/stretchr/testify/require" +) + +func TestGetResult(t *testing.T) { + s := GetResult{} + + nm := NewNetmap() + require.NoError(t, + nm.AddNode(&bootstrap.NodeInfo{ + Address: "address", + PubKey: []byte{1, 2, 3}, + }), + ) + s.SetNetMap(nm) + + require.Equal(t, nm, s.NetMap()) +} diff --git a/lib/objio/range.go b/lib/objio/range.go new file mode 100644 index 000000000..183fb7398 --- /dev/null +++ b/lib/objio/range.go @@ -0,0 +1,459 @@ +package objio + +import ( + "context" + "io" + "sync" + + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-node/lib/localstore" + "github.com/pkg/errors" +) + +type ( + // Address is a type alias of + // Address from refs package of neofs-api-go. + Address = refs.Address + + // ChopperTable is an interface of RangeChopper storage. + ChopperTable interface { + PutChopper(addr Address, chopper RangeChopper) error + GetChopper(addr Address, rc RCType) (RangeChopper, error) + } + + // RangeChopper is an interface of the chooper of object payload range. + RangeChopper interface { + GetType() RCType + GetAddress() Address + Closed() bool + Chop(ctx context.Context, length, offset int64, fromStart bool) ([]RangeDescriptor, error) + } + + // RelativeReceiver is an interface of object relations controller. + RelativeReceiver interface { + Base(ctx context.Context, addr Address) (RangeDescriptor, error) + Neighbor(ctx context.Context, addr Address, left bool) (RangeDescriptor, error) + } + + // ChildLister is an interface of object children info storage. + ChildLister interface { + List(ctx context.Context, parent Address) ([]RangeDescriptor, error) + } + + // RangeDescriptor groups the information about object payload range. + RangeDescriptor struct { + Size int64 + Offset int64 + Addr Address + + LeftBound bool + RightBound bool + } + + chopCache struct { + rangeList []RangeDescriptor + } + + chopper struct { + *sync.RWMutex + ct RCType + addr Address + nr RelativeReceiver + cacheOffset int64 + cache *chopCache + } + + // ChopperParams groups the parameters of Scylla chopper. + ChopperParams struct { + RelativeReceiver RelativeReceiver + Addr Address + } + + charybdis struct { + skr *chopper + cl ChildLister + } + + // CharybdisParams groups the parameters of Charybdis chopper. + CharybdisParams struct { + Addr Address + ChildLister ChildLister + + ReadySelection []RangeDescriptor + } + + // RCType is an enumeration of object payload range chopper types. + RCType int + + chopperTable struct { + *sync.RWMutex + items map[RCType]map[string]RangeChopper + } +) + +const ( + // RCScylla is an RCType of payload range post-pouncing chopper. + RCScylla RCType = iota + + // RCCharybdis is an RCType of payload range pre-pouncing chopper. + RCCharybdis +) + +var errNilRelativeReceiver = errors.New("relative receiver is nil") + +var errEmptyObjectID = errors.New("object ID is empty") + +var errNilChildLister = errors.New("child lister is nil") + +var errNotFound = errors.New("object range chopper not found") + +var errInvalidBound = errors.New("invalid payload bounds") + +// NewChopperTable is a RangeChopper storage constructor. 
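+// The returned table starts empty and is safe for concurrent use.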
+func NewChopperTable() ChopperTable { + return &chopperTable{ + new(sync.RWMutex), + make(map[RCType]map[string]RangeChopper), + } +} + +// NewScylla constructs object payload range chopper that collects parts of a range on the go. +func NewScylla(p *ChopperParams) (RangeChopper, error) { + if p.RelativeReceiver == nil { + return nil, errNilRelativeReceiver + } + + if p.Addr.ObjectID.Empty() { + return nil, errEmptyObjectID + } + + return &chopper{ + RWMutex: new(sync.RWMutex), + ct: RCScylla, + nr: p.RelativeReceiver, + addr: p.Addr, + cache: &chopCache{ + rangeList: make([]RangeDescriptor, 0), + }, + }, nil +} + +// NewCharybdis constructs object payload range that pre-collects all parts of the range. +func NewCharybdis(p *CharybdisParams) (RangeChopper, error) { + if p.ChildLister == nil && len(p.ReadySelection) == 0 { + return nil, errNilChildLister + } + + if p.Addr.ObjectID.Empty() { + return nil, errEmptyObjectID + } + + cache := new(chopCache) + + if len(p.ReadySelection) > 0 { + cache.rangeList = p.ReadySelection + } + + return &charybdis{ + skr: &chopper{ + RWMutex: new(sync.RWMutex), + ct: RCCharybdis, + addr: p.Addr, + cache: cache, + }, + cl: p.ChildLister, + }, nil +} + +func (ct *chopperTable) PutChopper(addr Address, chopper RangeChopper) error { + ct.Lock() + defer ct.Unlock() + + sAddr := addr.String() + chopperType := chopper.GetType() + + m, ok := ct.items[chopperType] + if !ok { + m = make(map[string]RangeChopper) + } + + if _, ok := m[sAddr]; !ok { + m[sAddr] = chopper + } + + ct.items[chopperType] = m + + return nil +} + +func (ct *chopperTable) GetChopper(addr Address, rc RCType) (RangeChopper, error) { + ct.Lock() + defer ct.Unlock() + + choppers, ok := ct.items[rc] + if !ok { + return nil, errNotFound + } + + chp, ok := choppers[addr.String()] + if !ok { + return nil, errNotFound + } + + return chp, nil +} + +func (c charybdis) GetAddress() Address { + return c.skr.addr +} + +func (c charybdis) GetType() RCType { + return c.skr.ct +} + +func (c charybdis) Closed() bool { + return len(c.skr.cache.rangeList) > 0 +} + +func (c *charybdis) devour(ctx context.Context) error { + if len(c.skr.cache.rangeList) == 0 { + rngs, err := c.cl.List(ctx, c.skr.addr) + if err != nil { + return errors.Wrap(err, "charybdis.pounce faild on children list") + } + + if ln := len(rngs); ln > 0 { + rngs[0].LeftBound = true + rngs[ln-1].RightBound = true + } + + c.skr.cache.rangeList = rngs + } + + return nil +} + +func (c *charybdis) Chop(ctx context.Context, length, offset int64, fromStart bool) ([]RangeDescriptor, error) { + if err := c.devour(ctx); err != nil { + return nil, errors.Wrap(err, "charybdis.Chop failed on devour") + } + + return c.skr.Chop(ctx, length, offset, fromStart) +} + +func (sc *chopCache) Size() (res int64) { + for i := range sc.rangeList { + res += sc.rangeList[i].Size + } + + return +} + +func (sc *chopCache) touchStart() bool { + return len(sc.rangeList) > 0 && sc.rangeList[0].LeftBound +} + +func (sc *chopCache) touchEnd() bool { + ln := len(sc.rangeList) + + return ln > 0 && sc.rangeList[ln-1].RightBound +} + +func min(a, b int64) int64 { + if a < b { + return a + } + + return b +} + +func (sc *chopCache) Chop(offset, size int64) ([]RangeDescriptor, error) { + if offset*size < 0 { + return nil, errInvalidBound + } + + if offset+size > sc.Size() { + return nil, localstore.ErrOutOfRange + } + + var ( + off int64 + res = make([]RangeDescriptor, 0) + ind int + firstOffset int64 + ) + + for i := range sc.rangeList { + diff := offset - off + if diff > 
sc.rangeList[i].Size { + off += sc.rangeList[i].Size + continue + } else if diff < sc.rangeList[i].Size { + ind = i + firstOffset = diff + break + } + + ind = i + 1 + + break + } + + var ( + r RangeDescriptor + num int64 + ) + + for i := ind; num < size; i++ { + cut := min(size-num, sc.rangeList[i].Size-firstOffset) + r = RangeDescriptor{ + Size: cut, + Addr: sc.rangeList[i].Addr, + + LeftBound: sc.rangeList[i].LeftBound, + RightBound: sc.rangeList[i].RightBound, + } + + if i == ind { + r.Offset = firstOffset + firstOffset = 0 + } + + if cut == size-num { + r.Size = cut + } + + res = append(res, r) + + num += cut + } + + return res, nil +} + +func (c *chopper) GetAddress() Address { + return c.addr +} + +func (c *chopper) GetType() RCType { + return c.ct +} + +func (c *chopper) Closed() bool { + return c.cache.touchStart() && c.cache.touchEnd() +} + +func (c *chopper) pounce(ctx context.Context, off int64, set bool) error { + if len(c.cache.rangeList) == 0 { + child, err := c.nr.Base(ctx, c.addr) + if err != nil { + return errors.Wrap(err, "chopper.pounce failed on cache init") + } + + c.cache.rangeList = []RangeDescriptor{child} + } + + oldOff := c.cacheOffset + + defer func() { + if !set { + c.cacheOffset = oldOff + } + }() + + var ( + cacheSize = c.cache.Size() + v = c.cacheOffset + off + ) + + switch { + case v >= 0 && v <= cacheSize: + c.cacheOffset = v + return nil + case v < 0 && c.cache.touchStart(): + c.cacheOffset = 0 + return io.EOF + case v > cacheSize && c.cache.touchEnd(): + c.cacheOffset = cacheSize + return io.EOF + } + + var ( + alloc, written int64 + toLeft = v < 0 + procAddr Address + fPush = func(r RangeDescriptor) { + if toLeft { + c.cache.rangeList = append([]RangeDescriptor{r}, c.cache.rangeList...) + return + } + c.cache.rangeList = append(c.cache.rangeList, r) + } + ) + + if toLeft { + alloc = -v + procAddr = c.cache.rangeList[0].Addr + c.cacheOffset -= cacheSize + } else { + alloc = v - cacheSize + procAddr = c.cache.rangeList[len(c.cache.rangeList)-1].Addr + c.cacheOffset += cacheSize + } + + for written < alloc { + rng, err := c.nr.Neighbor(ctx, procAddr, toLeft) + if err != nil { + return errors.Wrap(err, "chopper.pounce failed on get neighbor") + } + + if diff := alloc - written; diff < rng.Size { + if toLeft { + rng.Offset = rng.Size - diff + } + + c.cacheOffset += diff + + fPush(rng) + + break + } + + c.cacheOffset += rng.Size + fPush(rng) + + written += rng.Size + + if written < alloc && + (rng.LeftBound && toLeft || rng.RightBound && !toLeft) { + return localstore.ErrOutOfRange + } + + procAddr = rng.Addr + } + + return nil +} + +func (c *chopper) Chop(ctx context.Context, length, offset int64, fromStart bool) ([]RangeDescriptor, error) { + c.Lock() + defer c.Unlock() + + if fromStart { + if err := c.pounce(ctx, -(1 << 63), true); err != nil && err != io.EOF { + return nil, errors.Wrap(err, "chopper.Chop failed on chopper.pounce to start") + } + } + + if err := c.pounce(ctx, offset, true); err != nil && err != io.EOF { + return nil, errors.Wrap(err, "chopper.Chop failed on chopper.pounce with set") + } + + if c.cache.Size()-c.cacheOffset < length { + if err := c.pounce(ctx, length, false); err != nil && err != io.EOF { + return nil, errors.Wrap(err, "chopper.Chop failed on chopper.pounce") + } + } + + return c.cache.Chop(c.cacheOffset, length) +} diff --git a/lib/objio/range_test.go b/lib/objio/range_test.go new file mode 100644 index 000000000..6d7290d94 --- /dev/null +++ b/lib/objio/range_test.go @@ -0,0 +1,386 @@ +package objio + +import ( + "context" + 
"crypto/rand" + "io" + "sync" + "testing" + + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" +) + +type ( + addressSet struct { + *sync.RWMutex + items []RangeDescriptor + data [][]byte + } + + testReader struct { + pr object.PositionReader + ct ChopperTable + } +) + +func (r testReader) Read(ctx context.Context, rd RangeDescriptor, rc RCType) ([]byte, error) { + chopper, err := r.ct.GetChopper(rd.Addr, rc) + if err != nil { + return nil, errors.Wrap(err, "testReader.Read failed on get range chopper") + } + + rngs, err := chopper.Chop(ctx, rd.Size, rd.Offset, true) + if err != nil { + return nil, errors.Wrap(err, "testReader.Read failed on chopper.Chop") + } + + var sz int64 + for i := range rngs { + sz += rngs[i].Size + } + + res := make([]byte, 0, sz) + + for i := range rngs { + data, err := r.pr.PRead(ctx, rngs[i].Addr, object.Range{ + Offset: uint64(rngs[i].Offset), + Length: uint64(rngs[i].Size), + }) + if err != nil { + return nil, errors.Wrapf(err, "testReader.Read failed on PRead of range #%d", i) + } + + res = append(res, data...) + } + + return res, nil +} + +func (as addressSet) PRead(ctx context.Context, addr refs.Address, rng object.Range) ([]byte, error) { + as.RLock() + defer as.RUnlock() + + for i := range as.items { + if as.items[i].Addr.CID.Equal(addr.CID) && as.items[i].Addr.ObjectID.Equal(addr.ObjectID) { + return as.data[i][rng.Offset : rng.Offset+rng.Length], nil + } + } + + return nil, errors.New("pread failed") +} + +func (as addressSet) List(ctx context.Context, parent Address) ([]RangeDescriptor, error) { + return as.items, nil +} + +func (as addressSet) Base(ctx context.Context, addr Address) (RangeDescriptor, error) { + return as.items[0], nil +} + +func (as addressSet) Neighbor(ctx context.Context, addr Address, left bool) (RangeDescriptor, error) { + as.Lock() + defer as.Unlock() + + ind := -1 + for i := range as.items { + if as.items[i].Addr.CID.Equal(addr.CID) && as.items[i].Addr.ObjectID.Equal(addr.ObjectID) { + ind = i + break + } + } + + if ind == -1 { + return RangeDescriptor{}, errors.New("range not found") + } + + if left { + if ind > 0 { + ind-- + } else { + return RangeDescriptor{}, io.EOF + } + } else { + if ind < len(as.items)-1 { + ind++ + } else { + return RangeDescriptor{}, io.EOF + } + } + + return as.items[ind], nil +} + +func newTestNeighbor(rngs []RangeDescriptor, data [][]byte) *addressSet { + return &addressSet{ + RWMutex: new(sync.RWMutex), + items: rngs, + data: data, + } +} + +func rangeSize(rngs []RangeDescriptor) (res int64) { + for i := range rngs { + res += rngs[i].Size + } + return +} + +func TestScylla(t *testing.T) { + var ( + cid = [refs.CIDSize]byte{1} + rngs = make([]RangeDescriptor, 0, 10) + pieceSize int64 = 100 + pieceCount int64 = 99 + fullSize = pieceCount * pieceSize + ) + + for i := int64(0); i < pieceCount; i++ { + oid, err := refs.NewObjectID() + require.NoError(t, err) + + rngs = append(rngs, RangeDescriptor{ + Size: pieceSize, + Offset: 0, + Addr: Address{ + ObjectID: oid, + CID: cid, + }, + LeftBound: i == 0, + RightBound: i == pieceCount-1, + }) + } + + oid, err := refs.NewObjectID() + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + t.Run("Zero values in scylla notch/chop", func(t *testing.T) { + scylla, err := NewScylla(&ChopperParams{ + RelativeReceiver: newTestNeighbor(rngs, nil), + Addr: Address{ + ObjectID: oid, + CID: cid, + }, + }) + 
require.NoError(t, err) + + res, err := scylla.Chop(ctx, 0, 0, true) + require.NoError(t, err) + require.Len(t, res, 0) + }) + + t.Run("Common scylla operations in both directions", func(t *testing.T) { + var ( + off = fullSize / 2 + length = fullSize / 4 + ) + + scylla, err := NewScylla(&ChopperParams{ + RelativeReceiver: newTestNeighbor(rngs, nil), + Addr: Address{ + ObjectID: oid, + CID: cid, + }, + }) + require.NoError(t, err) + + choppedCount := int((length-1)/pieceSize + 1) + + if pieceCount > 1 && off%pieceSize > 0 { + choppedCount++ + } + + res, err := scylla.Chop(ctx, fullSize, 0, true) + require.NoError(t, err) + require.Len(t, res, int(pieceCount)) + require.Equal(t, rangeSize(res), fullSize) + require.Equal(t, res, rngs) + + res, err = scylla.Chop(ctx, length, off, true) + require.NoError(t, err) + require.Len(t, res, choppedCount) + + for i := int64(0); i < int64(choppedCount); i++ { + require.Equal(t, res[i].Addr.ObjectID, rngs[pieceCount/2+i].Addr.ObjectID) + } + + require.Equal(t, rangeSize(res), length) + + res, err = scylla.Chop(ctx, length, -length, false) + require.NoError(t, err) + require.Len(t, res, choppedCount) + + for i := int64(0); i < int64(choppedCount); i++ { + require.Equal(t, res[i].Addr.ObjectID, rngs[pieceCount/4+i].Addr.ObjectID) + } + + require.Equal(t, rangeSize(res), length) + }) + + t.Run("Border scylla Chop", func(t *testing.T) { + var ( + err error + res []RangeDescriptor + ) + + scylla, err := NewScylla(&ChopperParams{ + RelativeReceiver: newTestNeighbor(rngs, nil), + Addr: Address{ + ObjectID: oid, + CID: cid, + }, + }) + require.NoError(t, err) + + res, err = scylla.Chop(ctx, fullSize, 0, false) + require.NoError(t, err) + require.Equal(t, res, rngs) + + res, err = scylla.Chop(ctx, fullSize, -100, false) + require.NoError(t, err) + require.Equal(t, res, rngs) + + res, err = scylla.Chop(ctx, fullSize, 1, false) + require.Error(t, err) + + res, err = scylla.Chop(ctx, fullSize, -fullSize, false) + require.NoError(t, err) + require.Equal(t, rangeSize(res), fullSize) + }) +} + +func TestCharybdis(t *testing.T) { + var ( + cid = [refs.CIDSize]byte{1} + rngs = make([]RangeDescriptor, 0, 10) + pieceSize int64 = 100 + pieceCount int64 = 99 + fullSize = pieceCount * pieceSize + data = make([]byte, fullSize) + dataChunks = make([][]byte, 0, pieceCount) + ) + + _, err := rand.Read(data) + require.NoError(t, err) + + for i := int64(0); i < pieceCount; i++ { + oid, err := refs.NewObjectID() + require.NoError(t, err) + + dataChunks = append(dataChunks, data[i*pieceSize:(i+1)*pieceSize]) + + rngs = append(rngs, RangeDescriptor{ + Size: pieceSize, + Offset: 0, + Addr: Address{ + ObjectID: oid, + CID: cid, + }, + }) + } + + oid, err := refs.NewObjectID() + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + t.Run("Zero values in scylla notch/chop", func(t *testing.T) { + charybdis, err := NewCharybdis(&CharybdisParams{ + ChildLister: newTestNeighbor(rngs, nil), + Addr: Address{ + ObjectID: oid, + CID: cid, + }, + }) + require.NoError(t, err) + + res, err := charybdis.Chop(ctx, 0, 0, false) + require.NoError(t, err) + require.Len(t, res, 0) + }) + + t.Run("Common charybdis operations in both directions", func(t *testing.T) { + var ( + off = fullSize / 2 + length = fullSize / 4 + ) + + charybdis, err := NewCharybdis(&CharybdisParams{ + ChildLister: newTestNeighbor(rngs, nil), + Addr: Address{ + ObjectID: oid, + CID: cid, + }, + }) + require.NoError(t, err) + + choppedCount := int((length-1)/pieceSize + 1) + + if 
pieceCount > 1 && off%pieceSize > 0 { + choppedCount++ + } + + res, err := charybdis.Chop(ctx, fullSize, 0, false) + require.NoError(t, err) + require.Len(t, res, int(pieceCount)) + require.Equal(t, rangeSize(res), fullSize) + require.Equal(t, res, rngs) + + res, err = charybdis.Chop(ctx, length, off, false) + require.NoError(t, err) + require.Len(t, res, choppedCount) + + for i := int64(0); i < int64(choppedCount); i++ { + require.Equal(t, res[i].Addr.ObjectID, rngs[pieceCount/2+i].Addr.ObjectID) + } + + require.Equal(t, rangeSize(res), length) + + res, err = charybdis.Chop(ctx, length, -length, false) + require.NoError(t, err) + require.Len(t, res, choppedCount) + + for i := int64(0); i < int64(choppedCount); i++ { + require.Equal(t, res[i].Addr.ObjectID, rngs[pieceCount/4+i].Addr.ObjectID) + } + + require.Equal(t, rangeSize(res), length) + }) + + t.Run("Border charybdis Chop", func(t *testing.T) { + var ( + err error + res []RangeDescriptor + ) + + charybdis, err := NewCharybdis(&CharybdisParams{ + ChildLister: newTestNeighbor(rngs, nil), + Addr: Address{ + ObjectID: oid, + CID: cid, + }, + }) + require.NoError(t, err) + + res, err = charybdis.Chop(ctx, fullSize, 0, false) + require.NoError(t, err) + require.Equal(t, res, rngs) + + res, err = charybdis.Chop(ctx, fullSize, -100, false) + require.NoError(t, err) + require.Equal(t, res, rngs) + + res, err = charybdis.Chop(ctx, fullSize, 1, false) + require.Error(t, err) + + res, err = charybdis.Chop(ctx, fullSize, -fullSize, false) + require.NoError(t, err) + require.Equal(t, rangeSize(res), fullSize) + }) +} diff --git a/lib/objutil/verifier.go b/lib/objutil/verifier.go new file mode 100644 index 000000000..a31dfff43 --- /dev/null +++ b/lib/objutil/verifier.go @@ -0,0 +1,35 @@ +package objutil + +import ( + "bytes" + "context" + + "github.com/nspcc-dev/neofs-api-go/object" +) + +// Verifier is an interface for checking whether an object conforms to a certain criterion. +// Nil error is equivalent to matching the criterion. +type Verifier interface { + Verify(context.Context, *object.Object) error +} + +// MarshalHeaders marshals all object headers which are "higher" than to-th extended header. 
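+// The system header is always written first, followed by the headers with indices [0, to).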
+func MarshalHeaders(obj *object.Object, to int) ([]byte, error) { + buf := new(bytes.Buffer) + + if sysHdr, err := obj.SystemHeader.Marshal(); err != nil { + return nil, err + } else if _, err := buf.Write(sysHdr); err != nil { + return nil, err + } + + for i := range obj.Headers[:to] { + if header, err := obj.Headers[i].Marshal(); err != nil { + return nil, err + } else if _, err := buf.Write(header); err != nil { + return nil, err + } + } + + return buf.Bytes(), nil +} diff --git a/lib/peers/metrics.go b/lib/peers/metrics.go new file mode 100644 index 000000000..9391f7f18 --- /dev/null +++ b/lib/peers/metrics.go @@ -0,0 +1,45 @@ +package peers + +import ( + "github.com/prometheus/client_golang/prometheus" + "google.golang.org/grpc/connectivity" +) + +const stateLabel = "state" + +var grpcConnections = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Help: "gRPC connections", + Name: "grpc_connections", + Namespace: "neofs", + }, + []string{stateLabel}, +) + +var conStates = []connectivity.State{ + connectivity.Idle, + connectivity.Connecting, + connectivity.Ready, + connectivity.TransientFailure, + connectivity.Shutdown, +} + +func updateMetrics(items map[connectivity.State]float64) { + for _, state := range conStates { + grpcConnections.With(prometheus.Labels{ + stateLabel: state.String(), + }).Set(items[state]) + } +} + +func init() { + prometheus.MustRegister( + grpcConnections, + ) + + for _, state := range conStates { + grpcConnections.With(prometheus.Labels{ + stateLabel: state.String(), + }).Set(0) + } +} diff --git a/lib/peers/peers.go b/lib/peers/peers.go new file mode 100644 index 000000000..406e495a8 --- /dev/null +++ b/lib/peers/peers.go @@ -0,0 +1,455 @@ +package peers + +import ( + "context" + "net" + "sync" + "time" + + "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr-net" + "github.com/nspcc-dev/neofs-node/lib/transport" + "github.com/pkg/errors" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/status" +) + +type ( + // Interface is an interface of network connections controller. + Interface interface { + Shutdown() error + Job(context.Context) + Address() multiaddr.Multiaddr + RemoveConnection(maddr multiaddr.Multiaddr) error + Listen(maddr multiaddr.Multiaddr) (manet.Listener, error) + Connect(ctx context.Context, maddr multiaddr.Multiaddr) (manet.Conn, error) + GRPCConnector + } + + // GRPCConnector is an interface of gRPC virtual connector. + GRPCConnector interface { + GRPCConnection(ctx context.Context, maddr multiaddr.Multiaddr, reset bool) (*grpc.ClientConn, error) + } + + // Params groups the parameters of Interface. 
+ Params struct { + Address multiaddr.Multiaddr + Transport transport.Transport + Logger *zap.Logger + Attempts int64 + AttemptsTTL time.Duration + ConnectionTTL time.Duration + ConnectionIDLE time.Duration + MetricsTimeout time.Duration + KeepAliveTTL time.Duration + KeepAlivePingTTL time.Duration + } + + connItem struct { + sync.RWMutex + conn *grpc.ClientConn + used time.Time + } + + iface struct { + log *zap.Logger + addr multiaddr.Multiaddr // self address + tr transport.Transport + tick time.Duration + idle time.Duration + + keepAlive time.Duration + pingTTL time.Duration + + metricsTimeout time.Duration + + grpc struct { + // globalMutex used by garbage collector and other high + globalMutex *sync.RWMutex + // bookMutex resolves concurrent access to the new connection + bookMutex *sync.RWMutex + // connBook contains connection info + // it's mutex resolves concurrent access to existed connection + connBook map[string]*connItem + } + + cons struct { + *sync.RWMutex + items map[string]transport.Connection + } + + lis struct { + *sync.RWMutex + items map[string]manet.Listener + } + } +) + +const ( + defaultAttemptsCount = 5 + defaultAttemptsTTL = 30 * time.Second + defaultCloseTimer = 30 * time.Second + defaultConIdleTTL = 30 * time.Second + defaultKeepAliveTTL = 5 * time.Second + defaultMetricsTimeout = 5 * time.Second + defaultKeepAlivePingTTL = 50 * time.Millisecond +) + +var ( + // ErrDialToSelf is returned if we attempt to dial our own peer + ErrDialToSelf = errors.New("dial to self attempted") + // ErrEmptyAddress returns when you try to create Interface with empty address + ErrEmptyAddress = errors.New("self address could not be empty") + // ErrEmptyTransport returns when you try to create Interface with empty transport + ErrEmptyTransport = errors.New("transport could not be empty") +) + +var errNilMultiaddr = errors.New("empty multi-address") + +func (s *iface) Shutdown() error { + s.lis.Lock() + s.cons.Lock() + s.grpc.globalMutex.Lock() + + defer func() { + s.lis.Unlock() + s.cons.Unlock() + s.grpc.globalMutex.Unlock() + }() + + for addr := range s.cons.items { + if err := s.removeNetConnection(addr); err != nil { + return errors.Wrapf(err, "could not remove net connection `%s`", addr) + } + } + + for addr := range s.grpc.connBook { + if err := s.removeGRPCConnection(addr); err != nil { + return errors.Wrapf(err, "could not remove net connection `%s`", addr) + } + } + + for addr := range s.lis.items { + if err := s.removeListener(addr); err != nil { + return errors.Wrapf(err, "could not remove listener `%s`", addr) + } + } + + return nil +} + +// RemoveConnection from Interface. +// Used only in tests, consider removing. 
+func (s *iface) RemoveConnection(maddr multiaddr.Multiaddr) error { + addr, err := convertAddress(maddr) + if err != nil { + return err + } + + s.cons.Lock() + s.grpc.globalMutex.Lock() + + defer func() { + s.cons.Unlock() + s.grpc.globalMutex.Unlock() + }() + + // Try to remove connection + if err := s.removeNetConnection(maddr.String()); err != nil { + return errors.Wrapf(err, "could not remove net connection `%s`", maddr.String()) + } + + // Try to remove gRPC connection + if err := s.removeGRPCConnection(addr); err != nil { + return errors.Wrapf(err, "could not remove gRPC connection `%s`", addr) + } + + // TODO remove another connections + + return nil +} + +func (s *iface) removeListener(addr string) error { + if lis, ok := s.lis.items[addr]; ok { + if err := lis.Close(); err != nil { + return err + } + + delete(s.lis.items, addr) + } + + return nil +} + +func (s *iface) removeNetConnection(addr string) error { + // Try to remove simple connection + if con, ok := s.cons.items[addr]; ok { + if err := con.Close(); err != nil { + return err + } + + delete(s.cons.items, addr) + } + + return nil +} + +func (s *iface) removeGRPCConnection(addr string) error { + if gCon, ok := s.grpc.connBook[addr]; ok && gCon.conn != nil { + if err := gCon.conn.Close(); err != nil { + state, ok := status.FromError(err) + if !ok { + return err + } + + s.log.Debug("error state", + zap.String("address", addr), + zap.Any("code", state.Code()), + zap.String("state", state.Message()), + zap.Any("details", state.Details())) + } + } + + delete(s.grpc.connBook, addr) + + return nil +} + +// Connect to address +// Used only in tests, consider removing. +func (s *iface) Connect(ctx context.Context, maddr multiaddr.Multiaddr) (manet.Conn, error) { + var ( + err error + con transport.Connection + ) + + if maddr.Equal(s.addr) { + return nil, ErrDialToSelf + } + + s.cons.RLock() + con, ok := s.cons.items[maddr.String()] + s.cons.RUnlock() + + if ok && !con.Closed() { + return con, nil + } + + if con, err = s.newConnection(ctx, maddr, false); err != nil { + return nil, err + } + + s.cons.Lock() + s.cons.items[maddr.String()] = con + s.cons.Unlock() + + return con, nil +} + +// Listen try to find listener or creates new. +func (s *iface) Listen(maddr multiaddr.Multiaddr) (manet.Listener, error) { + // fixme: concurrency issue there, same as 5260f04d + // but it's not so bad, because `Listen()` used + // once during startup routine. + s.lis.RLock() + lis, ok := s.lis.items[maddr.String()] + s.lis.RUnlock() + + if ok { + return lis, nil + } + + lis, err := s.tr.Listen(maddr) + if err != nil { + return nil, err + } + + s.lis.Lock() + s.lis.items[maddr.String()] = lis + s.lis.Unlock() + + return lis, nil +} + +// Address of current Interface instance. 
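As a usage note for Listen and Connect above, here is a hedged sketch (dialPeer is illustrative only, not part of this commit): the Interface listens on its own address and dials a remote multiaddress, reusing an already open connection when one exists. The Address accessor documented by the comment above follows right after this sketch.

package peers

import (
	"context"

	"github.com/multiformats/go-multiaddr"
)

// dialPeer is a hypothetical helper (not part of this commit) showing the
// Listen/Connect pair: Listen is idempotent and returns the existing listener,
// Connect returns ErrDialToSelf when asked to dial the local address.
func dialPeer(ctx context.Context, s Interface, remote multiaddr.Multiaddr) error {
	if _, err := s.Listen(s.Address()); err != nil {
		return err
	}

	con, err := s.Connect(ctx, remote)
	if err != nil {
		return err
	}

	return con.Close()
}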
+func (s *iface) Address() multiaddr.Multiaddr {
+	return s.addr
+}
+
+func isGRPCClosed(con *grpc.ClientConn) bool {
+	switch con.GetState() {
+	case connectivity.Idle, connectivity.Connecting, connectivity.Ready:
+		return false
+	default:
+		// connectivity.TransientFailure, connectivity.Shutdown
+		return true
+	}
+}
+
+func (s *iface) newConnection(ctx context.Context, addr multiaddr.Multiaddr, reset bool) (transport.Connection, error) {
+	return s.tr.Dial(ctx, addr, reset)
+}
+
+func gRPCKeepAlive(ping, ttl time.Duration) grpc.DialOption {
+	return grpc.WithKeepaliveParams(keepalive.ClientParameters{
+		Time:                ping,
+		Timeout:             ttl,
+		PermitWithoutStream: true,
+	})
+}
+
+func convertAddress(maddr multiaddr.Multiaddr) (string, error) {
+	if maddr == nil {
+		return "", errNilMultiaddr
+	}
+
+	addr, err := manet.ToNetAddr(maddr)
+	if err != nil {
+		return "", errors.Wrapf(err, "could not convert address `%s`", maddr)
+	}
+
+	return addr.String(), nil
+}
+
+// GRPCConnection creates a gRPC connection over the peers connection.
+func (s *iface) GRPCConnection(ctx context.Context, maddr multiaddr.Multiaddr, reset bool) (*grpc.ClientConn, error) {
+	addr, err := convertAddress(maddr)
+	if err != nil {
+		return nil, errors.Wrapf(err, "could not convert `%v`", maddr)
+	}
+
+	// Take the global mutex for reading.
+	// All high-level functions, e.g. the peers garbage collector
+	// or shutdown, must use globalMutex.Lock instead.
+	s.grpc.globalMutex.RLock()
+
+	// Get the connection item from the connection book or create a new one.
+	// Concurrent map access is resolved by bookMutex.
+	s.grpc.bookMutex.Lock()
+
+	item, ok := s.grpc.connBook[addr]
+	if !ok {
+		item = new(connItem)
+		s.grpc.connBook[addr] = item
+	}
+
+	s.grpc.bookMutex.Unlock()
+
+	// Now lock the connection item.
+	// This denies concurrent access to the same address,
+	// but allows concurrent access to different addresses.
+	item.Lock()
+
+	if item.conn != nil && !isGRPCClosed(item.conn) {
+		item.used = time.Now()
+
+		item.Unlock()
+		s.grpc.globalMutex.RUnlock()
+
+		return item.conn, nil
+	}
+
+	// If the lines above were moved inside WithDialer, we would get a broken
+	// connection, but no error would be returned, so we first check the
+	// connection and only then return the *gRPC.ClientConn.
+	//
+	// This works together with `grpc.WithBlock()`, see below.
+	conn, err := grpc.DialContext(ctx, maddr.String(),
+		gRPCKeepAlive(s.pingTTL, s.keepAlive),
+		// TODO: we must provide grpc.WithInsecure() or set credentials
+		grpc.WithInsecure(),
+		grpc.WithBlock(),
+		grpc.WithContextDialer(func(ctx context.Context, _ string) (net.Conn, error) {
+			return s.newConnection(ctx, maddr, reset)
+		}),
+	)
+	if err == nil {
+		item.conn = conn
+		item.used = time.Now()
+	}
+
+	item.Unlock()
+	s.grpc.globalMutex.RUnlock()
+
+	return conn, err
+}
+
+// New creates an iface instance and checks its arguments.
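Putting the pieces together, a minimal end-to-end sketch (grpcToPeer is illustrative, not part of this commit): it builds an Interface with New, whose implementation follows this sketch, using the same transport constructor the tests in this package use, and then asks for a cached or freshly dialed gRPC connection.

package peers

import (
	"context"
	"time"

	"github.com/multiformats/go-multiaddr"
	"github.com/nspcc-dev/neofs-node/lib/transport"
	"go.uber.org/zap"
	"google.golang.org/grpc"
)

// grpcToPeer is a hypothetical helper (not part of this commit) wiring
// New and GRPCConnection into one call chain.
func grpcToPeer(ctx context.Context, self, remote multiaddr.Multiaddr) (*grpc.ClientConn, error) {
	s, err := New(Params{
		Address:   self,
		Transport: transport.New(5, time.Second), // attempts and timeout as in the package tests
		Logger:    zap.L(),
		// Timeouts left at zero fall back to the default* constants inside New.
	})
	if err != nil {
		return nil, err
	}

	// reset=false: reuse a cached connection while it is still alive.
	return s.GRPCConnection(ctx, remote, false)
}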
+func New(p Params) (Interface, error) { + if p.Address == nil { + return nil, ErrEmptyAddress + } + + if p.Transport == nil { + return nil, ErrEmptyTransport + } + + if p.Attempts <= 0 { + p.Attempts = defaultAttemptsCount + } + + if p.AttemptsTTL <= 0 { + p.AttemptsTTL = defaultAttemptsTTL + } + + if p.ConnectionTTL <= 0 { + p.ConnectionTTL = defaultCloseTimer + } + + if p.ConnectionIDLE <= 0 { + p.ConnectionIDLE = defaultConIdleTTL + } + + if p.KeepAliveTTL <= 0 { + p.KeepAliveTTL = defaultKeepAliveTTL + } + + if p.KeepAlivePingTTL <= 0 { + p.KeepAlivePingTTL = defaultKeepAlivePingTTL + } + + if p.MetricsTimeout <= 0 { + p.MetricsTimeout = defaultMetricsTimeout + } + + return &iface{ + tick: p.ConnectionTTL, + idle: p.ConnectionIDLE, + + keepAlive: p.KeepAliveTTL, + pingTTL: p.KeepAlivePingTTL, + + metricsTimeout: p.MetricsTimeout, + + log: p.Logger, + addr: p.Address, + tr: p.Transport, + grpc: struct { + globalMutex *sync.RWMutex + bookMutex *sync.RWMutex + connBook map[string]*connItem + }{ + globalMutex: new(sync.RWMutex), + bookMutex: new(sync.RWMutex), + connBook: make(map[string]*connItem), + }, + cons: struct { + *sync.RWMutex + items map[string]transport.Connection + }{ + RWMutex: new(sync.RWMutex), + items: make(map[string]transport.Connection), + }, + lis: struct { + *sync.RWMutex + items map[string]manet.Listener + }{ + RWMutex: new(sync.RWMutex), + items: make(map[string]manet.Listener), + }, + }, nil +} diff --git a/lib/peers/peers_test.go b/lib/peers/peers_test.go new file mode 100644 index 000000000..d71ef7b52 --- /dev/null +++ b/lib/peers/peers_test.go @@ -0,0 +1,484 @@ +package peers + +import ( + "context" + "encoding" + "encoding/json" + "net" + "strings" + "sync" + "testing" + "time" + + "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr-net" + "github.com/nspcc-dev/neofs-node/lib/transport" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" +) + +type ( + fakeAddress struct { + json.Marshaler + json.Unmarshaler + encoding.TextMarshaler + encoding.TextUnmarshaler + encoding.BinaryMarshaler + encoding.BinaryUnmarshaler + } + + // service is used to implement GreaterServer. 
+ service struct{} +) + +// Hello is simple handler +func (*service) Hello(ctx context.Context, req *HelloRequest) (*HelloResponse, error) { + return &HelloResponse{ + Message: "Hello " + req.Name, + }, nil +} + +var _ multiaddr.Multiaddr = (*fakeAddress)(nil) + +func (fakeAddress) Equal(multiaddr.Multiaddr) bool { + return false +} + +func (fakeAddress) Bytes() []byte { + return nil +} + +func (fakeAddress) String() string { + return "fake" +} + +func (fakeAddress) Protocols() []multiaddr.Protocol { + return []multiaddr.Protocol{{Name: "fake"}} +} + +func (fakeAddress) Encapsulate(multiaddr.Multiaddr) multiaddr.Multiaddr { + panic("implement me") +} + +func (fakeAddress) Decapsulate(multiaddr.Multiaddr) multiaddr.Multiaddr { + panic("implement me") +} + +func (fakeAddress) ValueForProtocol(code int) (string, error) { + return "", nil +} + +const testCount = 10 + +func newTestAddress(t *testing.T) multiaddr.Multiaddr { + lis, err := net.Listen("tcp", "0.0.0.0:0") // nolint:gosec + require.NoError(t, err) + require.NoError(t, lis.Close()) + + l, ok := lis.(*net.TCPListener) + require.True(t, ok) + + _, port, err := net.SplitHostPort(l.Addr().String()) + require.NoError(t, err) + + items := []string{ + "ip4", + "127.0.0.1", + "tcp", + port, + } + + maddr, err := multiaddr.NewMultiaddr("/" + strings.Join(items, "/")) + require.NoError(t, err) + + return maddr +} + +func createTestInterface(t *testing.T) Interface { + s, err := New(Params{ + Address: newTestAddress(t), + Transport: transport.New(5, time.Second), + }) + require.NoError(t, err) + return s +} + +func createTestInterfaces(t *testing.T) []Interface { + var ifaces = make([]Interface, 0, testCount) + for i := 0; i < testCount; i++ { + ifaces = append(ifaces, createTestInterface(t)) + } + return ifaces +} + +func connectEachOther(t *testing.T, ifaces []Interface) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + for _, s := range ifaces { + _, err := s.Listen(s.Address()) + require.NoError(t, err) + + for _, n := range ifaces { + if s.Address().Equal(n.Address()) { + continue // do not connect itself + } + + _, err = n.Connect(ctx, s.Address()) + require.NoError(t, err) + } + } +} + +func TestInterface(t *testing.T) { + t.Run("should fail on empty address", func(t *testing.T) { + _, err := New(Params{}) + require.EqualError(t, err, ErrEmptyAddress.Error()) + }) + + t.Run("should fail on empty transport", func(t *testing.T) { + _, err := New(Params{Address: newTestAddress(t)}) + require.EqualError(t, err, ErrEmptyTransport.Error()) + }) + + t.Run("try to create multiple Interface and connect each other", func(t *testing.T) { + ifaces := createTestInterfaces(t) + connectEachOther(t, ifaces) + }) + + t.Run("should fail on itself connection", func(t *testing.T) { + s := createTestInterface(t) + _, err := s.Connect(context.Background(), s.Address()) + require.EqualError(t, err, ErrDialToSelf.Error()) + }) + + t.Run("should fail when you try to remove closed connection", func(t *testing.T) { + s1, err := New(Params{ + Address: newTestAddress(t), + Transport: transport.New(5, time.Second), + }) + require.NoError(t, err) + + s2, err := New(Params{ + Address: newTestAddress(t), + Transport: transport.New(5, time.Second), + }) + require.NoError(t, err) + + _, err = s1.Listen(s1.Address()) + require.NoError(t, err) + + _, err = s2.Listen(s2.Address()) + require.NoError(t, err) + + con, err := s1.Connect(context.Background(), s2.Address()) + require.NoError(t, err) + require.NoError(t, con.Close()) + + err = 
s1.RemoveConnection(s2.Address()) + require.NoError(t, err) + }) + + t.Run("should not create connection / listener twice", func(t *testing.T) { + s1, err := New(Params{ + Address: newTestAddress(t), + Transport: transport.New(5, time.Second), + }) + require.NoError(t, err) + + s2, err := New(Params{ + Address: newTestAddress(t), + Transport: transport.New(5, time.Second), + }) + require.NoError(t, err) + + l1, err := s1.Listen(s1.Address()) + require.NoError(t, err) + + l2, err := s1.Listen(s1.Address()) + require.NoError(t, err) + + require.Equal(t, l1, l2) + + _, err = s2.Listen(s2.Address()) + require.NoError(t, err) + + c1, err := s1.Connect(context.Background(), s2.Address()) + require.NoError(t, err) + + c2, err := s1.Connect(context.Background(), s2.Address()) + require.NoError(t, err) + + require.Equal(t, c1, c2) + require.NoError(t, c1.Close()) + + err = s1.RemoveConnection(s2.Address()) + require.NoError(t, err) + }) + + t.Run("should not try to close unknown connection", func(t *testing.T) { + s1, err := New(Params{ + Address: newTestAddress(t), + Transport: transport.New(5, time.Second), + }) + require.NoError(t, err) + + s2, err := New(Params{ + Address: newTestAddress(t), + Transport: transport.New(5, time.Second), + }) + require.NoError(t, err) + + l1, err := s1.Listen(s1.Address()) + require.NoError(t, err) + + l2, err := s1.Listen(s1.Address()) + require.NoError(t, err) + + require.Equal(t, l1, l2) + + _, err = s2.Listen(s2.Address()) + require.NoError(t, err) + + _, err = s1.Connect(context.Background(), s2.Address()) + require.NoError(t, err) + + err = s1.RemoveConnection(s2.Address()) + require.NoError(t, err) + + err = s1.RemoveConnection(s2.Address()) + require.NoError(t, err) + }) + + t.Run("should shutdown without errors", func(t *testing.T) { + s1, err := New(Params{ + Address: newTestAddress(t), + Transport: transport.New(5, time.Second), + }) + require.NoError(t, err) + + s2, err := New(Params{ + Address: newTestAddress(t), + Transport: transport.New(5, time.Second), + }) + require.NoError(t, err) + + l1, err := s1.Listen(s1.Address()) + require.NoError(t, err) + + l2, err := s1.Listen(s1.Address()) + require.NoError(t, err) + + require.Equal(t, l1, l2) + + _, err = s2.Listen(s2.Address()) + require.NoError(t, err) + + _, err = s1.Connect(context.Background(), s2.Address()) + require.NoError(t, err) + + err = s1.Shutdown() + require.NoError(t, err) + + err = s2.Shutdown() + require.NoError(t, err) + }) + + t.Run("should fail, when shutdown with closed connections or listeners", func(t *testing.T) { + s1, err := New(Params{ + Address: newTestAddress(t), + Transport: transport.New(5, time.Second), + }) + require.NoError(t, err) + + s2, err := New(Params{ + Address: newTestAddress(t), + Transport: transport.New(5, time.Second), + }) + require.NoError(t, err) + + l1, err := s1.Listen(s1.Address()) + require.NoError(t, err) + + l2, err := s1.Listen(s1.Address()) + require.NoError(t, err) + + require.Equal(t, l1, l2) + + lis, err := s2.Listen(s2.Address()) + require.NoError(t, err) + + con, err := s1.Connect(context.Background(), s2.Address()) + require.NoError(t, err) + + require.NoError(t, con.Close()) + + err = s1.Shutdown() + require.NoError(t, err) + + require.NoError(t, lis.Close()) + + err = s2.Shutdown() + require.Error(t, err) + }) + + t.Run("circuit breaker should start fail connection after N-fails", func(t *testing.T) { + s1, err := New(Params{ + Address: newTestAddress(t), + Transport: transport.New(5, time.Second), + }) + require.NoError(t, err) + + addr 
:= newTestAddress(t) + for i := 0; i < defaultAttemptsCount*2; i++ { + _, err = s1.Connect(context.Background(), addr) + require.Error(t, err) + + if i+1 == defaultAttemptsCount { + _, err = s1.Listen(addr) + require.NoError(t, err) + } + } + }) + + t.Run("should return error on bad multi-address", func(t *testing.T) { + s1, err := New(Params{ + Address: newTestAddress(t), + Transport: transport.New(5, time.Second), + }) + require.NoError(t, err) + + _, err = s1.Listen(&fakeAddress{}) + require.Error(t, err) + }) + + t.Run("gRPC connection test", func(t *testing.T) { + var ( + err error + s1, s2 Interface + h = &service{} + g = grpc.NewServer() + a1, a2 = newTestAddress(t), newTestAddress(t) + tr = transport.New(5, time.Second) + _ = h + done = make(chan struct{}) + ) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s1, err = New(Params{ + Address: a1, + Transport: tr, + }) + require.NoError(t, err) + + s2, err = New(Params{ + Address: a2, + Transport: tr, + }) + require.NoError(t, err) + + RegisterGreeterServer(g, h) // register service + + l, err := s1.Listen(a1) + require.NoError(t, err) + + defer l.Close() // nolint:golint + + wg := new(sync.WaitGroup) + wg.Add(1) + + go func() { + close(done) + + _ = g.Serve(manet.NetListener(l)) + + wg.Done() + }() + + <-done // wait for server is start listening connections: + + // Fail connection + con, err := s2.GRPCConnection(ctx, &fakeAddress{}, false) + require.Nil(t, con) + require.Error(t, err) + + con, err = s2.GRPCConnection(ctx, a1, false) + require.NoError(t, err) + + cli := NewGreeterClient(con) + resp, err := cli.Hello(ctx, &HelloRequest{ + Name: "Interface test", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.Equal(t, "Hello Interface test", resp.Message) + + g.GracefulStop() + + wg.Wait() + }) + + t.Run("test grpc connections", func(t *testing.T) { + var ( + ifaces = make([]Interface, 0, testCount) + addresses = make([]multiaddr.Multiaddr, 0, testCount) + ) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + for i := 0; i < testCount; i++ { + addresses = append(addresses, newTestAddress(t)) + + s, err := New(Params{ + Address: addresses[i], + Transport: transport.New(5, time.Second), + }) + require.NoError(t, err) + + lis, err := s.Listen(addresses[i]) + require.NoError(t, err) + + svc := &service{} + srv := grpc.NewServer() + + RegisterGreeterServer(srv, svc) + + ifaces = append(ifaces, s) + + go func() { + l := manet.NetListener(lis) + require.NoError(t, srv.Serve(l)) + }() + } + + const reqName = "test" + wg := new(sync.WaitGroup) + + for i := 0; i < testCount; i++ { + for j := 0; j < testCount; j++ { + wg.Add(1) + go func(i, j int) { + defer wg.Done() + + con, err := ifaces[i].GRPCConnection(ctx, addresses[j], false) + require.NoError(t, err) + + cli := NewGreeterClient(con) + + resp, err := cli.Hello(ctx, &HelloRequest{Name: reqName}) + require.NoError(t, err) + + require.Equal(t, "Hello "+reqName, resp.Message) + + require.NoError(t, con.Close()) + }(i, j) + + } + } + + wg.Wait() + }) +} diff --git a/lib/peers/peers_test.pb.go b/lib/peers/peers_test.pb.go new file mode 100644 index 000000000..aa6fe950d --- /dev/null +++ b/lib/peers/peers_test.pb.go @@ -0,0 +1,600 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: lib/peers/peers_test.proto + +package peers + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Request message example +type HelloRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HelloRequest) Reset() { *m = HelloRequest{} } +func (m *HelloRequest) String() string { return proto.CompactTextString(m) } +func (*HelloRequest) ProtoMessage() {} +func (*HelloRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_c1d278c7f8e3113e, []int{0} +} +func (m *HelloRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HelloRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HelloRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HelloRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_HelloRequest.Merge(m, src) +} +func (m *HelloRequest) XXX_Size() int { + return m.Size() +} +func (m *HelloRequest) XXX_DiscardUnknown() { + xxx_messageInfo_HelloRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_HelloRequest proto.InternalMessageInfo + +func (m *HelloRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type HelloResponse struct { + Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HelloResponse) Reset() { *m = HelloResponse{} } +func (m *HelloResponse) String() string { return proto.CompactTextString(m) } +func (*HelloResponse) ProtoMessage() {} +func (*HelloResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_c1d278c7f8e3113e, []int{1} +} +func (m *HelloResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HelloResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HelloResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HelloResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_HelloResponse.Merge(m, src) +} +func (m *HelloResponse) XXX_Size() int { + return m.Size() +} +func (m *HelloResponse) XXX_DiscardUnknown() { + xxx_messageInfo_HelloResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_HelloResponse proto.InternalMessageInfo + +func (m *HelloResponse) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func init() { + proto.RegisterType((*HelloRequest)(nil), "peers.HelloRequest") + 
proto.RegisterType((*HelloResponse)(nil), "peers.HelloResponse") +} + +func init() { proto.RegisterFile("lib/peers/peers_test.proto", fileDescriptor_c1d278c7f8e3113e) } + +var fileDescriptor_c1d278c7f8e3113e = []byte{ + // 202 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xca, 0xc9, 0x4c, 0xd2, + 0x2f, 0x48, 0x4d, 0x2d, 0x2a, 0x86, 0x90, 0xf1, 0x25, 0xa9, 0xc5, 0x25, 0x7a, 0x05, 0x45, 0xf9, + 0x25, 0xf9, 0x42, 0xac, 0x60, 0x11, 0x25, 0x25, 0x2e, 0x1e, 0x8f, 0xd4, 0x9c, 0x9c, 0xfc, 0xa0, + 0xd4, 0xc2, 0xd2, 0xd4, 0xe2, 0x12, 0x21, 0x21, 0x2e, 0x96, 0xbc, 0xc4, 0xdc, 0x54, 0x09, 0x46, + 0x05, 0x46, 0x0d, 0xce, 0x20, 0x30, 0x5b, 0x49, 0x93, 0x8b, 0x17, 0xaa, 0xa6, 0xb8, 0x20, 0x3f, + 0xaf, 0x38, 0x55, 0x48, 0x82, 0x8b, 0x3d, 0x37, 0xb5, 0xb8, 0x38, 0x31, 0x1d, 0xa6, 0x0e, 0xc6, + 0x35, 0xb2, 0xe5, 0x62, 0x77, 0x2f, 0x4a, 0x4d, 0x2d, 0x49, 0x2d, 0x12, 0x32, 0xe2, 0x62, 0x05, + 0xeb, 0x12, 0x12, 0xd6, 0x03, 0x5b, 0xa5, 0x87, 0x6c, 0x8f, 0x94, 0x08, 0xaa, 0x20, 0xc4, 0x60, + 0x27, 0xeb, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc6, + 0x63, 0x39, 0x86, 0x28, 0xcd, 0xf4, 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0xfd, + 0xbc, 0xe2, 0x82, 0xe4, 0x64, 0xdd, 0x94, 0xd4, 0x32, 0xfd, 0xbc, 0xd4, 0xfc, 0xb4, 0x62, 0xdd, + 0xbc, 0xfc, 0x94, 0x54, 0x7d, 0xb8, 0x17, 0x93, 0xd8, 0xc0, 0x1e, 0x33, 0x06, 0x04, 0x00, 0x00, + 0xff, 0xff, 0xd1, 0x3d, 0x22, 0x05, 0xf6, 0x00, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GreeterClient is the client API for Greeter service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GreeterClient interface { + Hello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloResponse, error) +} + +type greeterClient struct { + cc *grpc.ClientConn +} + +func NewGreeterClient(cc *grpc.ClientConn) GreeterClient { + return &greeterClient{cc} +} + +func (c *greeterClient) Hello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloResponse, error) { + out := new(HelloResponse) + err := c.cc.Invoke(ctx, "/peers.Greeter/Hello", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// GreeterServer is the server API for Greeter service. +type GreeterServer interface { + Hello(context.Context, *HelloRequest) (*HelloResponse, error) +} + +// UnimplementedGreeterServer can be embedded to have forward compatible implementations. 
+type UnimplementedGreeterServer struct { +} + +func (*UnimplementedGreeterServer) Hello(ctx context.Context, req *HelloRequest) (*HelloResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Hello not implemented") +} + +func RegisterGreeterServer(s *grpc.Server, srv GreeterServer) { + s.RegisterService(&_Greeter_serviceDesc, srv) +} + +func _Greeter_Hello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HelloRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GreeterServer).Hello(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/peers.Greeter/Hello", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GreeterServer).Hello(ctx, req.(*HelloRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Greeter_serviceDesc = grpc.ServiceDesc{ + ServiceName: "peers.Greeter", + HandlerType: (*GreeterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Hello", + Handler: _Greeter_Hello_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "lib/peers/peers_test.proto", +} + +func (m *HelloRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HelloRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HelloRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintPeersTest(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HelloResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HelloResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HelloResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintPeersTest(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintPeersTest(dAtA []byte, offset int, v uint64) int { + offset -= sovPeersTest(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *HelloRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPeersTest(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *HelloResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Message) + if l > 0 { + n += 1 + l + sovPeersTest(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func 
sovPeersTest(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozPeersTest(x uint64) (n int) { + return sovPeersTest(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *HelloRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPeersTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HelloRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HelloRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPeersTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPeersTest + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPeersTest + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPeersTest(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPeersTest + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPeersTest + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HelloResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPeersTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HelloResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HelloResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPeersTest + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPeersTest + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPeersTest + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPeersTest(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPeersTest + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPeersTest + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPeersTest(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPeersTest + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPeersTest + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPeersTest + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthPeersTest + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupPeersTest + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthPeersTest + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthPeersTest = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPeersTest = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupPeersTest = fmt.Errorf("proto: unexpected end of group") +) diff --git a/lib/peers/peers_test.proto b/lib/peers/peers_test.proto new file mode 100644 index 000000000..574409f51 --- /dev/null +++ b/lib/peers/peers_test.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; +option go_package = "github.com/nspcc-dev/neofs-node/lib/peers"; + +package peers; + +// The Greater service definition. +service Greeter { + rpc Hello(HelloRequest) returns (HelloResponse); +} + +// Request message example +message HelloRequest { + string name = 1; +} + +message HelloResponse { + string message = 1; +} diff --git a/lib/peers/peerstore.go b/lib/peers/peerstore.go new file mode 100644 index 000000000..550b5c9db --- /dev/null +++ b/lib/peers/peerstore.go @@ -0,0 +1,238 @@ +package peers + +import ( + "crypto/ecdsa" + "crypto/elliptic" + + "github.com/multiformats/go-multiaddr" + crypto "github.com/nspcc-dev/neofs-crypto" + "github.com/nspcc-dev/neofs-node/lib/netmap" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +type ( + // Store is an interface to storage of all p2p connections + Store interface { + SelfIDReceiver + PublicKeyStore + AddressIDReceiver + AddPeer(multiaddr.Multiaddr, *ecdsa.PublicKey, *ecdsa.PrivateKey) (ID, error) + DeletePeer(ID) + PeerNetAddressStore + GetPrivateKey(ID) (*ecdsa.PrivateKey, error) + Update(*netmap.NetMap) error + Sign([]byte) ([]byte, error) + Verify(id ID, data, sign []byte) error + Check(min int) error + } + + // PublicKeyStore is an interface of the storage of peer's public keys. + PublicKeyStore interface { + GetPublicKey(ID) (*ecdsa.PublicKey, error) + } + + // SelfIDReceiver is an interface of local peer ID value with read access. + SelfIDReceiver interface { + SelfID() ID + } + + // AddressIDReceiver is an interface of Multiaddr to ID converter. 
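A hedged sketch of how the Store contract above is meant to be used (signedExchange is illustrative, not part of this commit): it assumes a Store already built with NewStore, which appears further down, registers a remote peer without knowing its private key, and verifies a signature attributed to it. The AddressIDReceiver interface documented by the comment above follows right after this sketch.

package peers

import (
	"crypto/ecdsa"

	"github.com/multiformats/go-multiaddr"
)

// signedExchange is a hypothetical helper (not part of this commit).
func signedExchange(ps Store, remoteAddr multiaddr.Multiaddr, remotePub *ecdsa.PublicKey, data, sign []byte) error {
	// The private key of a remote peer is unknown, so nil is passed,
	// mirroring how storage.Update registers peers from the netmap.
	id, err := ps.AddPeer(remoteAddr, remotePub, nil)
	if err != nil {
		return err
	}

	// Verify looks the public key up by ID and checks the signature.
	return ps.Verify(id, data, sign)
}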
+ AddressIDReceiver interface { + AddressID(multiaddr.Multiaddr) (ID, error) + } + + // PeerNetAddressStore is an interface of ID to Multiaddr converter. + PeerNetAddressStore interface { + GetAddr(ID) (multiaddr.Multiaddr, error) + } + + // StoreParams for creating new Store. + StoreParams struct { + Addr multiaddr.Multiaddr + Key *ecdsa.PrivateKey + Storage Storage + StoreCap int + Logger *zap.Logger + } + + store struct { + self ID + addr multiaddr.Multiaddr + storage Storage + log *zap.Logger + key *ecdsa.PrivateKey + } +) + +const defaultMinimalSignaturesCount = 3 + +var errPeerNotFound = errors.New("peer not found") + +func (p *store) AddressID(addr multiaddr.Multiaddr) (ID, error) { + if p.addr.Equal(addr) { + return p.self, nil + } + + res := p.storage.Filter(maddrFilter(addr)) + if len(res) == 0 { + return "", errPeerNotFound + } + + return res[0], nil +} + +func maddrFilter(addr multiaddr.Multiaddr) PeerFilter { + return func(p Peer) bool { return addr.Equal(p.Address()) } +} + +// SelfID return ID of current Node. +func (p *store) SelfID() ID { + return p.self +} + +// AddPeer to store.. +// Try to get PeerID from PublicKey, or return error +// Store Address and PublicKey for that PeerID. +func (p *store) AddPeer(addr multiaddr.Multiaddr, pub *ecdsa.PublicKey, key *ecdsa.PrivateKey) (ID, error) { + item := NewPeer(addr, pub, key) + if err := p.storage.Set(item.ID(), item); err != nil { + return "", err + } + + return item.ID(), nil +} + +// DeletePeer from store. +func (p *store) DeletePeer(id ID) { + if err := p.storage.Rem(id); err != nil { + p.log.Error("could not delete peer", + zap.Stringer("id", id), + zap.Error(err)) + } +} + +// Update update Store by new network map. +func (p *store) Update(nm *netmap.NetMap) error { + if err := p.storage.Update(nm); err != nil { + return err + } + + // we must provide our PrivateKey, after updating + if peer, err := p.storage.Get(p.self); err != nil { + peer = NewPeer(p.addr, &p.key.PublicKey, p.key) + return p.storage.Set(p.self, peer) + } else if err := peer.SetPrivateKey(p.key); err != nil { + return errors.Wrapf(err, "could not update private key (%s)", p.self.String()) + } else if err := p.storage.Set(p.self, peer); err != nil { + return errors.Wrapf(err, "could not save peer(%s)", p.self.String()) + } + + return nil +} + +// GetAddr by PeerID. +func (p *store) GetAddr(id ID) (multiaddr.Multiaddr, error) { + n, err := p.storage.Get(id) + if err != nil { + return nil, err + } + + return n.Address(), nil +} + +// GetPublicKey by PeerID. +func (p *store) GetPublicKey(id ID) (*ecdsa.PublicKey, error) { + n, err := p.storage.Get(id) + if err != nil { + return nil, err + } + + return n.PublicKey(), nil +} + +// GetPrivateKey by PeerID. +func (p *store) GetPrivateKey(id ID) (*ecdsa.PrivateKey, error) { + n, err := p.storage.Get(id) + if err != nil { + return nil, err + } + + return n.PrivateKey() +} + +// Sign signs a data using the private key. If the data is longer than +// the bit-length of the private key's curve order, the hash will be +// truncated to that length. It returns the signature as slice bytes. +// The security of the private key depends on the entropy of rand. +func (p *store) Sign(data []byte) ([]byte, error) { + return crypto.Sign(p.key, data) +} + +// Verify verifies the signature in r, s of hash using the public key, pub. Its +// return value records whether the signature is valid. 
+// If store doesn't contains public key for ID, +// returns error about that +// TODO we must provide same method, but for IR list, to check, +// that we have valid signatures of needed IR members +func (p *store) Verify(id ID, data, sign []byte) error { + if pub, err := p.GetPublicKey(id); err != nil { + return errors.Wrap(err, "could not get PublicKey") + } else if err := crypto.Verify(pub, data, sign); err != nil { + return errors.Wrapf(err, "could not verify signature: sign(`%x`) & data(`%x`)", sign, data) + } + + return nil +} + +// Neighbours peers that which are distributed by hrw(id). +func (p *store) Neighbours(seed int64, count int) ([]ID, error) { + return p.storage.List(p.self, seed, count) +} + +// Check validate signatures count +// TODO replace with settings or something else. +// We can fetch min-count from settings, or +// use another method for validate this.. +func (p *store) Check(min int) error { + if min <= defaultMinimalSignaturesCount { + return errors.Errorf("invalid count of valid signatures: minimum %d, actual %d", + defaultMinimalSignaturesCount, + min, + ) + } + + return nil +} + +// NewStore creates new store by params. +func NewStore(p StoreParams) (Store, error) { + var storage Storage + + if p.Key == nil || p.Key.Curve != elliptic.P256() { + return nil, crypto.ErrEmptyPrivateKey + } + + if p.Addr == nil { + return nil, errNilMultiaddr + } + + if storage = p.Storage; storage == nil { + storage = NewSimpleStorage(p.StoreCap, p.Logger) + } + + id := IDFromPublicKey(&p.Key.PublicKey) + peer := NewPeer(p.Addr, &p.Key.PublicKey, p.Key) + + if err := storage.Set(id, peer); err != nil { + return nil, err + } + + return &store{ + self: id, + storage: storage, + key: p.Key, + addr: p.Addr, + log: p.Logger, + }, nil +} diff --git a/lib/peers/peerstore_test.go b/lib/peers/peerstore_test.go new file mode 100644 index 000000000..2f6449b01 --- /dev/null +++ b/lib/peers/peerstore_test.go @@ -0,0 +1,245 @@ +package peers + +import ( + "strconv" + "testing" + + "github.com/multiformats/go-multiaddr" + crypto "github.com/nspcc-dev/neofs-crypto" + "github.com/nspcc-dev/neofs-node/lib/netmap" + "github.com/nspcc-dev/neofs-node/lib/test" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +type testSign struct { + ID ID + Sign []byte +} + +const debug = false + +func createNetworkMap(t *testing.T) *netmap.NetMap { + var ( + Region = []string{"America", "Europe", "Asia"} + Country = map[string][]string{ + "America": {"USA", "Canada", "Brazil"}, + "Europe": {"France", "Germany", "Sweden"}, + "Asia": {"Russia", "China", "Korea", "Japan"}, + } + City = map[string][]string{ + "USA": {"Washington", "New-York", "Seattle", "Chicago", "Detroit"}, + "Canada": {"Toronto", "Ottawa", "Quebec", "Winnipeg"}, + "Brazil": {"Rio-de-Janeiro", "San-Paulo", "Salvador"}, + "France": {"Paris", "Lion", "Nice", "Marseille"}, + "Germany": {"Berlin", "Munich", "Dortmund", "Hamburg", "Cologne"}, + "Sweden": {"Stockholm", "Malmo", "Uppsala"}, + "Russia": {"Moscow", "Saint-Petersburg", "Ekaterinburg", "Novosibirsk"}, + "China": {"Beijing", "Shanghai", "Shenzhen", "Guangzhou"}, + "Korea": {"Seoul", "Busan"}, + "Japan": {"Tokyo", "Kyoto", "Yokohama", "Osaka"}, + } + nm = netmap.NewNetmap() + port int64 = 4000 + i = 0 + ) + for _, r := range Region { + for _, co := range Country[r] { + for _, ci := range City[co] { + addr := "/ip4/127.0.0.1/tcp/" + strconv.FormatInt(port, 10) + port++ + option := "/Region:" + r + "/Country:" + co + "/City:" + ci + pk := 
crypto.MarshalPublicKey(&test.DecodeKey(i).PublicKey) + i++ + + require.NoError(t, nm.Add(addr, pk, 0, option)) + } + } + } + return nm +} + +func testMulatiAddress(t *testing.T) multiaddr.Multiaddr { + addr, err := multiaddr.NewMultiaddr("/ip4/0.0.0.0/tcp/0") + require.NoError(t, err) + return addr +} + +func TestPeerstore(t *testing.T) { + var ( + l = test.NewTestLogger(debug) + key = test.DecodeKey(1) + ) + + t.Run("it should creates new store", func(t *testing.T) { + ps, err := NewStore(StoreParams{ + Key: key, + Logger: l, + Addr: testMulatiAddress(t), + }) + require.NoError(t, err) + require.NotNil(t, ps) + + maddr, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/4000") + require.NoError(t, err) + + expect := crypto.MarshalPublicKey(&key.PublicKey) + + id, err := ps.AddPeer(maddr, &key.PublicKey, key) + require.NoError(t, err) + + pub, err := ps.GetPublicKey(id) + require.NoError(t, err) + + actual := crypto.MarshalPublicKey(pub) + require.Equal(t, expect, actual) + + addr1, err := ps.GetAddr(id) + require.NoError(t, err) + require.True(t, maddr.Equal(addr1)) + + ps.DeletePeer(id) + addr1, err = ps.GetAddr(id) + require.Nil(t, addr1) + require.Error(t, err) + }) + + t.Run("it should creates new store based on netmap", func(t *testing.T) { + var nm = createNetworkMap(t) + + ps, err := NewStore(StoreParams{ + Key: key, + Logger: l, + Addr: testMulatiAddress(t), + }) + require.NoError(t, err) + require.NotNil(t, ps) + + err = ps.Update(nm) + require.NoError(t, err) + + expect := nm.Items()[0].PubKey + + id := IDFromBinary(expect) + + addr, err := ps.GetAddr(id) + require.NoError(t, err) + require.Equal(t, nm.Items()[0].Address, addr.String()) + + pub, err := ps.GetPublicKey(id) + require.NoError(t, err) + + actual := crypto.MarshalPublicKey(pub) + require.Equal(t, expect, actual) + }) + + t.Run("multiple store's", func(t *testing.T) { + var ( + count = 10 + items = make([]Store, 0, count) + + data = []byte("Hello world") + peers = make([]Peer, 0, count) + signs = make([]*testSign, 0, count) + ) + + for i := 0; i < count; i++ { + key := test.DecodeKey(i) + addr, err := multiaddr.NewMultiaddr("/ip4/0.0.0.0/tcp/0") + require.NoError(t, err) + + peers = append(peers, NewLocalPeer(addr, key)) + } + + for i := 0; i < count; i++ { + key, err := peers[i].PrivateKey() + require.NoError(t, err) + + store, err := NewStore(StoreParams{ + Addr: peers[i].Address(), + Key: key, + Logger: zap.L(), + }) + require.NoError(t, err) + + items = append(items, store) + + hash, err := store.Sign(data) + require.NoError(t, err) + + sign := &testSign{ + ID: peers[i].ID(), + Sign: hash, + } + signs = append(signs, sign) + l.Info("add peer", + zap.Stringer("id", peers[i].ID())) + } + + for i := 0; i < count; i++ { + signature, err := items[i].Sign(data) + require.NoError(t, err) + + // check the newly generated signature + err = items[i].Verify(peers[i].ID(), data, signature) + require.NoError(t, err) + + for j := 0; j < count; j++ { + // check previously generated signature + addr, pub := peers[j].Address(), peers[j].PublicKey() + key, err := peers[j].PrivateKey() + require.NoError(t, err) + + _, err = items[i].AddPeer(addr, pub, key) + require.NoError(t, err) + + err = items[i].Verify(signs[j].ID, data, signs[j].Sign) + require.NoError(t, err) + } + } + }) + + t.Run("Get self address", func(t *testing.T) { + addr := testMulatiAddress(t) + + ps, err := NewStore(StoreParams{ + Key: key, + Logger: l, + Addr: addr, + }) + require.NoError(t, err) + require.NotNil(t, ps) + + selfAddr, err := ps.GetAddr(ps.SelfID()) 
+ require.NoError(t, err) + require.Equal(t, selfAddr, addr) + }) + + t.Run("Get ID for multi address", func(t *testing.T) { + addr := testMulatiAddress(t) + + ps, err := NewStore(StoreParams{ + Key: key, + Logger: l, + Addr: addr, + }) + require.NoError(t, err) + require.NotNil(t, ps) + + maddr, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/4000") + require.NoError(t, err) + + id, err := ps.AddPeer(maddr, &key.PublicKey, key) + require.NoError(t, err) + + res, err := ps.AddressID(maddr) + require.NoError(t, err) + require.True(t, id.Equal(res)) + + maddr2, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/4001") + require.NoError(t, err) + + res, err = ps.AddressID(maddr2) + require.EqualError(t, err, errPeerNotFound.Error()) + }) +} diff --git a/lib/peers/storage.go b/lib/peers/storage.go new file mode 100644 index 000000000..506464941 --- /dev/null +++ b/lib/peers/storage.go @@ -0,0 +1,296 @@ +package peers + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/sha256" + "sync" + + "github.com/multiformats/go-multiaddr" + "github.com/multiformats/go-multihash" + "github.com/nspcc-dev/hrw" + crypto "github.com/nspcc-dev/neofs-crypto" + "github.com/nspcc-dev/neofs-node/lib/netmap" + "github.com/pkg/errors" + "github.com/spaolacci/murmur3" + "go.uber.org/zap" +) + +type ( + // Peer is value, that stores in Store storage + Peer interface { + ID() ID + Address() multiaddr.Multiaddr + PublicKey() *ecdsa.PublicKey + PrivateKey() (*ecdsa.PrivateKey, error) + SetPrivateKey(*ecdsa.PrivateKey) error + + // TODO implement marshal/unmarshal binary. + // Not sure that this method need for now, + // that's why let's leave it on future + // encoding.BinaryMarshaler + // encoding.BinaryUnmarshaler + } + + peer struct { + id ID + pub *ecdsa.PublicKey + key *ecdsa.PrivateKey + addr multiaddr.Multiaddr + } + + // ID is a type of peer identification + ID string + + storage struct { + log *zap.Logger + + mu *sync.RWMutex + items map[ID]Peer + } + + // PeerFilter is a Peer filtering function. + PeerFilter func(Peer) bool + + // Storage is storage interface for Store + Storage interface { + Get(ID) (Peer, error) + Set(ID, Peer) error + Has(ID) bool + Rem(ID) error + List(ID, int64, int) ([]ID, error) + Filter(PeerFilter) []ID + Update(*netmap.NetMap) error + } +) + +const defaultStoreCapacity = 100 + +var ( + errUnknownPeer = errors.New("unknown peer") + errBadPublicKey = errors.New("bad public key") +) + +var errNilNetMap = errors.New("netmap is nil") + +// Hash method used in HRW-library. +func (i ID) Hash() uint64 { + return murmur3.Sum64(i.Bytes()) +} + +// NewLocalPeer creates new peer instance. +func NewLocalPeer(addr multiaddr.Multiaddr, key *ecdsa.PrivateKey) Peer { + pub := &key.PublicKey + + return &peer{ + id: IDFromPublicKey(pub), + pub: pub, + key: key, + addr: addr, + } +} + +// NewPeer creates new peer instance. +func NewPeer(addr multiaddr.Multiaddr, pub *ecdsa.PublicKey, key *ecdsa.PrivateKey) Peer { + return &peer{ + id: IDFromPublicKey(pub), + pub: pub, + key: key, + addr: addr, + } +} + +func (p *peer) SetPrivateKey(key *ecdsa.PrivateKey) error { + if key == nil || key.Curve != elliptic.P256() { + return crypto.ErrEmptyPrivateKey + } + + p.key = key + + return nil +} + +// ID of peer. +func (p peer) ID() ID { + return p.id +} + +// Address of peer. +func (p peer) Address() multiaddr.Multiaddr { + return p.addr +} + +// PublicKey returns copy of peer public key. 
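The ID derivation above can be checked with a small sketch (sameIdentity is illustrative, not part of this commit): IDFromPublicKey is defined below as IDFromBinary over the marshaled key, so both paths must agree for the same key.

package peers

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"

	crypto "github.com/nspcc-dev/neofs-crypto"
)

// sameIdentity is a hypothetical check (not part of this commit) that the two
// ID constructors agree for the same P-256 key.
func sameIdentity() (bool, error) {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return false, err
	}

	byKey := IDFromPublicKey(&key.PublicKey)
	byBytes := IDFromBinary(crypto.MarshalPublicKey(&key.PublicKey))

	return byKey.Equal(byBytes), nil
}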
+func (p peer) PublicKey() *ecdsa.PublicKey { + return p.pub +} + +func (p peer) PrivateKey() (*ecdsa.PrivateKey, error) { + if p.key == nil { + return nil, crypto.ErrEmptyPrivateKey + } + + return p.key, nil +} + +// String returns string representation of PeerID. +func (i ID) String() string { + return string(i) +} + +// -- -- // + +// Bytes returns bytes representation of PeerID. +func (i ID) Bytes() []byte { + return []byte(i) +} + +// Equal checks that both id's are identical. +func (i ID) Equal(id ID) bool { + return i == id +} + +// IDFromPublicKey returns peer ID for host with given public key. +func IDFromPublicKey(pk *ecdsa.PublicKey) ID { + if pk == nil { + return "" + } + + return IDFromBinary(crypto.MarshalPublicKey(pk)) +} + +// IDFromBinary returns peer ID for host with given slice of byte. +func IDFromBinary(b []byte) ID { + bytes := sha256.Sum256(b) + hash, _ := multihash.Encode(bytes[:], multihash.IDENTITY) + ident := multihash.Multihash(hash) + + return ID(ident.B58String()) +} + +// NewSimpleStorage is implementation over map. +func NewSimpleStorage(capacity int, l *zap.Logger) Storage { + if capacity <= 0 { + capacity = defaultStoreCapacity + } + + return &storage{ + log: l, + mu: new(sync.RWMutex), + items: make(map[ID]Peer, capacity), + } +} + +// List peers that which are distributed by hrw(seed). +func (s *storage) List(id ID, seed int64, count int) ([]ID, error) { + s.mu.RLock() + items := make([]ID, 0, len(s.items)) + + for key := range s.items { + // ignore ourselves + if id.Equal(key) { + continue + } + + items = append(items, key) + } + s.mu.RUnlock() + + // distribute keys by hrw(seed) + hrw.SortSliceByValue(items, + uint64(seed)) + + return items[:count], nil +} + +// Get peer by ID. +func (s *storage) Get(id ID) (Peer, error) { + s.mu.RLock() + p, ok := s.items[id] + s.mu.RUnlock() + + if ok { + return p, nil + } + + return nil, errors.Wrapf(errUnknownPeer, "peer(%s)", id) +} + +// Set peer by id. +func (s *storage) Set(id ID, p Peer) error { + s.mu.Lock() + s.items[id] = p + s.mu.Unlock() + + return nil +} + +// Has checks peer exists by id. +func (s *storage) Has(id ID) bool { + s.mu.RLock() + _, ok := s.items[id] + s.mu.RUnlock() + + return ok +} + +// Rem peer by id. +func (s *storage) Rem(id ID) error { + s.mu.Lock() + delete(s.items, id) + s.mu.Unlock() + + return nil +} + +// Update storage by network map. 
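For the Storage implementation above, a short usage sketch (pickNeighbours is illustrative, not part of this commit): it fills a simple storage and asks for count neighbours distributed by hrw(seed), excluding the requesting peer itself, exactly as List does. The Update method documented by the comment above follows right after this sketch.

package peers

import (
	"go.uber.org/zap"
)

// pickNeighbours is a hypothetical helper (not part of this commit).
// count should not exceed len(others), since List never returns the requesting ID.
func pickNeighbours(self Peer, others []Peer, seed int64, count int) ([]ID, error) {
	s := NewSimpleStorage(len(others)+1, zap.L())

	if err := s.Set(self.ID(), self); err != nil {
		return nil, err
	}

	for _, p := range others {
		if err := s.Set(p.ID(), p); err != nil {
			return nil, err
		}
	}

	return s.List(self.ID(), seed, count)
}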
+func (s *storage) Update(nm *netmap.NetMap) error { + s.mu.Lock() + defer s.mu.Unlock() + + list := nm.ItemsCopy() + if len(list) == 0 { + return errNilNetMap + } + + items := make(map[ID]Peer, len(s.items)) + + for i := range list { + addr, err := multiaddr.NewMultiaddr(list[i].Address) + if err != nil { + return errors.Wrapf(err, "address=`%s`", list[i].Address) + } + + pk := crypto.UnmarshalPublicKey(list[i].PubKey) + if pk == nil && list[i].PubKey != nil { + return errors.Wrapf(errBadPublicKey, "pubkey=`%x`", list[i].PubKey) + } + + id := IDFromPublicKey(pk) + if pv, ok := s.items[id]; ok { + if pv.Address() != nil && pv.Address().Equal(addr) { + items[id] = pv + continue + } + } + + items[id] = NewPeer(addr, pk, nil) + } + + s.items = items + + return nil +} + +func (s *storage) Filter(filter PeerFilter) (res []ID) { + s.mu.RLock() + defer s.mu.RUnlock() + + for id, peer := range s.items { + if filter(peer) { + res = append(res, id) + } + } + + return +} diff --git a/lib/peers/worker.go b/lib/peers/worker.go new file mode 100644 index 000000000..1fac37f06 --- /dev/null +++ b/lib/peers/worker.go @@ -0,0 +1,67 @@ +package peers + +import ( + "context" + "time" + + "go.uber.org/zap" + "google.golang.org/grpc/connectivity" +) + +func (s *iface) Job(ctx context.Context) { + var ( + tick = time.NewTimer(s.tick) + metrics = time.NewTimer(s.metricsTimeout) + ) + +loop: + for { + select { + case <-ctx.Done(): + break loop + case <-metrics.C: + var items = make(map[connectivity.State]float64) + s.grpc.globalMutex.Lock() + for _, item := range s.grpc.connBook { + if item.conn != nil { + items[item.conn.GetState()]++ + } + } + s.grpc.globalMutex.Unlock() + + updateMetrics(items) + + metrics.Reset(s.metricsTimeout) + case <-tick.C: + var count int + + s.grpc.globalMutex.Lock() + for addr, item := range s.grpc.connBook { + if item.conn == nil || isGRPCClosed(item.conn) || time.Since(item.used) > s.idle { + if err := s.removeGRPCConnection(addr); err != nil { + s.log.Error("could not close connection", + zap.String("address", addr), + zap.String("target", item.conn.Target()), + zap.Stringer("idle", time.Since(item.used)), + zap.Error(err)) + continue + } + + count++ + } else { + s.log.Debug("ignore connection", + zap.String("address", addr), + zap.Stringer("idle", time.Since(item.used))) + } + } + s.grpc.globalMutex.Unlock() + + s.log.Debug("cleanup connections done", + zap.Int("closed", count)) + + tick.Reset(s.tick) + } + } + + tick.Stop() +} diff --git a/lib/placement/graph.go b/lib/placement/graph.go new file mode 100644 index 000000000..02efc3de8 --- /dev/null +++ b/lib/placement/graph.go @@ -0,0 +1,178 @@ +package placement + +import ( + "github.com/gogo/protobuf/proto" + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-api-go/bootstrap" + "github.com/nspcc-dev/neofs-node/lib/netmap" + "github.com/pkg/errors" +) + +// method returns copy of current Graph. 
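The Job loop above is a standard timer-driven sweep. The sketch below strips it to its bare shape; sweepIdle is a hypothetical stand-in for the connection bookkeeping and is not an identifier from this repository:

package main

import (
	"context"
	"time"
)

// cleanupLoop re-arms its timer only after a sweep finishes, exactly like
// Job does with its tick timer.
func cleanupLoop(ctx context.Context, interval time.Duration, sweepIdle func()) {
	tick := time.NewTimer(interval)
	defer tick.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-tick.C:
			sweepIdle() // close connections that have been idle too long
			tick.Reset(interval)
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	cleanupLoop(ctx, time.Second, func() { /* sweep idle connections here */ })
}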
+func (g *graph) copy() *graph { + var ( + place *netmap.PlacementRule + roots = make([]*netmap.Bucket, 0, len(g.roots)) + items = make([]bootstrap.NodeInfo, len(g.items)) + ) + + copy(items, g.items) + + for _, root := range g.roots { + var r *netmap.Bucket + + if root != nil { + tmp := root.Copy() + r = &tmp + } + + roots = append(roots, r) + } + + place = proto.Clone(g.place).(*netmap.PlacementRule) + + return &graph{ + roots: roots, + items: items, + place: place, + } +} + +func (g *graph) Exclude(list []multiaddr.Multiaddr) Graph { + if len(list) == 0 { + return g + } + + var ( + sub = g.copy() + ignore = make([]uint32, 0, len(list)) + ) + + for i := range list { + for j := range sub.items { + if list[i].String() == sub.items[j].Address { + ignore = append(ignore, uint32(j)) + } + } + } + + return sub.Filter(func(group netmap.SFGroup, bucket *netmap.Bucket) *netmap.Bucket { + group.Exclude = ignore + return bucket.GetMaxSelection(group) + }) +} + +// Filter container by rules. +func (g *graph) Filter(rule FilterRule) Graph { + if rule == nil { + return g + } + + var ( + sub = g.copy() + roots = make([]*netmap.Bucket, len(g.roots)) + items = make([]bootstrap.NodeInfo, len(g.items)) + ) + + for i := range g.place.SFGroups { + if g.roots[i] == nil { + continue + } + + root := g.roots[i].Copy() + roots[i] = rule(g.place.SFGroups[i], &root) + } + + copy(items, g.items) + + return &graph{ + roots: roots, + items: items, + place: sub.place, + } +} + +// NodeList returns slice of MultiAddresses for current graph. +func (g *graph) NodeList() ([]multiaddr.Multiaddr, error) { + var ( + ln = uint32(len(g.items)) + result = make([]multiaddr.Multiaddr, 0, ln) + items = make([]bootstrap.NodeInfo, len(g.items)) + ) + + if ln == 0 { + return nil, ErrEmptyNodes + } + + copy(items, g.items) + + for _, root := range g.roots { + if root == nil { + continue + } + + list := root.Nodelist() + if len(list) == 0 { + continue + } + + for _, idx := range list { + if ln <= idx.N { + return nil, errors.Errorf("could not find index(%d) in list(size: %d)", ln, idx) + } + + addr, err := multiaddr.NewMultiaddr(items[idx.N].Address) + if err != nil { + return nil, errors.Wrapf(err, "could not convert multi address(%s)", g.items[idx.N].Address) + } + + result = append(result, addr) + } + } + + if len(result) == 0 { + return nil, ErrEmptyNodes + } + + return result, nil +} + +// NodeInfo returns slice of NodeInfo for current graph. 
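Exclude and Filter above both return a new Graph, so callers can chain them before materializing the node list. A hedged sketch (g is a Graph obtained from the placement component; the filter narrows each bucket with a selection seeded by an object ID, mirroring the tests later in this diff):

package example

import (
	"github.com/multiformats/go-multiaddr"
	"github.com/nspcc-dev/neofs-node/lib/netmap"
	"github.com/nspcc-dev/neofs-node/lib/placement"
)

// pickNodes drops already-used nodes, narrows the selection and returns
// the multi addresses of the remaining candidates.
func pickNodes(g placement.Graph, used []multiaddr.Multiaddr, seed []byte) ([]multiaddr.Multiaddr, error) {
	return g.
		Exclude(used).
		Filter(func(group netmap.SFGroup, bucket *netmap.Bucket) *netmap.Bucket {
			return bucket.GetSelection(group.Selectors, seed)
		}).
		NodeList()
}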
+func (g *graph) NodeInfo() ([]bootstrap.NodeInfo, error) { + var ( + ln = uint32(len(g.items)) + result = make([]bootstrap.NodeInfo, 0, ln) + items = make([]bootstrap.NodeInfo, len(g.items)) + ) + + if ln == 0 { + return nil, ErrEmptyNodes + } + + copy(items, g.items) + + for _, root := range g.roots { + if root == nil { + continue + } + + list := root.Nodelist() + if len(list) == 0 { + continue + } + + for _, idx := range list { + if ln <= idx.N { + return nil, errors.Errorf("could not find index(%d) in list(size: %d)", ln, idx) + } + + result = append(result, items[idx.N]) + } + } + + if len(result) == 0 { + return nil, ErrEmptyNodes + } + + return result, nil +} diff --git a/lib/placement/interface.go b/lib/placement/interface.go new file mode 100644 index 000000000..5c428f17c --- /dev/null +++ b/lib/placement/interface.go @@ -0,0 +1,113 @@ +package placement + +import ( + "context" + + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-api-go/bootstrap" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-node/lib/container" + "github.com/nspcc-dev/neofs-node/lib/netmap" + "github.com/nspcc-dev/neofs-node/lib/peers" + "go.uber.org/atomic" + "go.uber.org/zap" +) + +type ( + // Component is interface of placement service + Component interface { + // TODO leave for feature request + + NetworkState() *bootstrap.SpreadMap + Neighbours(seed, epoch uint64, full bool) []peers.ID + Update(epoch uint64, nm *netmap.NetMap) error + Query(ctx context.Context, opts ...QueryOption) (Graph, error) + } + + // QueryOptions for query request + QueryOptions struct { + CID refs.CID + Previous int + Excludes []multiaddr.Multiaddr + } + + // QueryOption settings closure + QueryOption func(*QueryOptions) + + // FilterRule bucket callback handler + FilterRule func(netmap.SFGroup, *netmap.Bucket) *netmap.Bucket + + // Graph is result of request to Placement-component + Graph interface { + Filter(rule FilterRule) Graph + Exclude(list []multiaddr.Multiaddr) Graph + NodeList() ([]multiaddr.Multiaddr, error) + NodeInfo() ([]bootstrap.NodeInfo, error) + } + + // Key to fetch node-list + Key []byte + + // Params to create Placement component + Params struct { + Log *zap.Logger + Netmap *netmap.NetMap + Peerstore peers.Store + Fetcher container.Storage + ChronologyDuration uint64 // storing number of past epochs states + } + + networkState struct { + nm *netmap.NetMap + epoch uint64 + } + + // placement is implementation of placement.Component + placement struct { + log *zap.Logger + cnr container.Storage + + chronologyDur uint64 + nmStore *netMapStore + + ps peers.Store + + healthy *atomic.Bool + } + + // graph is implementation of placement.Graph + graph struct { + roots []*netmap.Bucket + items []bootstrap.NodeInfo + place *netmap.PlacementRule + } +) + +// Copy network state. +func (ns networkState) Copy() *networkState { + return &networkState{ + nm: ns.nm.Copy(), + epoch: ns.epoch, + } +} + +// ExcludeNodes to ignore some nodes. +func ExcludeNodes(list []multiaddr.Multiaddr) QueryOption { + return func(opt *QueryOptions) { + opt.Excludes = list + } +} + +// ContainerID set by Key. +func ContainerID(cid refs.CID) QueryOption { + return func(opt *QueryOptions) { + opt.CID = cid + } +} + +// UsePreviousNetmap for query. 
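The QueryOption closures above (together with UsePreviousNetmap, defined directly below) compose into a single Query call. A minimal sketch of how a caller might combine them; cid and ignored are placeholder inputs:

package example

import (
	"context"

	"github.com/multiformats/go-multiaddr"
	"github.com/nspcc-dev/neofs-api-go/refs"
	"github.com/nspcc-dev/neofs-node/lib/placement"
)

// queryContainer builds a container graph for the previous epoch while
// skipping nodes the caller already knows about.
func queryContainer(ctx context.Context, c placement.Component, cid refs.CID, ignored []multiaddr.Multiaddr) (placement.Graph, error) {
	return c.Query(ctx,
		placement.ContainerID(cid),
		placement.ExcludeNodes(ignored),
		placement.UsePreviousNetmap(1),
	)
}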
+func UsePreviousNetmap(diff int) QueryOption { + return func(opt *QueryOptions) { + opt.Previous = diff + } +} diff --git a/lib/placement/neighbours.go b/lib/placement/neighbours.go new file mode 100644 index 000000000..65154ee10 --- /dev/null +++ b/lib/placement/neighbours.go @@ -0,0 +1,69 @@ +package placement + +import ( + "math" + + "github.com/nspcc-dev/hrw" + "github.com/nspcc-dev/neofs-node/lib/netmap" + "github.com/nspcc-dev/neofs-node/lib/peers" + "go.uber.org/zap" +) + +func calculateCount(n int) int { + if n < 30 { + return n + } + + return int(1.4*math.Log(float64(n))+9) + 1 +} + +// Neighbours peers that which are distributed by hrw(seed) +// If full flag is set, all set of peers returns. +// Otherwise, result size depends on calculateCount function. +func (p *placement) Neighbours(seed, epoch uint64, full bool) []peers.ID { + nm := p.nmStore.get(epoch) + if nm == nil { + p.log.Error("could not receive network state", + zap.Uint64("epoch", epoch), + ) + + return nil + } + + rPeers := p.listPeers(nm.ItemsCopy(), !full) + + hrw.SortSliceByValue(rPeers, seed) + + if full { + return rPeers + } + + var ( + ln = len(rPeers) + cut = calculateCount(ln) + ) + + if cut > ln { + cut = ln + } + + return rPeers[:cut] +} + +func (p *placement) listPeers(nodes netmap.Nodes, exclSelf bool) []peers.ID { + var ( + id = p.ps.SelfID() + result = make([]peers.ID, 0, len(nodes)) + ) + + for i := range nodes { + key := peers.IDFromBinary(nodes[i].PubKey) + if exclSelf && id.Equal(key) { + continue + } + + result = append(result, key) + } + + return result +} diff --git a/lib/placement/neighbours_test.go b/lib/placement/neighbours_test.go new file mode 100644 index 000000000..8f9e43ac9 --- /dev/null +++ b/lib/placement/neighbours_test.go @@ -0,0 +1,177 @@ +package placement + +import ( + "crypto/ecdsa" + "strconv" + "testing" + + "bou.ke/monkey" + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-api-go/bootstrap" + crypto "github.com/nspcc-dev/neofs-crypto" + "github.com/nspcc-dev/neofs-node/lib/netmap" + "github.com/nspcc-dev/neofs-node/lib/peers" + "github.com/nspcc-dev/neofs-node/lib/test" + "github.com/stretchr/testify/require" +) + +func testAddress(t *testing.T) multiaddr.Multiaddr { + addr, err := multiaddr.NewMultiaddr("/ip4/0.0.0.0/tcp/0") + require.NoError(t, err) + return addr +} + +// -- -- // + +func testPeerstore(t *testing.T) peers.Store { + p, err := peers.NewStore(peers.StoreParams{ + Key: test.DecodeKey(-1), + Logger: test.NewTestLogger(false), + Addr: testAddress(t), + }) + require.NoError(t, err) + + return p +} + +const address = "/ip4/0.0.0.0/tcp/0/p2p/" + +func TestPlacement_Neighbours(t *testing.T) { + t.Run("Placement component NPE fix test", func(t *testing.T) { + nodes := []bootstrap.NodeInfo{ + {Address: address + idFromString(t, "USA1"), Options: []string{"/Location:Europe/Country:USA/City:NewYork"}}, + {Address: address + idFromString(t, "ITL1"), Options: []string{"/Location:Europe/Country:Italy/City:Rome"}}, + {Address: address + idFromString(t, "RUS1"), Options: []string{"/Location:Europe/Country:Russia/City:SPB"}}, + } + + ps := testPeerstore(t) + nm := testNetmap(t, nodes) + + p := New(Params{ + Log: test.NewTestLogger(false), + Peerstore: ps, + }) + + require.NotPanics(t, func() { + require.NoError(t, p.Update(1, nm)) + }) + }) + + t.Run("Placement Neighbours TestSuite", func(t *testing.T) { + keys := []*ecdsa.PrivateKey{ + test.DecodeKey(0), + test.DecodeKey(1), + test.DecodeKey(2), + } + nodes := []bootstrap.NodeInfo{ + { + Address: address + 
idFromString(t, "USA1"), + PubKey: crypto.MarshalPublicKey(&keys[0].PublicKey), + Options: []string{"/Location:Europe/Country:USA/City:NewYork"}, + }, + { + Address: address + idFromString(t, "ITL1"), + PubKey: crypto.MarshalPublicKey(&keys[1].PublicKey), + Options: []string{"/Location:Europe/Country:Italy/City:Rome"}, + }, + { + Address: address + idFromString(t, "RUS1"), + PubKey: crypto.MarshalPublicKey(&keys[2].PublicKey), + Options: []string{"/Location:Europe/Country:Russia/City:SPB"}, + }, + } + + ps := testPeerstore(t) + nm := testNetmap(t, nodes) + + p := New(Params{ + Log: test.NewTestLogger(false), + Netmap: nm, + Peerstore: ps, + }) + + t.Run("check, that items have expected length (< 30)", func(t *testing.T) { + items := p.Neighbours(1, 0, false) + require.Len(t, items, len(nm.ItemsCopy())) + }) + + t.Run("check, that items have expected length ( > 30)", func(t *testing.T) { + opts := []string{"/Location:Europe/Country:Russia/City:SPB"} + + key, err := ps.GetPublicKey(ps.SelfID()) + require.NoError(t, err) + + keyBytes := crypto.MarshalPublicKey(key) + + addr := address + idFromString(t, "NewRUS") + err = nm.Add(addr, keyBytes, 0, opts...) + require.NoError(t, err) + + for i := 0; i < 30; i++ { + addr := address + idFromString(t, "RUS"+strconv.Itoa(i+2)) + key := test.DecodeKey(i + len(nodes)) + pub := crypto.MarshalPublicKey(&key.PublicKey) + err := nm.Add(addr, pub, 0, opts...) + require.NoError(t, err) + } + + ln := calculateCount(len(nm.ItemsCopy())) + items := p.Neighbours(1, 0, false) + require.Len(t, items, ln) + }) + + t.Run("check, that items is shuffled", func(t *testing.T) { + var cur, pre []peers.ID + for i := uint64(0); i < 10; i++ { + cur = p.Neighbours(i, 0, false) + require.NotEqual(t, pre, cur) + + pre = cur + } + }) + + t.Run("check, that we can request more items that we have", func(t *testing.T) { + require.NotPanics(t, func() { + monkey.Patch(calculateCount, func(i int) int { return i + 1 }) + defer monkey.Unpatch(calculateCount) + + p.Neighbours(1, 0, false) + }) + }) + }) + + t.Run("unknown epoch", func(t *testing.T) { + s := &placement{ + log: test.NewTestLogger(false), + nmStore: newNetMapStore(), + ps: testPeerstore(t), + } + + require.Empty(t, s.Neighbours(1, 1, false)) + }) + + t.Run("neighbors w/ set full flag", func(t *testing.T) { + var ( + n = 3 + e uint64 = 5 + nm = netmap.NewNetmap() + nms = newNetMapStore() + ) + + for i := 0; i < n; i++ { + require.NoError(t, nm.Add("node"+strconv.Itoa(i), []byte{1}, 1)) + } + + nms.put(e, nm) + + s := &placement{ + log: test.NewTestLogger(false), + nmStore: nms, + ps: testPeerstore(t), + } + + neighbors := s.Neighbours(1, e, true) + + require.Len(t, neighbors, n) + }) +} diff --git a/lib/placement/placement.go b/lib/placement/placement.go new file mode 100644 index 000000000..dd7e8dad9 --- /dev/null +++ b/lib/placement/placement.go @@ -0,0 +1,257 @@ +package placement + +import ( + "bytes" + "context" + "strings" + + "github.com/nspcc-dev/neofs-api-go/bootstrap" + "github.com/nspcc-dev/neofs-api-go/refs" + crypto "github.com/nspcc-dev/neofs-crypto" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/container" + "github.com/nspcc-dev/neofs-node/lib/netmap" + "github.com/nspcc-dev/neofs-node/lib/peers" + "github.com/pkg/errors" + "go.uber.org/atomic" + "go.uber.org/zap" +) + +const defaultChronologyDuration = 1 + +var ( + // ErrEmptyNodes when container doesn't contains any nodes + ErrEmptyNodes = internal.Error("container doesn't contains nodes") + + // ErrNodesBucketOmitted 
when in PlacementRule, Selector has not NodesBucket + ErrNodesBucketOmitted = internal.Error("nodes-bucket is omitted") + + // ErrEmptyContainer when GetMaxSelection or GetSelection returns empty result + ErrEmptyContainer = internal.Error("could not get container, it's empty") +) + +var errNilNetMap = errors.New("network map is nil") + +// New is a placement component constructor. +func New(p Params) Component { + if p.Netmap == nil { + p.Netmap = netmap.NewNetmap() + } + + if p.ChronologyDuration <= 0 { + p.ChronologyDuration = defaultChronologyDuration + } + + pl := &placement{ + log: p.Log, + cnr: p.Fetcher, + + chronologyDur: p.ChronologyDuration, + nmStore: newNetMapStore(), + + ps: p.Peerstore, + + healthy: atomic.NewBool(false), + } + + pl.nmStore.put(0, p.Netmap) + + return pl +} + +func (p *placement) Name() string { return "PresentInNetwork" } +func (p *placement) Healthy() bool { return p.healthy.Load() } + +type strNodes []bootstrap.NodeInfo + +func (n strNodes) String() string { + list := make([]string, 0, len(n)) + for i := range n { + list = append(list, n[i].Address) + } + + return `[` + strings.Join(list, ",") + `]` +} + +func (p *placement) Update(epoch uint64, nm *netmap.NetMap) error { + cnm := p.nmStore.get(p.nmStore.epoch()) + if cnm == nil { + return errNilNetMap + } + + cp := cnm.Copy() + cp.Update(nm) + + items := nm.ItemsCopy() + + p.log.Debug("update to new netmap", + zap.Stringer("nodes", strNodes(items))) + + p.log.Debug("update peerstore") + + if err := p.ps.Update(cp); err != nil { + return err + } + + var ( + pubkeyBinary []byte + healthy bool + ) + + // storage nodes must be presented in network map to be healthy + pubkey, err := p.ps.GetPublicKey(p.ps.SelfID()) + if err != nil { + p.log.Error("can't get my own public key") + } + + pubkeyBinary = crypto.MarshalPublicKey(pubkey) + + for i := range items { + if bytes.Equal(pubkeyBinary, items[i].GetPubKey()) { + healthy = true + } + + p.log.Debug("new peer for dht", + zap.Stringer("peer", peers.IDFromBinary(items[i].GetPubKey())), + zap.String("addr", items[i].GetAddress())) + } + + // make copy to previous + p.log.Debug("update previous netmap") + + if epoch > p.chronologyDur { + p.nmStore.trim(epoch - p.chronologyDur) + } + + p.log.Debug("update current netmap") + p.nmStore.put(epoch, cp) + + p.log.Debug("update current epoch") + + p.healthy.Store(healthy) + + return nil +} + +// NetworkState returns copy of current NetworkMap. +func (p *placement) NetworkState() *bootstrap.SpreadMap { + ns := p.networkState(p.nmStore.epoch()) + if ns == nil { + ns = &networkState{nm: netmap.NewNetmap()} + } + + return &bootstrap.SpreadMap{ + Epoch: ns.epoch, + NetMap: ns.nm.Items(), + } +} + +func (p *placement) networkState(epoch uint64) *networkState { + nm := p.nmStore.get(epoch) + if nm == nil { + return nil + } + + return &networkState{ + nm: nm.Copy(), + epoch: epoch, + } +} + +// Query returns graph based on container. 
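Putting the constructor and Update together, a caller wires the component roughly the way the tests in this diff do. A hedged sketch; the logger, peer store, container storage and the incoming netmap are assumed to come from the surrounding node setup:

package example

import (
	"github.com/nspcc-dev/neofs-node/lib/container"
	"github.com/nspcc-dev/neofs-node/lib/netmap"
	"github.com/nspcc-dev/neofs-node/lib/peers"
	"github.com/nspcc-dev/neofs-node/lib/placement"
	"go.uber.org/zap"
)

func newPlacement(log *zap.Logger, ps peers.Store, cnr container.Storage, nm *netmap.NetMap, epoch uint64) (placement.Component, error) {
	p := placement.New(placement.Params{
		Log:       log,
		Netmap:    netmap.NewNetmap(), // initial state stored under epoch 0
		Peerstore: ps,
		Fetcher:   cnr,
	})

	// Feeding the freshly received network map also refreshes the peer
	// store and the component's health flag.
	if err := p.Update(epoch, nm); err != nil {
		return nil, err
	}

	return p, nil
}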
+func (p *placement) Query(ctx context.Context, opts ...QueryOption) (Graph, error) { + var ( + items []bootstrap.NodeInfo + query QueryOptions + ignore []uint32 + ) + + for _, opt := range opts { + opt(&query) + } + + epoch := p.nmStore.epoch() + if query.Previous > 0 { + epoch -= uint64(query.Previous) + } + + state := p.networkState(epoch) + if state == nil { + return nil, errors.Errorf("could not get network state for epoch #%d", epoch) + } + + items = state.nm.Items() + + gp := container.GetParams{} + gp.SetContext(ctx) + gp.SetCID(query.CID) + + getRes, err := p.cnr.GetContainer(gp) + if err != nil { + return nil, errors.Wrap(err, "could not fetch container") + } + + for i := range query.Excludes { + for j := range items { + if query.Excludes[i].String() == items[j].Address { + ignore = append(ignore, uint32(j)) + } + } + } + + rule := getRes.Container().GetRules() + + return ContainerGraph(state.nm, &rule, ignore, query.CID) +} + +// ContainerGraph applies the placement rules to network map and returns container graph. +func ContainerGraph(nm *netmap.NetMap, rule *netmap.PlacementRule, ignore []uint32, cid refs.CID) (Graph, error) { + root := nm.Root() + roots := make([]*netmap.Bucket, 0, len(rule.SFGroups)) + + for i := range rule.SFGroups { + rule.SFGroups[i].Exclude = ignore + if ln := len(rule.SFGroups[i].Selectors); ln <= 0 || + rule.SFGroups[i].Selectors[ln-1].Key != netmap.NodesBucket { + return nil, errors.Wrapf(ErrNodesBucketOmitted, "container (%s)", cid) + } + + bigSelectors := make([]netmap.Select, len(rule.SFGroups[i].Selectors)) + for j := range rule.SFGroups[i].Selectors { + bigSelectors[j] = netmap.Select{ + Key: rule.SFGroups[i].Selectors[j].Key, + Count: rule.SFGroups[i].Selectors[j].Count, + } + + if rule.ReplFactor > 1 && rule.SFGroups[i].Selectors[j].Key == netmap.NodesBucket { + bigSelectors[j].Count *= rule.ReplFactor + } + } + + sf := netmap.SFGroup{ + Selectors: bigSelectors, + Filters: rule.SFGroups[i].Filters, + Exclude: ignore, + } + + if tree := root.Copy().GetMaxSelection(sf); tree != nil { + // fetch graph for replication factor seeded by ContainerID + if tree = tree.GetSelection(bigSelectors, cid[:]); tree == nil { + return nil, errors.Wrapf(ErrEmptyContainer, "for container(%s) with repl-factor(%d)", + cid, rule.ReplFactor) + } + + roots = append(roots, tree) + + continue + } + + return nil, errors.Wrap(ErrEmptyContainer, "empty for bigSelector") + } + + return &graph{ + roots: roots, + items: nm.ItemsCopy(), + place: rule, + }, nil +} diff --git a/lib/placement/placement_test.go b/lib/placement/placement_test.go new file mode 100644 index 000000000..53ac8127a --- /dev/null +++ b/lib/placement/placement_test.go @@ -0,0 +1,407 @@ +package placement + +import ( + "context" + "sort" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/mr-tron/base58" + "github.com/multiformats/go-multiaddr" + "github.com/multiformats/go-multihash" + "github.com/nspcc-dev/neofs-api-go/bootstrap" + "github.com/nspcc-dev/neofs-api-go/container" + "github.com/nspcc-dev/neofs-api-go/refs" + libcnr "github.com/nspcc-dev/neofs-node/lib/container" + "github.com/nspcc-dev/neofs-node/lib/netmap" + "github.com/nspcc-dev/neofs-node/lib/peers" + "github.com/nspcc-dev/neofs-node/lib/test" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" +) + +type ( + fakeDHT struct { + } + + fakeContainerStorage struct { + libcnr.Storage + *sync.RWMutex + items map[refs.CID]*container.Container + } +) + +var ( + testDHTCapacity = 100 +) + +// -- -- // + +func 
testContainerStorage() *fakeContainerStorage { + return &fakeContainerStorage{ + RWMutex: new(sync.RWMutex), + items: make(map[refs.CID]*container.Container, testDHTCapacity), + } +} + +func (f *fakeContainerStorage) GetContainer(p libcnr.GetParams) (*libcnr.GetResult, error) { + f.RLock() + val, ok := f.items[p.CID()] + f.RUnlock() + + if !ok { + return nil, errors.New("value for requested key not found in DHT") + } + + res := new(libcnr.GetResult) + res.SetContainer(val) + + return res, nil +} + +func (f *fakeContainerStorage) Put(c *container.Container) error { + id, err := c.ID() + if err != nil { + return err + } + f.Lock() + f.items[id] = c + f.Unlock() + + return nil +} + +func (f *fakeDHT) UpdatePeers([]peers.ID) { + // do nothing +} + +func (f *fakeDHT) GetValue(ctx context.Context, key string) ([]byte, error) { + panic("implement me") +} + +func (f *fakeDHT) PutValue(ctx context.Context, key string, val []byte) error { + panic("implement me") +} + +func (f *fakeDHT) Get(ctx context.Context, key string) ([]byte, error) { + panic("implement me") +} + +func (f *fakeDHT) Put(ctx context.Context, key string, val []byte) error { + panic("implement me") +} + +// -- -- // + +func testNetmap(t *testing.T, nodes []bootstrap.NodeInfo) *netmap.NetMap { + nm := netmap.NewNetmap() + + for i := range nodes { + err := nm.Add(nodes[i].Address, nil, 0, nodes[i].Options...) + require.NoError(t, err) + } + + return nm +} + +// -- -- // + +func idFromString(t *testing.T, id string) string { + buf, err := multihash.Encode([]byte(id), multihash.ID) + require.NoError(t, err) + + return (multihash.Multihash(buf)).B58String() +} + +func idFromAddress(t *testing.T, addr multiaddr.Multiaddr) string { + id, err := addr.ValueForProtocol(multiaddr.P_P2P) + require.NoError(t, err) + + buf, err := base58.Decode(id) + require.NoError(t, err) + + hs, err := multihash.Decode(buf) + require.NoError(t, err) + + return string(hs.Digest) +} + +// -- -- // + +func TestPlacement(t *testing.T) { + multiaddr.SwapToP2pMultiaddrs() + testAddress := "/ip4/0.0.0.0/tcp/0/p2p/" + key := test.DecodeKey(-1) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + ids := map[string]struct{}{ + "GRM1": {}, "GRM2": {}, "GRM3": {}, "GRM4": {}, + "SPN1": {}, "SPN2": {}, "SPN3": {}, "SPN4": {}, + } + + nodes := []bootstrap.NodeInfo{ + {Address: testAddress + idFromString(t, "USA1"), Options: []string{"/Location:Europe/Country:USA/City:NewYork"}}, + {Address: testAddress + idFromString(t, "ITL1"), Options: []string{"/Location:Europe/Country:Italy/City:Rome"}}, + {Address: testAddress + idFromString(t, "RUS1"), Options: []string{"/Location:Europe/Country:Russia/City:SPB"}}, + } + + for id := range ids { + var opts []string + switch { + case strings.Contains(id, "GRM"): + opts = append(opts, "/Location:Europe/Country:Germany/City:"+id) + case strings.Contains(id, "SPN"): + opts = append(opts, "/Location:Europe/Country:Spain/City:"+id) + } + + for i := 0; i < 4; i++ { + id := id + strconv.Itoa(i) + + nodes = append(nodes, bootstrap.NodeInfo{ + Address: testAddress + idFromString(t, id), + Options: opts, + }) + } + } + + sort.Slice(nodes, func(i, j int) bool { + return strings.Compare(nodes[i].Address, nodes[j].Address) == -1 + }) + + nm := testNetmap(t, nodes) + + cnrStorage := testContainerStorage() + + p := New(Params{ + Log: test.NewTestLogger(false), + Netmap: netmap.NewNetmap(), + Peerstore: testPeerstore(t), + Fetcher: cnrStorage, + }) + + require.NoError(t, p.Update(1, nm)) + + oid, err := 
refs.NewObjectID() + require.NoError(t, err) + + // filter over oid + filter := func(group netmap.SFGroup, bucket *netmap.Bucket) *netmap.Bucket { + return bucket.GetSelection(group.Selectors, oid[:]) + } + + owner, err := refs.NewOwnerID(&key.PublicKey) + require.NoError(t, err) + res1, err := container.New(100, owner, 0, netmap.PlacementRule{ + ReplFactor: 2, + SFGroups: []netmap.SFGroup{ + { + Selectors: []netmap.Select{ + {Key: "Country", Count: 1}, + {Key: "City", Count: 2}, + {Key: netmap.NodesBucket, Count: 1}, + }, + Filters: []netmap.Filter{ + {Key: "Country", F: netmap.FilterIn("Germany", "Spain")}, + }, + }, + }, + }) + require.NoError(t, err) + + err = cnrStorage.Put(res1) + require.NoError(t, err) + + res2, err := container.New(100, owner, 0, netmap.PlacementRule{ + ReplFactor: 2, + SFGroups: []netmap.SFGroup{ + { + Selectors: []netmap.Select{ + {Key: "Country", Count: 1}, + {Key: netmap.NodesBucket, Count: 10}, + }, + Filters: []netmap.Filter{ + {Key: "Country", F: netmap.FilterIn("Germany", "Spain")}, + }, + }, + }, + }) + require.NoError(t, err) + + err = cnrStorage.Put(res2) + require.NoError(t, err) + + res3, err := container.New(100, owner, 0, netmap.PlacementRule{ + ReplFactor: 2, + SFGroups: []netmap.SFGroup{ + { + Selectors: []netmap.Select{ + {Key: "Country", Count: 1}, + }, + Filters: []netmap.Filter{ + {Key: "Country", F: netmap.FilterIn("Germany", "Spain")}, + }, + }, + }, + }) + require.NoError(t, err) + + err = cnrStorage.Put(res3) + require.NoError(t, err) + + t.Run("Should fail on empty container", func(t *testing.T) { + id, err := res2.ID() + require.NoError(t, err) + _, err = p.Query(ctx, ContainerID(id)) + require.EqualError(t, errors.Cause(err), ErrEmptyContainer.Error()) + }) + + t.Run("Should fail on Nodes Bucket is omitted in container", func(t *testing.T) { + id, err := res3.ID() + require.NoError(t, err) + _, err = p.Query(ctx, ContainerID(id)) + require.EqualError(t, errors.Cause(err), ErrNodesBucketOmitted.Error()) + }) + + t.Run("Should fail on unknown container (dht error)", func(t *testing.T) { + _, err = p.Query(ctx, ContainerID(refs.CID{5})) + require.Error(t, err) + }) + + id1, err := res1.ID() + require.NoError(t, err) + + g, err := p.Query(ctx, ContainerID(id1)) + require.NoError(t, err) + + t.Run("Should return error on empty items", func(t *testing.T) { + _, err = g.Filter(func(netmap.SFGroup, *netmap.Bucket) *netmap.Bucket { + return &netmap.Bucket{} + }).NodeList() + require.EqualError(t, err, ErrEmptyNodes.Error()) + }) + + t.Run("Should ignore some nodes", func(t *testing.T) { + g1, err := p.Query(ctx, ContainerID(id1)) + require.NoError(t, err) + + expect, err := g1. + Filter(filter). + NodeList() + require.NoError(t, err) + + g2, err := p.Query(ctx, ContainerID(id1)) + require.NoError(t, err) + + actual, err := g2. + Filter(filter). + NodeList() + require.NoError(t, err) + + require.Equal(t, expect, actual) + + g3, err := p.Query(ctx, ContainerID(id1)) + require.NoError(t, err) + + actual, err = g3. + Exclude(expect). + Filter(filter). + NodeList() + require.NoError(t, err) + + for _, item := range expect { + require.NotContains(t, actual, item) + } + + g4, err := p.Query(ctx, + ContainerID(id1), + ExcludeNodes(expect)) + require.NoError(t, err) + + actual, err = g4. + Filter(filter). 
+ NodeList() + require.NoError(t, err) + + for _, item := range expect { + require.NotContains(t, actual, item) + } + }) + + t.Run("Should return error on nil Buckets", func(t *testing.T) { + _, err = g.Filter(func(netmap.SFGroup, *netmap.Bucket) *netmap.Bucket { + return nil + }).NodeList() + require.EqualError(t, err, ErrEmptyNodes.Error()) + }) + + t.Run("Should return error on empty NodeInfo's", func(t *testing.T) { + cp := g.Filter(func(netmap.SFGroup, *netmap.Bucket) *netmap.Bucket { + return nil + }) + + cp.(*graph).items = nil + + _, err := cp.NodeList() + require.EqualError(t, err, ErrEmptyNodes.Error()) + }) + + t.Run("Should return error on unknown items", func(t *testing.T) { + cp := g.Filter(func(_ netmap.SFGroup, b *netmap.Bucket) *netmap.Bucket { + return b + }) + + cp.(*graph).items = cp.(*graph).items[:5] + + _, err := cp.NodeList() + require.Error(t, err) + }) + + t.Run("Should return error on bad items", func(t *testing.T) { + cp := g.Filter(func(_ netmap.SFGroup, b *netmap.Bucket) *netmap.Bucket { + return b + }) + + for i := range cp.(*graph).items { + cp.(*graph).items[i].Address = "BadAddress" + } + + _, err := cp.NodeList() + require.EqualError(t, errors.Cause(err), "failed to parse multiaddr \"BadAddress\": must begin with /") + }) + + list, err := g. + Filter(filter). + // must return same graph on empty filter + Filter(nil). + NodeList() + require.NoError(t, err) + + // 1 Country, 2 Cities, 1 Node = 2 Nodes + require.Len(t, list, 2) + for _, item := range list { + id := idFromAddress(t, item) + require.Contains(t, ids, id[:4]) // exclude our postfix (0-4) + } +} + +func TestContainerGraph(t *testing.T) { + t.Run("selectors index out-of-range", func(t *testing.T) { + rule := new(netmap.PlacementRule) + + rule.SFGroups = append(rule.SFGroups, netmap.SFGroup{}) + + require.NotPanics(t, func() { + _, _ = ContainerGraph( + netmap.NewNetmap(), + rule, + nil, + refs.CID{}, + ) + }) + }) +} diff --git a/lib/placement/store.go b/lib/placement/store.go new file mode 100644 index 000000000..7d27bdf0a --- /dev/null +++ b/lib/placement/store.go @@ -0,0 +1,66 @@ +package placement + +import ( + "sync" + + "github.com/nspcc-dev/neofs-node/lib/netmap" +) + +type ( + // NetMap is a type alias of + // NetMap from netmap package. + NetMap = netmap.NetMap + + netMapStore struct { + *sync.RWMutex + items map[uint64]*NetMap + + curEpoch uint64 + } +) + +func newNetMapStore() *netMapStore { + return &netMapStore{ + RWMutex: new(sync.RWMutex), + items: make(map[uint64]*NetMap), + } +} + +func (s *netMapStore) put(epoch uint64, nm *NetMap) { + s.Lock() + s.items[epoch] = nm + s.curEpoch = epoch + s.Unlock() +} + +func (s *netMapStore) get(epoch uint64) *NetMap { + s.RLock() + nm := s.items[epoch] + s.RUnlock() + + return nm +} + +// trim cleans all network states elder than epoch. +func (s *netMapStore) trim(epoch uint64) { + s.Lock() + m := make(map[uint64]struct{}, len(s.items)) + + for e := range s.items { + if e < epoch { + m[e] = struct{}{} + } + } + + for e := range m { + delete(s.items, e) + } + s.Unlock() +} + +func (s *netMapStore) epoch() uint64 { + s.RLock() + defer s.RUnlock() + + return s.curEpoch +} diff --git a/lib/rand/rand.go b/lib/rand/rand.go new file mode 100644 index 000000000..b42b58e42 --- /dev/null +++ b/lib/rand/rand.go @@ -0,0 +1,46 @@ +package rand + +import ( + crand "crypto/rand" + "encoding/binary" + mrand "math/rand" +) + +type cryptoSource struct{} + +// Read is alias for crypto/rand.Read. 
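The netMapStore above keeps one network map per epoch: trim drops every epoch older than its argument and epoch() reports the last stored one. A short test-style sketch of that behaviour; it would have to live inside package placement, since the type is unexported:

package placement

import (
	"testing"

	"github.com/nspcc-dev/neofs-node/lib/netmap"
	"github.com/stretchr/testify/require"
)

func TestNetMapStoreTrimSketch(t *testing.T) {
	s := newNetMapStore()

	for e := uint64(1); e <= 5; e++ {
		s.put(e, netmap.NewNetmap())
	}

	s.trim(4) // removes epochs 1-3, keeps 4 and 5

	require.Nil(t, s.get(3))
	require.NotNil(t, s.get(4))
	require.EqualValues(t, 5, s.epoch()) // epoch() tracks the last put
}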
+var Read = crand.Read + +// New constructs the source of random numbers. +func New() *mrand.Rand { + return mrand.New(&cryptoSource{}) +} + +func (s *cryptoSource) Seed(int64) {} + +func (s *cryptoSource) Int63() int64 { + return int64(s.Uint63()) +} + +func (s *cryptoSource) Uint63() uint64 { + buf := make([]byte, 8) + if _, err := crand.Read(buf); err != nil { + return 0 + } + + return binary.BigEndian.Uint64(buf) +} + +// Uint64 returns a random uint64 value. +func Uint64(r *mrand.Rand, max int64) uint64 { + if max <= 0 { + return 0 + } + + var i int64 = -1 + for i < 0 { + i = r.Int63n(max) + } + + return uint64(i) +} diff --git a/lib/replication/common.go b/lib/replication/common.go new file mode 100644 index 000000000..7ca8c0a7a --- /dev/null +++ b/lib/replication/common.go @@ -0,0 +1,197 @@ +package replication + +import ( + "context" + + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +type ( + // CID is a type alias of + // CID from refs package of neofs-api-go. + CID = refs.CID + + // Object is a type alias of + // Object from object package of neofs-api-go. + Object = object.Object + + // OwnerID is a type alias of + // OwnerID from object package of neofs-api-go. + OwnerID = object.OwnerID + + // Address is a type alias of + // Address from refs package of neofs-api-go. + Address = refs.Address + + // ObjectVerificationParams groups the parameters of stored object verification. + ObjectVerificationParams struct { + Address + Node multiaddr.Multiaddr + Handler func(valid bool, obj *Object) + LocalInvalid bool + } + + // ObjectVerifier is an interface of stored object verifier. + ObjectVerifier interface { + Verify(ctx context.Context, params *ObjectVerificationParams) bool + } + + // ObjectSource is an interface of the object storage with read access. + ObjectSource interface { + Get(ctx context.Context, addr Address) (*Object, error) + } + + // ObjectStoreParams groups the parameters for object storing. + ObjectStoreParams struct { + *Object + Nodes []ObjectLocation + Handler func(ObjectLocation, bool) + } + + // ObjectReceptacle is an interface of object storage with write access. + ObjectReceptacle interface { + Put(ctx context.Context, params ObjectStoreParams) error + } + + // ObjectCleaner Entity for removing object by address from somewhere + ObjectCleaner interface { + Del(Address) error + } + + // ContainerActualityChecker is an interface of entity + // for checking local node presence in container + // Return true if no errors && local node is in container + ContainerActualityChecker interface { + Actual(ctx context.Context, cid CID) bool + } + + // ObjectPool is a queue of objects selected for data audit. + // It is updated once in epoch. + ObjectPool interface { + Update([]Address) + Pop() (Address, error) + Undone() int + } + + // Scheduler returns slice of addresses for data audit. + // These addresses put into ObjectPool. + Scheduler interface { + SelectForReplication(limit int) ([]Address, error) + } + + // ReservationRatioReceiver is an interface of entity + // for getting reservation ratio value of object by address. 
+ ReservationRatioReceiver interface { + ReservationRatio(ctx context.Context, objAddr Address) (int, error) + } + + // RemoteStorageSelector is an interface of entity + // for getting remote nodes from placement for object by address + // Result doesn't contain nodes from exclude list + RemoteStorageSelector interface { + SelectRemoteStorages(ctx context.Context, addr Address, excl ...multiaddr.Multiaddr) ([]ObjectLocation, error) + } + + // MultiSolver is an interface that encapsulates other different utilities. + MultiSolver interface { + AddressStore + RemoteStorageSelector + ReservationRatioReceiver + ContainerActualityChecker + EpochReceiver + WeightComparator + } + + // ObjectLocator is an itnerface of entity + // for building list current object remote nodes by address + ObjectLocator interface { + LocateObject(ctx context.Context, objAddr Address) ([]multiaddr.Multiaddr, error) + } + + // WeightComparator is an itnerface of entity + // for comparing weight by address of local node with passed node + // returns -1 if local node is weightier or on error + // returns 0 if weights are equal + // returns 1 if passed node is weightier + WeightComparator interface { + CompareWeight(ctx context.Context, addr Address, node multiaddr.Multiaddr) int + } + + // EpochReceiver is an interface of entity for getting current epoch number. + EpochReceiver interface { + Epoch() uint64 + } + + // ObjectLocation groups the information about object current remote location. + ObjectLocation struct { + Node multiaddr.Multiaddr + WeightGreater bool // true if Node field value has less index in placement vector than localhost + } + + // ObjectLocationRecord groups the information about all current locations. + ObjectLocationRecord struct { + Address + ReservationRatio int + Locations []ObjectLocation + } + + // ReplicateTask groups the information about object replication task. + // Task solver should not process nodes from exclude list, + // Task solver should perform up to Shortage replications. + ReplicateTask struct { + Address + Shortage int + ExcludeNodes []multiaddr.Multiaddr + } + + // ReplicateResult groups the information about object replication task result. + ReplicateResult struct { + *ReplicateTask + NewStorages []multiaddr.Multiaddr + } + + // PresenceChecker is an interface of object storage with presence check access. + PresenceChecker interface { + Has(address Address) (bool, error) + } + + // AddressStore is an interface of local peer's network address storage. 
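The WeightComparator contract above packs three outcomes into an int. A small sketch of how a caller might read it; the returned strings are purely illustrative:

package example

import (
	"context"

	"github.com/multiformats/go-multiaddr"
	"github.com/nspcc-dev/neofs-node/lib/replication"
)

// describeWeight spells out the -1/0/1 convention documented above.
func describeWeight(ctx context.Context, wc replication.WeightComparator, addr replication.Address, node multiaddr.Multiaddr) string {
	switch wc.CompareWeight(ctx, addr, node) {
	case 1:
		return "remote node is weightier for this address"
	case 0:
		return "weights are equal (or the node is the local one)"
	default: // -1 also covers comparison errors
		return "local node is weightier, or the comparison failed"
	}
}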
+ AddressStore interface { + SelfAddr() (multiaddr.Multiaddr, error) + } +) + +const ( + writeResultTimeout = "write result timeout" + + taskChanClosed = " process finish finish: task channel closed" + ctxDoneMsg = " process finish: context done" + + objectPoolPart = "object pool" + loggerPart = "logger" + objectVerifierPart = "object verifier" + objectReceptaclePart = "object receptacle" + remoteStorageSelectorPart = "remote storage elector" + objectSourcePart = "object source" + reservationRatioReceiverPart = "reservation ratio receiver" + objectLocatorPart = "object locator" + epochReceiverPart = "epoch receiver" + presenceCheckerPart = "object presence checker" + weightComparatorPart = "weight comparator" + addrStorePart = "address store" +) + +func instanceError(entity, part string) error { + return errors.Errorf("could not instantiate %s: empty %s", entity, part) +} + +func addressFields(addr Address) []zap.Field { + return []zap.Field{ + zap.Stringer("oid", addr.ObjectID), + zap.Stringer("cid", addr.CID), + } +} diff --git a/lib/replication/garbage.go b/lib/replication/garbage.go new file mode 100644 index 000000000..e2f7d44b4 --- /dev/null +++ b/lib/replication/garbage.go @@ -0,0 +1,27 @@ +package replication + +import ( + "sync" +) + +type ( + garbageStore struct { + *sync.RWMutex + items []Address + } +) + +func (s *garbageStore) put(addr Address) { + s.Lock() + defer s.Unlock() + + for i := range s.items { + if s.items[i].Equal(&addr) { + return + } + } + + s.items = append(s.items, addr) +} + +func newGarbageStore() *garbageStore { return &garbageStore{RWMutex: new(sync.RWMutex)} } diff --git a/lib/replication/implementations.go b/lib/replication/implementations.go new file mode 100644 index 000000000..708a8226c --- /dev/null +++ b/lib/replication/implementations.go @@ -0,0 +1,292 @@ +package replication + +import ( + "context" + "sync" + + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/localstore" + "github.com/nspcc-dev/neofs-node/lib/netmap" + "github.com/nspcc-dev/neofs-node/lib/placement" + "github.com/nspcc-dev/neofs-node/lib/rand" + "github.com/pkg/errors" +) + +type ( + replicationScheduler struct { + cac ContainerActualityChecker + ls localstore.Iterator + } + + // SchedulerParams groups the parameters of scheduler constructor. + SchedulerParams struct { + ContainerActualityChecker + localstore.Iterator + } + + objectPool struct { + mu *sync.Mutex + tasks []Address + } + + multiSolver struct { + as AddressStore + pl placement.Component + } + + // MultiSolverParams groups the parameters of multi solver constructor. + MultiSolverParams struct { + AddressStore + Placement placement.Component + } +) + +const ( + errPoolExhausted = internal.Error("object pool is exhausted") + + objectPoolInstanceFailMsg = "could not create object pool" + errEmptyLister = internal.Error("empty local objects lister") + errEmptyContainerActual = internal.Error("empty container actuality checker") + + multiSolverInstanceFailMsg = "could not create multi solver" + errEmptyAddressStore = internal.Error("empty address store") + errEmptyPlacement = internal.Error("empty placement") + replicationSchedulerEntity = "replication scheduler" +) + +// NewObjectPool is an object pool constructor. +func NewObjectPool() ObjectPool { + return &objectPool{mu: new(sync.Mutex)} +} + +// NewReplicationScheduler is a replication scheduler constructor. 
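NewObjectPool above returns a mutex-guarded FIFO of addresses; Pop drains it until errPoolExhausted, and the replication scheduler constructed just below is what refills the pool each epoch. A hedged usage sketch:

package example

import "github.com/nspcc-dev/neofs-node/lib/replication"

// drainPool consumes every queued address; Pop returning an error is the
// normal termination condition once the pool is empty.
func drainPool(pool replication.ObjectPool, scheduled []replication.Address) []replication.Address {
	pool.Update(scheduled)

	out := make([]replication.Address, 0, pool.Undone())
	for {
		addr, err := pool.Pop()
		if err != nil {
			return out
		}

		out = append(out, addr)
	}
}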
+func NewReplicationScheduler(p SchedulerParams) (Scheduler, error) { + switch { + case p.ContainerActualityChecker == nil: + return nil, errors.Wrap(errEmptyContainerActual, objectPoolInstanceFailMsg) + case p.Iterator == nil: + return nil, errors.Wrap(errEmptyLister, objectPoolInstanceFailMsg) + } + + return &replicationScheduler{ + cac: p.ContainerActualityChecker, + ls: p.Iterator, + }, nil +} + +// NewMultiSolver is a multi solver constructor. +func NewMultiSolver(p MultiSolverParams) (MultiSolver, error) { + switch { + case p.Placement == nil: + return nil, errors.Wrap(errEmptyPlacement, multiSolverInstanceFailMsg) + case p.AddressStore == nil: + return nil, errors.Wrap(errEmptyAddressStore, multiSolverInstanceFailMsg) + } + + return &multiSolver{ + as: p.AddressStore, + pl: p.Placement, + }, nil +} + +func (s *objectPool) Update(pool []Address) { + s.mu.Lock() + defer s.mu.Unlock() + + s.tasks = pool +} + +func (s *objectPool) Undone() int { + s.mu.Lock() + defer s.mu.Unlock() + + return len(s.tasks) +} + +func (s *objectPool) Pop() (Address, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if len(s.tasks) == 0 { + return Address{}, errPoolExhausted + } + + head := s.tasks[0] + s.tasks = s.tasks[1:] + + return head, nil +} + +func (s *replicationScheduler) SelectForReplication(limit int) ([]Address, error) { + // Attention! This routine might be inefficient with big number of objects + // and containers. Consider using fast traversal and filtering algorithms + // with sieve of bloom filters. + migration := make([]Address, 0, limit) + replication := make([]Address, 0) + ctx := context.Background() + + if err := s.ls.Iterate(nil, func(meta *localstore.ObjectMeta) bool { + if s.cac.Actual(ctx, meta.Object.SystemHeader.CID) { + replication = append(replication, *meta.Object.Address()) + } else { + migration = append(migration, *meta.Object.Address()) + } + return len(migration) >= limit + }); err != nil { + return nil, err + } + + lnM := len(migration) + lnR := len(replication) + edge := 0 + + // I considered using rand.Perm() and appending elements in `for` cycle. + // But it seems, that shuffling is efficient even when `limit-lnM` + // is 1000 times smaller than `lnR`. But it can be discussed and changed + // later anyway. 
+ if lnM < limit { + r := rand.New() + r.Shuffle(lnR, func(i, j int) { + replication[i], replication[j] = replication[j], replication[i] + }) + + edge = min(limit-lnM, lnR) + } + + return append(migration, replication[:edge]...), nil +} + +func (s *multiSolver) Epoch() uint64 { return s.pl.NetworkState().Epoch } + +func (s *multiSolver) SelfAddr() (multiaddr.Multiaddr, error) { return s.as.SelfAddr() } +func (s *multiSolver) ReservationRatio(ctx context.Context, addr Address) (int, error) { + graph, err := s.pl.Query(ctx, placement.ContainerID(addr.CID)) + if err != nil { + return 0, errors.Wrap(err, "reservation ratio computation failed on placement query") + } + + nodes, err := graph.Filter(func(group netmap.SFGroup, bucket *netmap.Bucket) *netmap.Bucket { + return bucket.GetSelection(group.Selectors, addr.ObjectID.Bytes()) + }).NodeList() + if err != nil { + return 0, errors.Wrap(err, "reservation ratio computation failed on graph node list") + } + + return len(nodes), nil +} + +func (s *multiSolver) SelectRemoteStorages(ctx context.Context, addr Address, excl ...multiaddr.Multiaddr) ([]ObjectLocation, error) { + selfAddr, err := s.as.SelfAddr() + if err != nil { + return nil, errors.Wrap(err, "select remote storage nodes failed on get self address") + } + + nodes, err := s.selectNodes(ctx, addr, excl...) + if err != nil { + return nil, errors.Wrap(err, "select remote storage nodes failed on get node list") + } + + var ( + metSelf bool + selfIndex = -1 + res = make([]ObjectLocation, 0, len(nodes)) + ) + + for i := range nodes { + if nodes[i].Equal(selfAddr) { + metSelf = true + selfIndex = i + } + + res = append(res, ObjectLocation{ + Node: nodes[i], + WeightGreater: !metSelf, + }) + } + + if selfIndex != -1 { + res = append(res[:selfIndex], res[selfIndex+1:]...) + } + + return res, nil +} + +func (s *multiSolver) selectNodes(ctx context.Context, addr Address, excl ...multiaddr.Multiaddr) ([]multiaddr.Multiaddr, error) { + graph, err := s.pl.Query(ctx, placement.ContainerID(addr.CID)) + if err != nil { + return nil, errors.Wrap(err, "select remote storage nodes failed on placement query") + } + + filter := func(group netmap.SFGroup, bucket *netmap.Bucket) *netmap.Bucket { return bucket } + if !addr.ObjectID.Empty() { + filter = func(group netmap.SFGroup, bucket *netmap.Bucket) *netmap.Bucket { + return bucket.GetSelection(group.Selectors, addr.ObjectID.Bytes()) + } + } + + return graph.Exclude(excl).Filter(filter).NodeList() +} + +func (s *multiSolver) Actual(ctx context.Context, cid CID) bool { + graph, err := s.pl.Query(ctx, placement.ContainerID(cid)) + if err != nil { + return false + } + + nodes, err := graph.NodeList() + if err != nil { + return false + } + + selfAddr, err := s.as.SelfAddr() + if err != nil { + return false + } + + for i := range nodes { + if nodes[i].Equal(selfAddr) { + return true + } + } + + return false +} + +func (s *multiSolver) CompareWeight(ctx context.Context, addr Address, node multiaddr.Multiaddr) int { + selfAddr, err := s.as.SelfAddr() + if err != nil { + return -1 + } + + if selfAddr.Equal(node) { + return 0 + } + + excl := make([]multiaddr.Multiaddr, 0) + + for { + nodes, err := s.selectNodes(ctx, addr, excl...) 
+ if err != nil { + return -1 + } + + for j := range nodes { + if nodes[j].Equal(selfAddr) { + return -1 + } else if nodes[j].Equal(node) { + return 1 + } + } + + excl = append(excl, nodes[0]) // TODO: when it will become relevant to append full nodes slice + } +} + +func min(a, b int) int { + if a < b { + return a + } + + return b +} diff --git a/lib/replication/location_detector.go b/lib/replication/location_detector.go new file mode 100644 index 000000000..d010e48f5 --- /dev/null +++ b/lib/replication/location_detector.go @@ -0,0 +1,154 @@ +package replication + +import ( + "context" + "time" + + "go.uber.org/zap" +) + +type ( + // ObjectLocationDetector is an interface of entity + // that listens tasks to detect object current locations in network. + ObjectLocationDetector interface { + Process(ctx context.Context) chan<- Address + Subscribe(ch chan<- *ObjectLocationRecord) + } + + objectLocationDetector struct { + weightComparator WeightComparator + objectLocator ObjectLocator + reservationRatioReceiver ReservationRatioReceiver + presenceChecker PresenceChecker + log *zap.Logger + + taskChanCap int + resultTimeout time.Duration + resultChan chan<- *ObjectLocationRecord + } + + // LocationDetectorParams groups the parameters of location detector's constructor. + LocationDetectorParams struct { + WeightComparator + ObjectLocator + ReservationRatioReceiver + PresenceChecker + *zap.Logger + + TaskChanCap int + ResultTimeout time.Duration + } +) + +const ( + defaultLocationDetectorChanCap = 10 + defaultLocationDetectorResultTimeout = time.Second + locationDetectorEntity = "object location detector" +) + +func (s *objectLocationDetector) Subscribe(ch chan<- *ObjectLocationRecord) { s.resultChan = ch } + +func (s *objectLocationDetector) Process(ctx context.Context) chan<- Address { + ch := make(chan Address, s.taskChanCap) + go s.processRoutine(ctx, ch) + + return ch +} + +func (s *objectLocationDetector) writeResult(locationRecord *ObjectLocationRecord) { + if s.resultChan == nil { + return + } + select { + case s.resultChan <- locationRecord: + case <-time.After(s.resultTimeout): + s.log.Warn(writeResultTimeout) + } +} + +func (s *objectLocationDetector) processRoutine(ctx context.Context, taskChan <-chan Address) { +loop: + for { + select { + case <-ctx.Done(): + s.log.Warn(locationDetectorEntity+ctxDoneMsg, zap.Error(ctx.Err())) + break loop + case addr, ok := <-taskChan: + if !ok { + s.log.Warn(locationDetectorEntity + taskChanClosed) + break loop + } else if has, err := s.presenceChecker.Has(addr); err != nil || !has { + continue loop + } + s.handleTask(ctx, addr) + } + } + close(s.resultChan) +} + +func (s *objectLocationDetector) handleTask(ctx context.Context, addr Address) { + var ( + err error + log = s.log.With(addressFields(addr)...) 
+ locationRecord = &ObjectLocationRecord{addr, 0, nil} + ) + + if locationRecord.ReservationRatio, err = s.reservationRatioReceiver.ReservationRatio(ctx, addr); err != nil { + log.Error("reservation ratio computation failure", zap.Error(err)) + return + } + + nodes, err := s.objectLocator.LocateObject(ctx, addr) + if err != nil { + log.Error("locate object failure", zap.Error(err)) + return + } + + for i := range nodes { + locationRecord.Locations = append(locationRecord.Locations, ObjectLocation{ + Node: nodes[i], + WeightGreater: s.weightComparator.CompareWeight(ctx, addr, nodes[i]) == 1, + }) + } + + log.Debug("current location record created", + zap.Int("reservation ratio", locationRecord.ReservationRatio), + zap.Any("storage nodes exclude self", locationRecord.Locations)) + + s.writeResult(locationRecord) +} + +// NewLocationDetector is an object location detector's constructor. +func NewLocationDetector(p *LocationDetectorParams) (ObjectLocationDetector, error) { + switch { + case p.PresenceChecker == nil: + return nil, instanceError(locationDetectorEntity, presenceCheckerPart) + case p.ObjectLocator == nil: + return nil, instanceError(locationDetectorEntity, objectLocatorPart) + case p.ReservationRatioReceiver == nil: + return nil, instanceError(locationDetectorEntity, reservationRatioReceiverPart) + case p.Logger == nil: + return nil, instanceError(locationDetectorEntity, loggerPart) + case p.WeightComparator == nil: + return nil, instanceError(locationDetectorEntity, weightComparatorPart) + } + + if p.TaskChanCap <= 0 { + p.TaskChanCap = defaultLocationDetectorChanCap + } + + if p.ResultTimeout <= 0 { + p.ResultTimeout = defaultLocationDetectorResultTimeout + } + + return &objectLocationDetector{ + weightComparator: p.WeightComparator, + objectLocator: p.ObjectLocator, + reservationRatioReceiver: p.ReservationRatioReceiver, + presenceChecker: p.PresenceChecker, + log: p.Logger, + taskChanCap: p.TaskChanCap, + resultTimeout: p.ResultTimeout, + resultChan: nil, + }, nil +} diff --git a/lib/replication/manager.go b/lib/replication/manager.go new file mode 100644 index 000000000..57d7d17ae --- /dev/null +++ b/lib/replication/manager.go @@ -0,0 +1,347 @@ +package replication + +import ( + "context" + "fmt" + "time" + + "go.uber.org/zap" +) + +type ( + // Manager is an interface of object manager, + Manager interface { + Process(ctx context.Context) + HandleEpoch(ctx context.Context, epoch uint64) + } + + manager struct { + objectPool ObjectPool + managerTimeout time.Duration + objectVerifier ObjectVerifier + log *zap.Logger + + locationDetector ObjectLocationDetector + storageValidator StorageValidator + replicator ObjectReplicator + restorer ObjectRestorer + placementHonorer PlacementHonorer + + // internal task channels + detectLocationTaskChan chan<- Address + restoreTaskChan chan<- Address + + pushTaskTimeout time.Duration + + // internal result channels + replicationResultChan <-chan *ReplicateResult + restoreResultChan <-chan Address + + garbageChanCap int + replicateResultChanCap int + restoreResultChanCap int + + garbageChan <-chan Address + garbageStore *garbageStore + + epochCh chan uint64 + scheduler Scheduler + + poolSize int + poolExpansionRate float64 + } + + // ManagerParams groups the parameters of object manager's constructor. 
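Components in this package follow a Process/Subscribe pattern: Subscribe registers the result channel, Process starts the worker goroutine and returns the task channel. A hedged wiring sketch for the location detector above (its construction via NewLocationDetector is omitted):

package example

import (
	"context"

	"github.com/nspcc-dev/neofs-node/lib/replication"
)

func runDetector(ctx context.Context, d replication.ObjectLocationDetector, addrs []replication.Address) []*replication.ObjectLocationRecord {
	results := make(chan *replication.ObjectLocationRecord, len(addrs))
	d.Subscribe(results) // register the result channel before feeding tasks

	tasks := d.Process(ctx)
	for _, a := range addrs {
		tasks <- a
	}
	close(tasks) // the detector closes the result channel once its loop ends

	var out []*replication.ObjectLocationRecord
	for rec := range results {
		out = append(out, rec)
	}

	return out
}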
+ ManagerParams struct { + Interval time.Duration + PushTaskTimeout time.Duration + PlacementHonorerEnabled bool + ReplicateTaskChanCap int + RestoreTaskChanCap int + GarbageChanCap int + InitPoolSize int + ExpansionRate float64 + + ObjectPool + ObjectVerifier + + PlacementHonorer + ObjectLocationDetector + StorageValidator + ObjectReplicator + ObjectRestorer + + *zap.Logger + + Scheduler + } +) + +const ( + managerEntity = "replication manager" + + redundantCopiesBeagleName = "BEAGLE_REDUNDANT_COPIES" + + defaultInterval = 3 * time.Second + defaultPushTaskTimeout = time.Second + + defaultGarbageChanCap = 10 + defaultReplicateResultChanCap = 10 + defaultRestoreResultChanCap = 10 +) + +func (s *manager) Name() string { return redundantCopiesBeagleName } + +func (s *manager) HandleEpoch(ctx context.Context, epoch uint64) { + select { + case s.epochCh <- epoch: + case <-ctx.Done(): + return + case <-time.After(s.managerTimeout): + // this timeout must never happen + // if timeout happens in runtime, then something is definitely wrong! + s.log.Warn("replication scheduler is busy") + } +} + +func (s *manager) Process(ctx context.Context) { + // starting object restorer + // bind manager to push restore tasks to restorer + s.restoreTaskChan = s.restorer.Process(ctx) + + // bind manager to listen object restorer results + restoreResultChan := make(chan Address, s.restoreResultChanCap) + s.restoreResultChan = restoreResultChan + s.restorer.Subscribe(restoreResultChan) + + // starting location detector + // bind manager to push locate tasks to location detector + s.detectLocationTaskChan = s.locationDetector.Process(ctx) + + locationsHandlerStartFn := s.storageValidator.Process + if s.placementHonorer != nil { + locationsHandlerStartFn = s.placementHonorer.Process + + // starting storage validator + // bind placement honorer to push validate tasks to storage validator + s.placementHonorer.Subscribe(s.storageValidator.Process(ctx)) + } + + // starting location handler component + // bind location detector to push tasks to location handler component + s.locationDetector.Subscribe(locationsHandlerStartFn(ctx)) + + // bind manager to listen object replicator results + replicateResultChan := make(chan *ReplicateResult, s.replicateResultChanCap) + s.replicationResultChan = replicateResultChan + s.replicator.Subscribe(replicateResultChan) + + // starting replicator + // bind storage validator to push replicate tasks to replicator + s.storageValidator.SubscribeReplication(s.replicator.Process(ctx)) + garbageChan := make(chan Address, s.garbageChanCap) + s.garbageChan = garbageChan + s.storageValidator.SubscribeGarbage(garbageChan) + + go s.taskRoutine(ctx) + go s.resultRoutine(ctx) + s.processRoutine(ctx) +} + +func resultLog(s1, s2 string) string { + return fmt.Sprintf(managerEntity+" %s process finish: %s", s1, s2) +} + +func (s *manager) writeDetectLocationTask(addr Address) { + if s.detectLocationTaskChan == nil { + return + } + select { + case s.detectLocationTaskChan <- addr: + case <-time.After(s.pushTaskTimeout): + s.log.Warn(writeResultTimeout) + } +} + +func (s *manager) writeRestoreTask(addr Address) { + if s.restoreTaskChan == nil { + return + } + select { + case s.restoreTaskChan <- addr: + case <-time.After(s.pushTaskTimeout): + s.log.Warn(writeResultTimeout) + } +} + +func (s *manager) resultRoutine(ctx context.Context) { +loop: + for { + select { + case <-ctx.Done(): + s.log.Warn(resultLog("result", ctxDoneMsg), zap.Error(ctx.Err())) + break loop + case addr, ok := <-s.restoreResultChan: + 
if !ok {
+				s.log.Warn(resultLog("result", "restorer result channel closed"))
+				break loop
+			}
+			s.log.Info("object successfully restored", addressFields(addr)...)
+		case res, ok := <-s.replicationResultChan:
+			if !ok {
+				s.log.Warn(resultLog("result", "replicator result channel closed"))
+				break loop
+			} else if len(res.NewStorages) > 0 {
+				s.log.Info("object successfully replicated",
+					append(addressFields(res.Address), zap.Any("new storages", res.NewStorages))...)
+			}
+		case addr, ok := <-s.garbageChan:
+			if !ok {
+				s.log.Warn(resultLog("result", "garbage channel closed"))
+				break loop
+			}
+			s.garbageStore.put(addr)
+		}
+	}
+}
+
+func (s *manager) taskRoutine(ctx context.Context) {
+loop:
+	for {
+		if task, err := s.objectPool.Pop(); err == nil {
+			select {
+			case <-ctx.Done():
+				s.log.Warn(resultLog("task", ctxDoneMsg), zap.Error(ctx.Err()))
+				break loop
+			default:
+				s.distributeTask(ctx, task)
+			}
+		} else {
+			// if the object pool is empty, check it again after a while
+			time.Sleep(s.managerTimeout)
+		}
+	}
+	close(s.restoreTaskChan)
+	close(s.detectLocationTaskChan)
+}
+
+func (s *manager) processRoutine(ctx context.Context) {
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case epoch := <-s.epochCh:
+			var delta int
+
+			// undone is the number of objects that could not be processed in the last epoch
+			undone := s.objectPool.Undone()
+			if undone > 0 {
+				// if there are unprocessed objects, lower the pool size estimate
+				delta = -undone
+			} else {
+				// otherwise try to expand it
+				delta = int(float64(s.poolSize) * s.poolExpansionRate)
+			}
+
+			tasks, err := s.scheduler.SelectForReplication(s.poolSize + delta)
+			if err != nil {
+				s.log.Warn("can't select objects for replication", zap.Error(err))
+			}
+
+			// if there are not enough objects to fill the pool, do not change it;
+			// otherwise expand or shrink it by the delta value
+			if len(tasks) >= s.poolSize+delta {
+				s.poolSize += delta
+			}
+
+			s.objectPool.Update(tasks)
+
+			s.log.Info("replication schedule updated",
+				zap.Int("unprocessed_tasks", undone),
+				zap.Int("next_tasks", len(tasks)),
+				zap.Int("pool_size", s.poolSize),
+				zap.Uint64("new_epoch", epoch))
+		}
+	}
+}
+
+// distributeTask verifies the object stored locally under the given address:
+// if verification fails (Verify returns false), a restore task is planned;
+// otherwise a location detection task is planned.
+func (s *manager) distributeTask(ctx context.Context, addr Address) {
+	if !s.objectVerifier.Verify(ctx, &ObjectVerificationParams{Address: addr}) {
+		s.writeRestoreTask(addr)
+		return
+	}
+
+	s.writeDetectLocationTask(addr)
+}
+
+// NewManager is an object manager's constructor.
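+//
+// A minimal wiring sketch (not part of the original code). The pool, verifier,
+// detector, validator, replicator, restorer and scheduler values are assumed
+// implementations (e.g. test doubles) of the interfaces embedded in
+// ManagerParams; unset intervals, timeouts and capacities fall back to the
+// defaults above.
+//
+//	mgr, err := NewManager(ManagerParams{
+//		ObjectPool:             pool,
+//		ObjectVerifier:         verifier,
+//		ObjectLocationDetector: detector,
+//		StorageValidator:       validator,
+//		ObjectReplicator:       replicator,
+//		ObjectRestorer:         restorer,
+//		Scheduler:              scheduler,
+//		Logger:                 zap.L(),
+//		InitPoolSize:           100,
+//		ExpansionRate:          0.1,
+//	})
+//	if err != nil {
+//		// handle construction error
+//	}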
+func NewManager(p ManagerParams) (Manager, error) { + switch { + case p.ObjectPool == nil: + return nil, instanceError(managerEntity, objectPoolPart) + case p.ObjectVerifier == nil: + return nil, instanceError(managerEntity, objectVerifierPart) + case p.Logger == nil: + return nil, instanceError(managerEntity, loggerPart) + case p.ObjectLocationDetector == nil: + return nil, instanceError(managerEntity, locationDetectorEntity) + case p.StorageValidator == nil: + return nil, instanceError(managerEntity, storageValidatorEntity) + case p.ObjectReplicator == nil: + return nil, instanceError(managerEntity, objectReplicatorEntity) + case p.ObjectRestorer == nil: + return nil, instanceError(managerEntity, objectRestorerEntity) + case p.PlacementHonorer == nil && p.PlacementHonorerEnabled: + return nil, instanceError(managerEntity, placementHonorerEntity) + case p.Scheduler == nil: + return nil, instanceError(managerEntity, replicationSchedulerEntity) + } + + if p.Interval <= 0 { + p.Interval = defaultInterval + } + + if p.PushTaskTimeout <= 0 { + p.PushTaskTimeout = defaultPushTaskTimeout + } + + if p.GarbageChanCap <= 0 { + p.GarbageChanCap = defaultGarbageChanCap + } + + if p.ReplicateTaskChanCap <= 0 { + p.ReplicateTaskChanCap = defaultReplicateResultChanCap + } + + if p.RestoreTaskChanCap <= 0 { + p.RestoreTaskChanCap = defaultRestoreResultChanCap + } + + if !p.PlacementHonorerEnabled { + p.PlacementHonorer = nil + } + + return &manager{ + objectPool: p.ObjectPool, + managerTimeout: p.Interval, + objectVerifier: p.ObjectVerifier, + log: p.Logger, + locationDetector: p.ObjectLocationDetector, + storageValidator: p.StorageValidator, + replicator: p.ObjectReplicator, + restorer: p.ObjectRestorer, + placementHonorer: p.PlacementHonorer, + pushTaskTimeout: p.PushTaskTimeout, + garbageChanCap: p.GarbageChanCap, + replicateResultChanCap: p.ReplicateTaskChanCap, + restoreResultChanCap: p.RestoreTaskChanCap, + garbageStore: newGarbageStore(), + epochCh: make(chan uint64), + scheduler: p.Scheduler, + poolSize: p.InitPoolSize, + poolExpansionRate: p.ExpansionRate, + }, nil +} diff --git a/lib/replication/object_replicator.go b/lib/replication/object_replicator.go new file mode 100644 index 000000000..37167286a --- /dev/null +++ b/lib/replication/object_replicator.go @@ -0,0 +1,188 @@ +package replication + +import ( + "context" + "time" + + "github.com/multiformats/go-multiaddr" + "go.uber.org/zap" +) + +type ( + // ObjectReplicator is an interface of entity + // that listens object replication tasks. + // Result includes new object storage list. + ObjectReplicator interface { + Process(ctx context.Context) chan<- *ReplicateTask + Subscribe(ch chan<- *ReplicateResult) + } + + objectReplicator struct { + objectReceptacle ObjectReceptacle + remoteStorageSelector RemoteStorageSelector + objectSource ObjectSource + presenceChecker PresenceChecker + log *zap.Logger + + taskChanCap int + resultTimeout time.Duration + resultChan chan<- *ReplicateResult + } + + // ObjectReplicatorParams groups the parameters of replicator's constructor. 
+ ObjectReplicatorParams struct { + RemoteStorageSelector + ObjectSource + ObjectReceptacle + PresenceChecker + *zap.Logger + + TaskChanCap int + ResultTimeout time.Duration + } +) + +const ( + defaultReplicatorChanCap = 10 + defaultReplicatorResultTimeout = time.Second + objectReplicatorEntity = "object replicator" +) + +func (s *objectReplicator) Subscribe(ch chan<- *ReplicateResult) { s.resultChan = ch } + +func (s *objectReplicator) Process(ctx context.Context) chan<- *ReplicateTask { + ch := make(chan *ReplicateTask, s.taskChanCap) + go s.processRoutine(ctx, ch) + + return ch +} + +func (s *objectReplicator) writeResult(replicateResult *ReplicateResult) { + if s.resultChan == nil { + return + } + select { + case s.resultChan <- replicateResult: + case <-time.After(s.resultTimeout): + s.log.Warn(writeResultTimeout) + } +} + +func (s *objectReplicator) processRoutine(ctx context.Context, taskChan <-chan *ReplicateTask) { +loop: + for { + select { + case <-ctx.Done(): + s.log.Warn(objectReplicatorEntity+" process finish: context completed", + zap.Error(ctx.Err())) + break loop + case replicateTask, ok := <-taskChan: + if !ok { + s.log.Warn(objectReplicatorEntity + " process finish: task channel closed") + break loop + } else if has, err := s.presenceChecker.Has(replicateTask.Address); err != nil || !has { + continue loop + } + s.handleTask(ctx, replicateTask) + } + } + close(s.resultChan) +} + +func (s *objectReplicator) handleTask(ctx context.Context, task *ReplicateTask) { + obj, err := s.objectSource.Get(ctx, task.Address) + if err != nil { + s.log.Warn("get object from storage failure", zap.Error(err)) + return + } + + res := &ReplicateResult{ + ReplicateTask: task, + NewStorages: make([]multiaddr.Multiaddr, 0, task.Shortage), + } + + for len(res.NewStorages) < task.Shortage { + nodesInfo, err := s.remoteStorageSelector.SelectRemoteStorages(ctx, task.Address, task.ExcludeNodes...) + if err != nil { + break + } + + for i := 0; i < len(nodesInfo); i++ { + if contains(res.NewStorages, nodesInfo[i].Node) { + nodesInfo = append(nodesInfo[:i], nodesInfo[i+1:]...) + i-- + + continue + } + } + + if len(nodesInfo) > task.Shortage { + nodesInfo = nodesInfo[:task.Shortage] + } + + if len(nodesInfo) == 0 { + break + } + + if err := s.objectReceptacle.Put(ctx, ObjectStoreParams{ + Object: obj, + Nodes: nodesInfo, + Handler: func(location ObjectLocation, success bool) { + if success { + res.NewStorages = append(res.NewStorages, location.Node) + } else { + task.ExcludeNodes = append(task.ExcludeNodes, location.Node) + } + }, + }); err != nil { + s.log.Warn("replicate object failure", zap.Error(err)) + break + } + } + + s.writeResult(res) +} + +func contains(list []multiaddr.Multiaddr, item multiaddr.Multiaddr) bool { + for i := range list { + if list[i].Equal(item) { + return true + } + } + + return false +} + +// NewReplicator is an object replicator's constructor. 
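+//
+// A minimal usage sketch (not part of the original code); selector, source,
+// receptacle and checker are assumed implementations of the interfaces
+// embedded in ObjectReplicatorParams:
+//
+//	repl, err := NewReplicator(ObjectReplicatorParams{
+//		RemoteStorageSelector: selector,
+//		ObjectSource:          source,
+//		ObjectReceptacle:      receptacle,
+//		PresenceChecker:       checker,
+//		Logger:                zap.L(),
+//	})
+//	if err != nil {
+//		// handle construction error
+//	}
+//	results := make(chan *ReplicateResult, 1)
+//	repl.Subscribe(results)    // receive replication results
+//	tasks := repl.Process(ctx) // push *ReplicateTask values here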
+func NewReplicator(p ObjectReplicatorParams) (ObjectReplicator, error) { + switch { + case p.ObjectReceptacle == nil: + return nil, instanceError(objectReplicatorEntity, objectReceptaclePart) + case p.ObjectSource == nil: + return nil, instanceError(objectReplicatorEntity, objectSourcePart) + case p.RemoteStorageSelector == nil: + return nil, instanceError(objectReplicatorEntity, remoteStorageSelectorPart) + case p.PresenceChecker == nil: + return nil, instanceError(objectReplicatorEntity, presenceCheckerPart) + case p.Logger == nil: + return nil, instanceError(objectReplicatorEntity, loggerPart) + } + + if p.TaskChanCap <= 0 { + p.TaskChanCap = defaultReplicatorChanCap + } + + if p.ResultTimeout <= 0 { + p.ResultTimeout = defaultReplicatorResultTimeout + } + + return &objectReplicator{ + objectReceptacle: p.ObjectReceptacle, + remoteStorageSelector: p.RemoteStorageSelector, + objectSource: p.ObjectSource, + presenceChecker: p.PresenceChecker, + log: p.Logger, + taskChanCap: p.TaskChanCap, + resultTimeout: p.ResultTimeout, + }, nil +} diff --git a/lib/replication/object_restorer.go b/lib/replication/object_restorer.go new file mode 100644 index 000000000..00e70d87b --- /dev/null +++ b/lib/replication/object_restorer.go @@ -0,0 +1,173 @@ +package replication + +import ( + "context" + "time" + + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-node/lib/localstore" + "go.uber.org/zap" +) + +type ( + // ObjectRestorer is an interface of entity + // that listen tasks to restore object by address. + // Restorer doesn't recheck if object is actually corrupted. + // Restorer writes result to subscriber only if restoration was successful. + ObjectRestorer interface { + Process(ctx context.Context) chan<- Address + Subscribe(ch chan<- Address) + } + + objectRestorer struct { + objectVerifier ObjectVerifier + remoteStorageSelector RemoteStorageSelector + objectReceptacle ObjectReceptacle + epochReceiver EpochReceiver + presenceChecker PresenceChecker + log *zap.Logger + + taskChanCap int + resultTimeout time.Duration + resultChan chan<- Address + } + + // ObjectRestorerParams groups the parameters of object restorer's constructor. 
+ ObjectRestorerParams struct { + ObjectVerifier + ObjectReceptacle + EpochReceiver + RemoteStorageSelector + PresenceChecker + *zap.Logger + + TaskChanCap int + ResultTimeout time.Duration + } +) + +const ( + defaultRestorerChanCap = 10 + defaultRestorerResultTimeout = time.Second + objectRestorerEntity = "object restorer" +) + +func (s *objectRestorer) Subscribe(ch chan<- Address) { s.resultChan = ch } + +func (s *objectRestorer) Process(ctx context.Context) chan<- Address { + ch := make(chan Address, s.taskChanCap) + go s.processRoutine(ctx, ch) + + return ch +} + +func (s *objectRestorer) writeResult(refInfo Address) { + if s.resultChan == nil { + return + } + select { + case s.resultChan <- refInfo: + case <-time.After(s.resultTimeout): + s.log.Warn(writeResultTimeout) + } +} + +func (s *objectRestorer) processRoutine(ctx context.Context, taskChan <-chan Address) { +loop: + for { + select { + case <-ctx.Done(): + s.log.Warn(objectRestorerEntity+ctxDoneMsg, zap.Error(ctx.Err())) + break loop + case addr, ok := <-taskChan: + if !ok { + s.log.Warn(objectRestorerEntity + taskChanClosed) + break loop + } else if has, err := s.presenceChecker.Has(addr); err != nil || !has { + continue loop + } + s.handleTask(ctx, addr) + } + } + close(s.resultChan) +} + +func (s *objectRestorer) handleTask(ctx context.Context, addr Address) { + var ( + receivedObj *Object + exclNodes = make([]multiaddr.Multiaddr, 0) + ) + +loop: + for { + nodesInfo, err := s.remoteStorageSelector.SelectRemoteStorages(ctx, addr, exclNodes...) + if err != nil { + break + } + + for i := range nodesInfo { + info := nodesInfo[i] + if s.objectVerifier.Verify(ctx, &ObjectVerificationParams{ + Address: addr, + Node: nodesInfo[i].Node, + Handler: func(valid bool, obj *Object) { + if valid { + receivedObj = obj + } else { + exclNodes = append(exclNodes, info.Node) + } + }, + LocalInvalid: true, + }) { + break loop + } + } + } + + if err := s.objectReceptacle.Put( + context.WithValue(ctx, localstore.StoreEpochValue, s.epochReceiver.Epoch()), + ObjectStoreParams{Object: receivedObj}, + ); err != nil { + s.log.Warn("put object to local storage failure", append(addressFields(addr), zap.Error(err))...) + return + } + + s.writeResult(addr) +} + +// NewObjectRestorer is an object restorer's constructor. 
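+//
+// A minimal usage sketch (not part of the original code); verifier, receptacle,
+// epochs, selector and checker are assumed implementations of the interfaces
+// embedded in ObjectRestorerParams:
+//
+//	restorer, err := NewObjectRestorer(&ObjectRestorerParams{
+//		ObjectVerifier:        verifier,
+//		ObjectReceptacle:      receptacle,
+//		EpochReceiver:         epochs,
+//		RemoteStorageSelector: selector,
+//		PresenceChecker:       checker,
+//		Logger:                zap.L(),
+//	})
+//	if err != nil {
+//		// handle construction error
+//	}
+//	restored := make(chan Address, 1)
+//	restorer.Subscribe(restored)   // receive successfully restored addresses
+//	tasks := restorer.Process(ctx) // push addresses of objects to restore here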
+func NewObjectRestorer(p *ObjectRestorerParams) (ObjectRestorer, error) { + switch { + case p.Logger == nil: + return nil, instanceError(objectRestorerEntity, loggerPart) + case p.ObjectVerifier == nil: + return nil, instanceError(objectRestorerEntity, objectVerifierPart) + case p.ObjectReceptacle == nil: + return nil, instanceError(objectRestorerEntity, objectReceptaclePart) + case p.RemoteStorageSelector == nil: + return nil, instanceError(objectRestorerEntity, remoteStorageSelectorPart) + case p.EpochReceiver == nil: + return nil, instanceError(objectRestorerEntity, epochReceiverPart) + case p.PresenceChecker == nil: + return nil, instanceError(objectRestorerEntity, presenceCheckerPart) + } + + if p.TaskChanCap <= 0 { + p.TaskChanCap = defaultRestorerChanCap + } + + if p.ResultTimeout <= 0 { + p.ResultTimeout = defaultRestorerResultTimeout + } + + return &objectRestorer{ + objectVerifier: p.ObjectVerifier, + remoteStorageSelector: p.RemoteStorageSelector, + objectReceptacle: p.ObjectReceptacle, + epochReceiver: p.EpochReceiver, + presenceChecker: p.PresenceChecker, + log: p.Logger, + taskChanCap: p.TaskChanCap, + resultTimeout: p.ResultTimeout, + }, nil +} diff --git a/lib/replication/placement_honorer.go b/lib/replication/placement_honorer.go new file mode 100644 index 000000000..9a5ac3ccd --- /dev/null +++ b/lib/replication/placement_honorer.go @@ -0,0 +1,198 @@ +package replication + +import ( + "context" + "time" + + "github.com/multiformats/go-multiaddr" + "go.uber.org/zap" +) + +type ( + // PlacementHonorer is an interface of entity + // that listens tasks to piece out placement rule of container for particular object. + PlacementHonorer interface { + Process(ctx context.Context) chan<- *ObjectLocationRecord + Subscribe(ch chan<- *ObjectLocationRecord) + } + + placementHonorer struct { + objectSource ObjectSource + objectReceptacle ObjectReceptacle + remoteStorageSelector RemoteStorageSelector + presenceChecker PresenceChecker + log *zap.Logger + + taskChanCap int + resultTimeout time.Duration + resultChan chan<- *ObjectLocationRecord + } + + // PlacementHonorerParams groups the parameters of placement honorer's constructor. 
+ PlacementHonorerParams struct { + ObjectSource + ObjectReceptacle + RemoteStorageSelector + PresenceChecker + *zap.Logger + + TaskChanCap int + ResultTimeout time.Duration + } +) + +const ( + defaultPlacementHonorerChanCap = 10 + defaultPlacementHonorerResultTimeout = time.Second + placementHonorerEntity = "placement honorer" +) + +func (s *placementHonorer) Subscribe(ch chan<- *ObjectLocationRecord) { s.resultChan = ch } + +func (s *placementHonorer) Process(ctx context.Context) chan<- *ObjectLocationRecord { + ch := make(chan *ObjectLocationRecord, s.taskChanCap) + go s.processRoutine(ctx, ch) + + return ch +} + +func (s *placementHonorer) writeResult(locationRecord *ObjectLocationRecord) { + if s.resultChan == nil { + return + } + select { + case s.resultChan <- locationRecord: + case <-time.After(s.resultTimeout): + s.log.Warn(writeResultTimeout) + } +} + +func (s *placementHonorer) processRoutine(ctx context.Context, taskChan <-chan *ObjectLocationRecord) { +loop: + for { + select { + case <-ctx.Done(): + s.log.Warn(placementHonorerEntity+ctxDoneMsg, zap.Error(ctx.Err())) + break loop + case locationRecord, ok := <-taskChan: + if !ok { + s.log.Warn(placementHonorerEntity + taskChanClosed) + break loop + } else if has, err := s.presenceChecker.Has(locationRecord.Address); err != nil || !has { + continue loop + } + s.handleTask(ctx, locationRecord) + } + } + close(s.resultChan) +} + +func (s *placementHonorer) handleTask(ctx context.Context, locationRecord *ObjectLocationRecord) { + defer s.writeResult(locationRecord) + + var ( + err error + log = s.log.With(addressFields(locationRecord.Address)...) + copiesShortage = locationRecord.ReservationRatio - 1 + exclNodes = make([]multiaddr.Multiaddr, 0) + procLocations []ObjectLocation + ) + + obj, err := s.objectSource.Get(ctx, locationRecord.Address) + if err != nil { + log.Warn("get object failure", zap.Error(err)) + return + } + + tombstone := obj.IsTombstone() + + for copiesShortage > 0 { + nodesInfo, err := s.remoteStorageSelector.SelectRemoteStorages(ctx, locationRecord.Address, exclNodes...) + if err != nil { + log.Warn("select remote storage nodes failure", + zap.Stringer("object", locationRecord.Address), + zap.Any("exclude nodes", exclNodes), + zap.String("error", err.Error()), + ) + + return + } + + if !tombstone { + procLocations = make([]ObjectLocation, 0, len(nodesInfo)) + loop: + for i := range nodesInfo { + for j := range locationRecord.Locations { + if locationRecord.Locations[j].Node.Equal(nodesInfo[i].Node) { + copiesShortage-- + continue loop + } + } + procLocations = append(procLocations, nodesInfo[i]) + } + + if len(procLocations) == 0 { + return + } + } else { + procLocations = nodesInfo + } + + if err := s.objectReceptacle.Put(ctx, ObjectStoreParams{ + Object: obj, + Nodes: procLocations, + Handler: func(loc ObjectLocation, success bool) { + if success { + copiesShortage-- + if tombstone { + for i := range locationRecord.Locations { + if locationRecord.Locations[i].Node.Equal(loc.Node) { + return + } + } + } + locationRecord.Locations = append(locationRecord.Locations, loc) + } else { + exclNodes = append(exclNodes, loc.Node) + } + }, + }); err != nil { + s.log.Warn("put object to new nodes failure", zap.Error(err)) + return + } + } +} + +// NewPlacementHonorer is a placement honorer's constructor. 
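+//
+// A minimal usage sketch (not part of the original code); source, receptacle,
+// selector and checker are assumed implementations of the interfaces embedded
+// in PlacementHonorerParams:
+//
+//	ph, err := NewPlacementHonorer(PlacementHonorerParams{
+//		ObjectSource:          source,
+//		ObjectReceptacle:      receptacle,
+//		RemoteStorageSelector: selector,
+//		PresenceChecker:       checker,
+//		Logger:                zap.L(),
+//	})
+//	if err != nil {
+//		// handle construction error
+//	}
+//	ph.Subscribe(validateCh) // e.g. the storage validator's task channel
+//	tasks := ph.Process(ctx) // push *ObjectLocationRecord values here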
+func NewPlacementHonorer(p PlacementHonorerParams) (PlacementHonorer, error) { + switch { + case p.RemoteStorageSelector == nil: + return nil, instanceError(placementHonorerEntity, remoteStorageSelectorPart) + case p.ObjectSource == nil: + return nil, instanceError(placementHonorerEntity, objectSourcePart) + case p.ObjectReceptacle == nil: + return nil, instanceError(placementHonorerEntity, objectReceptaclePart) + case p.Logger == nil: + return nil, instanceError(placementHonorerEntity, loggerPart) + case p.PresenceChecker == nil: + return nil, instanceError(placementHonorerEntity, presenceCheckerPart) + } + + if p.TaskChanCap <= 0 { + p.TaskChanCap = defaultPlacementHonorerChanCap + } + + if p.ResultTimeout <= 0 { + p.ResultTimeout = defaultPlacementHonorerResultTimeout + } + + return &placementHonorer{ + objectSource: p.ObjectSource, + objectReceptacle: p.ObjectReceptacle, + remoteStorageSelector: p.RemoteStorageSelector, + presenceChecker: p.PresenceChecker, + log: p.Logger, + taskChanCap: p.TaskChanCap, + resultTimeout: p.ResultTimeout, + }, nil +} diff --git a/lib/replication/storage_validator.go b/lib/replication/storage_validator.go new file mode 100644 index 000000000..4dd058c88 --- /dev/null +++ b/lib/replication/storage_validator.go @@ -0,0 +1,194 @@ +package replication + +import ( + "context" + "time" + + "github.com/multiformats/go-multiaddr" + "go.uber.org/zap" +) + +type ( + // StorageValidator is an interface of entity + // that listens and performs task of storage validation on remote nodes. + // Validation can result to the need to replicate or clean object. + StorageValidator interface { + Process(ctx context.Context) chan<- *ObjectLocationRecord + SubscribeReplication(ch chan<- *ReplicateTask) + SubscribeGarbage(ch chan<- Address) + } + + storageValidator struct { + objectVerifier ObjectVerifier + log *zap.Logger + presenceChecker PresenceChecker + addrstore AddressStore + + taskChanCap int + resultTimeout time.Duration + replicateResultChan chan<- *ReplicateTask + garbageChan chan<- Address + } + + // StorageValidatorParams groups the parameters of storage validator's constructor. 
+ StorageValidatorParams struct { + ObjectVerifier + PresenceChecker + *zap.Logger + + TaskChanCap int + ResultTimeout time.Duration + AddrStore AddressStore + } +) + +const ( + defaultStorageValidatorChanCap = 10 + defaultStorageValidatorResultTimeout = time.Second + + storageValidatorEntity = "storage validator" +) + +func (s *storageValidator) SubscribeReplication(ch chan<- *ReplicateTask) { + s.replicateResultChan = ch +} + +func (s *storageValidator) SubscribeGarbage(ch chan<- Address) { s.garbageChan = ch } + +func (s *storageValidator) Process(ctx context.Context) chan<- *ObjectLocationRecord { + ch := make(chan *ObjectLocationRecord, s.taskChanCap) + go s.processRoutine(ctx, ch) + + return ch +} + +func (s *storageValidator) writeReplicateResult(replicateTask *ReplicateTask) { + if s.replicateResultChan == nil { + return + } + select { + case s.replicateResultChan <- replicateTask: + case <-time.After(s.resultTimeout): + s.log.Warn(writeResultTimeout) + } +} + +func (s *storageValidator) writeGarbage(addr Address) { + if s.garbageChan == nil { + return + } + select { + case s.garbageChan <- addr: + case <-time.After(s.resultTimeout): + s.log.Warn(writeResultTimeout) + } +} + +func (s *storageValidator) processRoutine(ctx context.Context, taskChan <-chan *ObjectLocationRecord) { +loop: + for { + select { + case <-ctx.Done(): + s.log.Warn(storageValidatorEntity+ctxDoneMsg, zap.Error(ctx.Err())) + break loop + case locationRecord, ok := <-taskChan: + if !ok { + s.log.Warn(storageValidatorEntity + taskChanClosed) + break loop + } else if has, err := s.presenceChecker.Has(locationRecord.Address); err != nil || !has { + continue loop + } + s.handleTask(ctx, locationRecord) + } + } + close(s.replicateResultChan) + close(s.garbageChan) +} + +func (s *storageValidator) handleTask(ctx context.Context, locationRecord *ObjectLocationRecord) { + selfAddr, err := s.addrstore.SelfAddr() + if err != nil { + s.log.Error("storage validator can't obtain self address") + return + } + + var ( + weightierCounter int + replicateTask = &ReplicateTask{ + Address: locationRecord.Address, + Shortage: locationRecord.ReservationRatio - 1, // taking account of object correctly stored in local store + ExcludeNodes: nodesFromLocations(locationRecord.Locations, selfAddr), + } + ) + + for i := range locationRecord.Locations { + loc := locationRecord.Locations[i] + + if s.objectVerifier.Verify(ctx, &ObjectVerificationParams{ + Address: locationRecord.Address, + Node: locationRecord.Locations[i].Node, + Handler: func(valid bool, _ *Object) { + if valid { + replicateTask.Shortage-- + if loc.WeightGreater { + weightierCounter++ + } + } + }, + }); weightierCounter >= locationRecord.ReservationRatio { + s.writeGarbage(locationRecord.Address) + return + } + } + + if replicateTask.Shortage > 0 { + s.writeReplicateResult(replicateTask) + } +} + +// nodesFromLocations must ignore self address, because it is used in +// storage validator during replication. We must ignore our own stored +// objects during replication and work with remote hosts and check their +// verification info. +func nodesFromLocations(locations []ObjectLocation, selfaddr multiaddr.Multiaddr) []multiaddr.Multiaddr { + res := make([]multiaddr.Multiaddr, 0, len(locations)) + + for i := range locations { + if !locations[i].Node.Equal(selfaddr) { + res = append(res, locations[i].Node) + } + } + + return res +} + +// NewStorageValidator is a storage validator's constructor. 
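+//
+// A minimal usage sketch (not part of the original code); verifier, checker and
+// addrStore are assumed implementations of ObjectVerifier, PresenceChecker and
+// AddressStore respectively:
+//
+//	sv, err := NewStorageValidator(StorageValidatorParams{
+//		ObjectVerifier:  verifier,
+//		PresenceChecker: checker,
+//		Logger:          zap.L(),
+//		AddrStore:       addrStore,
+//	})
+//	if err != nil {
+//		// handle construction error
+//	}
+//	sv.SubscribeReplication(replicateCh) // tasks for the object replicator
+//	sv.SubscribeGarbage(garbageCh)       // addresses of redundant local copies
+//	tasks := sv.Process(ctx)             // push *ObjectLocationRecord values here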
+func NewStorageValidator(p StorageValidatorParams) (StorageValidator, error) { + switch { + case p.Logger == nil: + return nil, instanceError(storageValidatorEntity, loggerPart) + case p.ObjectVerifier == nil: + return nil, instanceError(storageValidatorEntity, objectVerifierPart) + case p.PresenceChecker == nil: + return nil, instanceError(storageValidatorEntity, presenceCheckerPart) + case p.AddrStore == nil: + return nil, instanceError(storageValidatorEntity, addrStorePart) + } + + if p.TaskChanCap <= 0 { + p.TaskChanCap = defaultStorageValidatorChanCap + } + + if p.ResultTimeout <= 0 { + p.ResultTimeout = defaultStorageValidatorResultTimeout + } + + return &storageValidator{ + objectVerifier: p.ObjectVerifier, + log: p.Logger, + presenceChecker: p.PresenceChecker, + taskChanCap: p.TaskChanCap, + resultTimeout: p.ResultTimeout, + addrstore: p.AddrStore, + }, nil +} diff --git a/lib/storage/storage.go b/lib/storage/storage.go new file mode 100644 index 000000000..11775c3d2 --- /dev/null +++ b/lib/storage/storage.go @@ -0,0 +1,122 @@ +package storage + +import ( + "io" + + "github.com/nspcc-dev/neofs-node/lib/buckets" + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/pkg/errors" + "github.com/spf13/viper" + "go.uber.org/zap" +) + +type ( + store struct { + blob core.Bucket + + meta core.Bucket + + spaceMetrics core.Bucket + } + + sizer interface { + Size() int64 + } + + // Params for create Core.Storage component + Params struct { + Buckets []core.BucketType + Viper *viper.Viper + Logger *zap.Logger + } +) + +// New creates Core.Storage component. +func New(p Params) (core.Storage, error) { + var ( + err error + bs = make(map[core.BucketType]core.Bucket) + ) + + for _, name := range p.Buckets { + if bs[name], err = buckets.NewBucket(name, p.Logger, p.Viper); err != nil { + return nil, err + } + } + + return &store{ + blob: bs[core.BlobStore], + + meta: bs[core.MetaStore], + + spaceMetrics: bs[core.SpaceMetricsStore], + }, nil +} + +// GetBucket returns available bucket by type or an error. +func (s *store) GetBucket(name core.BucketType) (core.Bucket, error) { + switch name { + case core.BlobStore: + if s.blob == nil { + return nil, errors.Errorf("bucket(`%s`) not initialized", core.BlobStore) + } + + return s.blob, nil + case core.MetaStore: + if s.meta == nil { + return nil, errors.Errorf("bucket(`%s`) not initialized", core.MetaStore) + } + + return s.meta, nil + case core.SpaceMetricsStore: + if s.spaceMetrics == nil { + return nil, errors.Errorf("bucket(`%s`) not initialized", core.SpaceMetricsStore) + } + + return s.spaceMetrics, nil + default: + return nil, errors.Errorf("bucket for type `%s` not implemented", name) + } +} + +// Size of all buckets. +func (s *store) Size() int64 { + var ( + all int64 + sizers = []sizer{ + s.blob, + s.meta, + s.spaceMetrics, + } + ) + + for _, item := range sizers { + if item == nil { + continue + } + + all += item.Size() + } + + return all +} + +// Close all buckets. 
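+//
+// A minimal usage sketch of the storage component (not part of the original
+// code; v and l are assumed to be a configured *viper.Viper and *zap.Logger,
+// and the returned core.Storage is assumed to expose the GetBucket, Size and
+// Close methods implemented in this package):
+//
+//	st, err := New(Params{
+//		Buckets: []core.BucketType{core.BlobStore, core.MetaStore},
+//		Viper:   v,
+//		Logger:  l,
+//	})
+//	if err != nil {
+//		// handle construction error
+//	}
+//	blob, err := st.GetBucket(core.BlobStore) // access a specific bucket
+//
+// Note that Close releases only the blob and meta buckets; the space metrics
+// bucket is not part of the closers list below.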
+func (s *store) Close() error { + var closers = []io.Closer{ + s.blob, + s.meta, + } + + for _, item := range closers { + if item == nil { + continue + } + + if err := item.Close(); err != nil { + return err + } + } + + return nil +} diff --git a/lib/test/bucket.go b/lib/test/bucket.go new file mode 100644 index 000000000..024a2ab46 --- /dev/null +++ b/lib/test/bucket.go @@ -0,0 +1,144 @@ +package test + +import ( + "sync" + + "github.com/mr-tron/base58" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/core" +) + +type ( + testBucket struct { + sync.RWMutex + items map[string][]byte + } +) + +const ( + errOverflow = internal.Error("overflow") + errNotFound = internal.Error("not found") +) + +// Bucket constructs test core.Bucket implementation. +func Bucket() core.Bucket { + return &testBucket{ + items: make(map[string][]byte), + } +} + +func (t *testBucket) Get(key []byte) ([]byte, error) { + t.Lock() + defer t.Unlock() + + val, ok := t.items[base58.Encode(key)] + if !ok { + return nil, core.ErrNotFound + } + + return val, nil +} + +func (t *testBucket) Set(key, value []byte) error { + t.Lock() + defer t.Unlock() + + t.items[base58.Encode(key)] = value + + return nil +} + +func (t *testBucket) Del(key []byte) error { + t.RLock() + defer t.RUnlock() + + delete(t.items, base58.Encode(key)) + + return nil +} + +func (t *testBucket) Has(key []byte) bool { + t.RLock() + defer t.RUnlock() + + _, ok := t.items[base58.Encode(key)] + + return ok +} + +func (t *testBucket) Size() (res int64) { + t.RLock() + defer t.RUnlock() + + for _, v := range t.items { + res += int64(len(v)) + } + + return +} + +func (t *testBucket) List() ([][]byte, error) { + t.Lock() + defer t.Unlock() + + res := make([][]byte, 0) + + for k := range t.items { + sk, err := base58.Decode(k) + if err != nil { + return nil, err + } + + res = append(res, sk) + } + + return res, nil +} + +func (t *testBucket) Iterate(f core.FilterHandler) error { + t.RLock() + defer t.RUnlock() + + for k, v := range t.items { + key, err := base58.Decode(k) + if err != nil { + continue + } + + if !f(key, v) { + return core.ErrIteratingAborted + } + } + + return nil +} + +func (t *testBucket) Close() error { + t.Lock() + defer t.Unlock() + + for k := range t.items { + delete(t.items, k) + } + + return nil +} + +func (t *testBucket) PRead(key []byte, rng object.Range) ([]byte, error) { + t.RLock() + defer t.RUnlock() + + k := base58.Encode(key) + + v, ok := t.items[k] + if !ok { + return nil, errNotFound + } + + if rng.Offset+rng.Length > uint64(len(v)) { + return nil, errOverflow + } + + return v[rng.Offset : rng.Offset+rng.Length], nil +} diff --git a/lib/test/keys.go b/lib/test/keys.go new file mode 100644 index 000000000..3b87bfb3f --- /dev/null +++ b/lib/test/keys.go @@ -0,0 +1,142 @@ +package test + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "encoding/hex" + "strconv" +) + +// Keys is a list of test private keys in hex format. 
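+//
+// A small usage sketch (not part of the original code): DecodeKey, defined
+// further down in this file, parses the key at the given index, while a
+// negative index makes it generate a fresh P-256 key instead:
+//
+//	key := DecodeKey(0)   // first predefined test key
+//	uniq := DecodeKey(-1) // newly generated key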
+var Keys = []string{ + "307702010104203ee1fd84dd7199925f8d32f897aaa7f2d6484aa3738e5e0abd03f8240d7c6d8ca00a06082a8648ce3d030107a1440342000475099c302b77664a2508bec1cae47903857b762c62713f190e8d99912ef76737f36191e4c0ea50e47b0e0edbae24fd6529df84f9bd63f87219df3a086efe9195", + "3077020101042035f2b425109b17b1d8f3b5c50daea1091e27d2452bce1126080bd4b98de9bb67a00a06082a8648ce3d030107a144034200045188d33a3113ac77fea0c17137e434d704283c234400b9b70bcdf4829094374abb5818767e460a94f36046ffcef44576fa59ef0e5f31fb86351c06c3d84e156c", + "30770201010420f20cd67ed4ea58307945f5e89a5e016b463fbcad610ee9a7b5e0094a780c63afa00a06082a8648ce3d030107a14403420004c4c574d1bbe7efb2feaeed99e6c03924d6d3c9ad76530437d75c07bff3ddcc0f3f7ef209b4c5156b7395dfa4479dd6aca00d8b0419c2d0ff34de73fad4515694", + "30770201010420335cd4300acc9594cc9a0b8c5b3b3148b29061d019daac1b97d0fbc884f0281ea00a06082a8648ce3d030107a14403420004563eece0b9035e679d28e2d548072773c43ce44a53cb7f30d3597052210dbb70674d8eefac71ca17b3dc6499c9167e833b2c079b2abfe87a5564c2014c6132ca", + "30770201010420063a502c7127688e152ce705f626ca75bf0b62c5106018460f1b2a0d86567546a00a06082a8648ce3d030107a14403420004f8152966ad33b3c2622bdd032f5989fbd63a9a3af34e12eefee912c37defc8801ef16cc2c16120b3359b7426a7609af8f4185a05dcd42e115ae0df0758bc4b4c", + "30770201010420714c3ae55534a1d065ea1213f40a3b276ec50c75eb37ee5934780e1a48027fa2a00a06082a8648ce3d030107a1440342000452d9fd2376f6b3bcb4706cad54ec031d95a1a70414129286c247cd2bc521f73fa8874a6a6466b9d111631645d891e3692688d19c052c244e592a742173ea8984", + "30770201010420324b97d5f2c68e402b6723c600c3a7350559cc90018f9bfce0deed3d57890916a00a06082a8648ce3d030107a1440342000451ec65b2496b1d8ece3efe68a8b57ce7bc75b4171f07fa5b26c63a27fb4f92169c1b15150a8bace13f322b554127eca12155130c0b729872935fd714df05df5e", + "3077020101042086ebcc716545e69a52a7f9a41404583e17984a20d96fafe9a98de0ac420a2f88a00a06082a8648ce3d030107a144034200045f7d63e18e6b896730f45989b7a8d00c0b86c75c2b834d903bc681833592bdcc25cf189e6ddef7b22217fd442b9825f17a985e7e2020b20188486dd53be9073e", + "3077020101042021a5b7932133e23d4ebb7a39713defd99fc94edfc909cf24722754c9077f0d61a00a06082a8648ce3d030107a14403420004d351a4c87ec3b33e62610cb3fd197962c0081bbe1b1b888bc41844f4c6df9cd3fd4637a6f35aa3d4531fecc156b1707504f37f9ef154beebc622afc29ab3f896", + "3077020101042081ef410f78e459fa110908048fc8923fe1e84d7ce75f78f32b8c114c572bfb87a00a06082a8648ce3d030107a144034200046e3859e6ab43c0f45b7891761f0da86a7b62f931f3d963efd3103924920a73b32ce5bc8f14d8fb31e63ccd336b0016eeb951323c915339ca6c4c1ebc01bbeb2b", + "307702010104209dd827fa67faf3912e981b8dbccafb6ded908957ba67cf4c5f37c07d33abb6c5a00a06082a8648ce3d030107a14403420004e5cb5ae6a1bd3861a6b233c9e13fa0183319f601d0f4e99b27461e28f473e822de395d15c1e14d29a6bd4b597547e8c5d09a7dd3a722a739bb76936c1ad43c0e", + "3077020101042005a03e332e1aff5273c52c38ec6c5a1593170ddf8d13989a8a160d894566fc6ba00a06082a8648ce3d030107a144034200045a11611542f07f2d5666de502994ef61f069674513811df42290254c26f71134100fed43ea8ecd9833be9abb42d95be8661f790c15b41ca20db5b4df4f664fb4", + "307702010104206e833f66daf44696cafc63297ff88e16ba13feefa5b6ab3b92a771ff593e96d0a00a06082a8648ce3d030107a14403420004434e0e3ec85c1edaf614f91b7e3203ab4d8e7e1c8a2042223f882fc04da7b1f77f8f2ee3b290ecfa6470a1c416a22b368d05578beb25ec31bcf60aff2e3ffcd4", + "30770201010420937c4796b9fc62fde4521c18289f0e610cf9b5ebf976be8d292bc8306cee2011a00a06082a8648ce3d030107a14403420004ba5951adddf8eb9bc5dac2c03a33584d321f902353c0aadccd3158256b294f5aa9cd5215201d74de2906630d8cefb4f298ff89caa29b5c90f9d15294f8d785bc", + 
"307702010104204b002204533f9b2fb035087df7f4288e496fc84e09299765de7a6cd61e6a32bca00a06082a8648ce3d030107a1440342000441abcf37a4d0962156c549de8497120b87e5e370a967188ab1d2d7abce53711dfd692a37f30018e2d14030185b16a8e0b9ca61dca82bfe6d8fc55c836355b770", + "3077020101042093ffa35f1977b170a0343986537de367f59ea5a8bd4a8fdd01c5d9700a7282dba00a06082a8648ce3d030107a144034200040e01090b297cf536740b5c0abb15afba03139b0d4b647fdd0c01d457936499c19283cf7b1aee2899923e879c97ddeffe4a1fa2bffc59d331b55982972524b45b", + "307702010104201c1a2209a2b6f445fb63b9c6469d3edc01c99bab10957f0cbe5fad2b1c548975a00a06082a8648ce3d030107a144034200040c8fd2da7bad95b6b3782c0a742476ffcb35e5bc539ea19bbccb5ed05265da3ab51ec39afd01fbee800e05ec0eb94b68854cd9c3de6ab028d011c53085ffc1b3", + "30770201010420b524d8cba99619f1f9559e2fe38b2c6d84a484d38574a92e56977f79eac8b537a00a06082a8648ce3d030107a14403420004a6d7d0db0cc0a46860fb912a7ace42c801d8d693e2678f07c3f5b9ea3cb0311169cbd96b0b9fc78f81e73d2d432b2c224d8d84380125ecc126481ee322335740", + "307702010104207681725fec424a0c75985acfb7be7baed18b43ec7a18c0b47aa757849444557ca00a06082a8648ce3d030107a14403420004bd4453efc74d7dedf442b6fc249848c461a0c636bb6a85c86a194add1f8a5fac9bf0c04ece3f233c5aba2dee0d8a2a11b6a297edae60c0bc0536454ce0b5f9dd", + "30770201010420ae43929b14666baa934684c20a03358cda860b89208824fac56b48f80920edc4a00a06082a8648ce3d030107a14403420004d706b0d86743d6052375aa5aa1a3613c87dccfe704dc85b4ed4f49a84a248a94582202927ec0c082234919f3ce6617152ba0d02497b81c61284261ce86cef905", + "3077020101042089d600f43c47ab98e00225e9b2d4a6c7ab771490f856d4679d9e1e0cca3009d0a00a06082a8648ce3d030107a144034200048515055045543e429173fc8f9f56a070bd4314b2b3005437d8504e6b6885f85101409b933e27c0de11415aee516d0d1b474088a437ece496ceb4f1c131e9ea40", + "3077020101042015518dcf888c7b241dac1c8bfa19d99f7fdba7ba37ed57d69bbbd95bb376ea4ca00a06082a8648ce3d030107a1440342000459e88d92efaa5277d60948feaa0bcd14388da00e35f9bae8282985441788f8beb2b84b71b1ae8aa24d64bb83759b80e3f05c07a791ffe10079c0e1694d74618c", + "307702010104203e840868a96e59ca10f048202cce02e51655a932ff0ac98a7b5589a8df17f580a00a06082a8648ce3d030107a14403420004f296414e914dcefd29bc8a493f8aedc683e5514a8ec5160637bee40ebaa85a421a363c8f7ce3ed113e97d2c4b6d9cd31d21698a54fce8d8e280a6be9ee4fbca9", + "30770201010420aa746067891cf005286d56d53092f77961f828bf5bf11aade18c8a458090d39aa00a06082a8648ce3d030107a144034200044af5ad2dacbb32ab795ab734d26bae6c098bd2ba9ca607542174d61b49ca3c07786aeb0c96908793a63d4f20cd370a77b7ec65e6b285c6337764e7ae3cd5fa1c", + "307702010104207135cbd831d52e778622c21ed035df9e3c6e4128de38fbf4d165a0583b5b4a29a00a06082a8648ce3d030107a1440342000412e2b9e11f288d8db60fbb00456f5969e2816a214a295d8e4d38fbacab6b0a7e0cdb8557e53d408244083f192d8a604d5b764ab44b467e34664ca82e012b60ab", + "3077020101042064b839ca26c42e2e97e94da5589db2de18597a12d6167fdfe0d20e932de747a2a00a06082a8648ce3d030107a1440342000481e90c2173b720447ae28361149598a7245ed51c3881a89353da25b8e574b8c9b2d80b2563efe5d9a0184b57af2431116c8a4ad8071ef2764ca3d3744c638401", + "30770201010420a56df8e6349520d27c36eb1e9675720c702d562842c859cd54b3d866f2cada30a00a06082a8648ce3d030107a14403420004dc08beb5b857f6da13ae1116e40a6e4e4b5aaebc8040eae0b3037c243b1c24def39de670380472df7aa98cb9e0f1132bc4afc0629d80a24c54b8ad600cb24cd4", + "30770201010420bd2dd18485a9667673b2c38c2ad51cc756a199d18fe1100acf29b647a549171ea00a06082a8648ce3d030107a1440342000422825ffe8b3416b6755a7076a7dc6f746ff29ee0a4455dceb0f3262127d51c9bb53f2c204636da8d7a09961274d7c7ba2ef3c771e83fb996ffe3f9882c530ffd", + 
"307702010104203058a0c8de5c6d4a5c7f64883e7d3c9f5097c8bc073cc482421e903b37123c06a00a06082a8648ce3d030107a14403420004f959705673c2f4112673e43d1d876ca71c64153abb6c9f58d1c3b3c1f8c213ee346833fb695eb533664d596a68e42150a21b405e3a08ed70af5f568275a7a79f", + "307702010104202bd9035bf38e7c4580abc377a6e9c31aa9bdaff90af2ce688eda9a532c83875ea00a06082a8648ce3d030107a14403420004918010ea3387786c6a257996ec74d7ee4e1703b3b811118f4e89fabfef7c694495191848a0d590313a0be9784644ef98e0f0f7e50fed5bee3fa48d66edbcd2b5", + "30770201010420aa055d6cbe96e1cfbe39530bc4b7a976baff53ce399956f0d8241750d3379990a00a06082a8648ce3d030107a1440342000444e8b6deda76c12320a8c5b7a48141ebf5dc9288df79a0f418ab92d82061d10118b8bce9fb200e5009a19fb0e19036762b3ef85440405f43225d6ee3350bf96c", + "30770201010420b8712525a79c7bd3df2a9dbabde1a111078a7ef30687a2efe0f0c4b4a23f2aa0a00a06082a8648ce3d030107a144034200049dc9e3d836a834f6d14ae99dfc70ad9b65c84f351c8dbc4f9b1b61c238051fb1db23e43d4b6e17803e21ebc44fe2f66742e306daa8c4ca7d79c6dd01fc1a4e4e", + "3077020101042086c18b56c4a2264b37c18a7937f026ab07ca6076eeea1ab90376492efb7875d9a00a06082a8648ce3d030107a144034200042f169311f2fae406de3c4a64fec94a22c35972281922a69e7657185997ae59fb3f69ac94295e58681cfbd263f8e6fbce144cc7925b71d90f57de3f3e10588321", + "30770201010420f58221355e1b2da73d66de482ec1edcb8597f3967d00d1356f4678fea6ad67e6a00a06082a8648ce3d030107a14403420004238cc44f02fa566e249a9697a078b9d38eba06012d54a29a430843a18df7a0a4207d704a360399db95eca591f2f81b6c50390467f293a1623b4757bdb4138101", + "30770201010420b10888a0157d524667fd575683bdcded4628a65149fde59b7340781b0cf2e36ea00a06082a8648ce3d030107a14403420004222ba11430b8719929c726aec74e8e70893e2960bc2bbee70fbaa6d88fa2a346adf0c450ea9823f0ba77d334fcd476ea036a62199338d7aa32e56c708d7a8caa", + "30770201010420edf001bd24c92e4f65789aae228223e77df71ce9bbfd7ce4d236ea3648e1f7fea00a06082a8648ce3d030107a1440342000472693c95786ab9f4e7c923338ce98bd068e28b71f84b77e7adb378c2ce2d8f1a2e13833df1afe4569367d7a4eee3abf50124299a28045a0073ea324f5ddb45ea", + "30770201010420e2649e591fc9072dd55573e41fc4ebfdf1db118951e4b7b2a98027ac9a4f7702a00a06082a8648ce3d030107a144034200046e34c9dea1836671f1ef259d7c3ee678c2f92d092af2518413fe9ba153a07ca8e9938784876e90cfa2989a00a83b1ac599c87a8d15be8001e46dfbfe018156a2", + "3077020101042069cd9b710f25613794751aed951004c888d4611aefa45abc23abff218e608290a00a06082a8648ce3d030107a14403420004dcf8ff34ab841720ff8dc08b60a14f41689e65f979a1af69b5e106f4262a2cb0947c9619e980caf20b3e7c8f15e60fc31c5b611c8a58370ba8201c9b6b932bd4", + "307702010104202898cef1944aaf90fddf433390323a02a79938568cf99f6c25bc9aa9e5cddb0aa00a06082a8648ce3d030107a1440342000491a1c20420f5005f5761419e4dcd0d9da0cf2ea4733f6d98a3d0c124f284cabdc65eafd9d2cad9b1122fca791c8b37997feed130c5725ea797cf07c61fb82734", + "30770201010420e568bd3ffa639aa418e7d5bc9e83f3f56690ebf645015ff7f0e216d76045efd5a00a06082a8648ce3d030107a144034200042424b498297124037db950bf2a1e652ba7f977363f4f69d7308531d27bf392219d93cb78f4379b7ffb16f3e7be311e208af2409bd33000fd25a8707ac6bec76b", + "307702010104205163d5d5eea4db97fccc692871f257842fdaca0eca967d29924242f7a2c56ad7a00a06082a8648ce3d030107a144034200044e2ca8312122039c3374db08851710d3b9a2efcbd8f5df004ec7b60a348aee32466f799b5957d39845f451071bb1f3bb99f25bf43196e7c772f7b84f39221b3a", + "30770201010420301eb936d2737886ab2fbf670952f9ba0d324827b81801810bfd60c89e8ca862a00a06082a8648ce3d030107a14403420004455454b1f3828a2328a8925c4c98bd6e37dece276efb3299d8b7d78c9d7e6f978b14d021c07bae0c18a623fc52ab2fec1523a89b2fd0cda373e9c9442a3545f2", + 
"3077020101042032c12a9bca8070c131b0a46944c17adf35eb44079f3c887fc3b93740bb9c03fca00a06082a8648ce3d030107a14403420004e61da413c4d5dbc6c004089d96a3cb55f4b20b70c544f3823a7a6322c53e134fcb8a885729ef284d68d23e0a58009d48b369f9c4f5a665a8880a48606491dd8a", + "30770201010420aa2b40742722b81c6ffd5c47b94b8be747da259e172a82d27ebc525c8f46d17aa00a06082a8648ce3d030107a14403420004f87a863ed11592cf4f96e837038b105d155f5e09a31386ab4604234e8a975d49a9612b4597b7fb206087b70a26bce4aca31edb253530e6da83ce16beefa99f60", + "307702010104202a70a0c827b4ce8d433e800ab0818b1401b220fadea75feff655251ee4317556a00a06082a8648ce3d030107a14403420004a5c9209fd53dc1ce2c873782ec507db5e0f9cc78292a84ecafc5bab16c2e4d786a882ad77ad999f3d6ba676ad80354ad376dabc4fa03a6c15ead3aa16f213bc5", + "307702010104202787d04901f48c81774171ef2e2a4d440b81f7fa1f12ab93d8e79ffab3416a1ca00a06082a8648ce3d030107a14403420004010d32df4d50343609932a923f11422e3bea5fa1319fb8ce0cc800f66aa38b3f7fda1bc17c824278734baa3d9b7f52262eeacbca21304b74ba4795b5055b1e9f", + "3077020101042032423728a897144d4fb95090ca0ac67a23eb22e2f7f925cbddaf542eeaec8faaa00a06082a8648ce3d030107a14403420004c37f9fec5b1be5b0286300ace6a5d25df8189d29604145a77b6578a4e3956ed3d9af48f8ee1e39868bba9e359e5444984f0428755e29d2012f235c9a56749148", + "30770201010420d5bd2a3867937e0b903d19113e859ca9f6497f4af082894a6911cef3a3a12d35a00a06082a8648ce3d030107a14403420004435b2e891c46023f422119f18a04c75b9322ea4aaddd10a0568438310896388bf7037e98bd5979a6f0839acb07dead1f2f973640dcc11dcee1de8a07c0b3dd80", + "30770201010420590edcf1f2b6ee6c1b836ace33b934597883a00ce84fe812a4b3e22432846972a00a06082a8648ce3d030107a14403420004183d7cad633cb0f4ab774f4dc19b9db87e7ef97b0f4d43ac395d2409dabbe5339dbad661c7c2fd05606e2edb08f8ace660f73bf5232011262d563603f61d2353", + "30770201010420a0ea4e16cf8c7c641d70aea82192fb9303aab6e7b5cd72586ba287d50f4612d6a00a06082a8648ce3d030107a1440342000482a72d31e71f0aea778cb42b324abf853cb4e4e8d4b2ae0e5130480073e911f183134c047a7e1cd41a845a38057ea51a1527923518cbf47c3e195a9f44e1d242", + "307702010104209e04b00c8d0f96ddb2fbb48cfc199905bfbfcc894acb77b56bf16a945a7c7d08a00a06082a8648ce3d030107a1440342000405efd203dcddfb66d514be0de2b35050b83e3738096cd35398165bfdbe34d34c0d96a4e6df503903c75c2c06b66b02b15cd7bf74c147d7a9f0a5e53b83c5762d", + "30770201010420aa69f1cc2cb3482a12af4b1614d6dde01216f1cad1c9f03c681daa8648b75b37a00a06082a8648ce3d030107a1440342000474ffec1297420d0cf730b42942058699d803ab618e1e40ccf9cc17f71f62b3123d863fbf8fae37b6c958892af6151159f74e2a568917bfc2f4e00c55c32b52e7", + "3077020101042090a04300e8d6ed9f44422a2cf93817604bf1f6233c4333ba0db20ab726852fa4a00a06082a8648ce3d030107a144034200049e6f2001baf2b6fb25e3273907ed7320f494de6b5882c4c4b9bcee7ddc60274e064cc68c64325c001f07a505722062d1ca9774a2cc1e0cd28fe5f807865bfcc1", + "3077020101042088945c19c6ce3e63f8d8a421616391d83bec79a0c590f1607b247ffa0c677dd3a00a06082a8648ce3d030107a1440342000492d17d410f9eabf7ae4509a92494e9fe94a72947f24e60c5bb6e12b2cde3c1bfe5305a0d759138069d44268f174136971ecb752df602c282e48d40f43a8734e3", + "3077020101042079d14eacdc4f21dc5284bd8487dcb2c22e9e53e71909474f922bf695f49cf23ea00a06082a8648ce3d030107a1440342000428039292c5bcf3593639bf5835ec9411ffd3ac236c0186697623930b5ca63f32ff41df5217e7def770d9a0de87f61526497bd9aaa95d924e0a17d85958e7c095", + "30770201010420a6ac867ff8d00aaad23198415868a64e59217b4d22474752a146fcb52204dfa5a00a06082a8648ce3d030107a14403420004a5f37a779265c55cd4f5a7f3bffc4679395898046eb9d67d8670be39001de5a7bc010b0d218561626272989c5952e8e0d95d2590f78eec44dc62a46184956301", + 
"30770201010420df446014577f6081113cd7d33c6ba91b9ac3d083e76f8873358f83129e2d0111a00a06082a8648ce3d030107a14403420004da0c932759f50ad705507f876138c2c6e012764abc8764a6dd609e6ad06099952b120be71690bc091591f1aa8d7d6e9365deddbc958bc87ff150358ad33f7537", + "30770201010420b3351033eaaee3a9ea27cd7dc54aa2c8d787b14b7d428165f1a04a59c6d5b0f2a00a06082a8648ce3d030107a14403420004da3984fb8152403a9fb9068b16f9afb5c900f24230e205567b4405ee3cad2db3ff46968489d494b38d0c85fcc4aecccb61fc00dca54c8fd99ee5bf5e2616f1b7", + "30770201010420deedbcef7f6821f6aab2b15ce198f5eb2064f6eb461a6b7776b4da35c81b1506a00a06082a8648ce3d030107a1440342000405422b86ce66b18e68f0fb14f28e4ed9b1f7ee84f57957f4e4b4c6b0c392e6357e4698fb707f590be1b915622ec8da476071a56919211f6e5e888284d4e33f06", + "3077020101042078c3db0d3b1114cb99f1d0bea0d3aec9067b26964e2b85fe9df4789b24cb3da5a00a06082a8648ce3d030107a144034200046874e52d7d58b6697b407b0c0eea3cfeb528e34fca1589c5031e11aae1ad1f9280e7a4c37ddf28479cd07b4246ce9398e0e24f99946f87e08532fa26b8fb8016", + "30770201010420f0ba42553b146cf088d3a5a3645782fe675d23561897ced7f1270a8d05cfdaaaa00a06082a8648ce3d030107a14403420004c250e12f3aa1fb6261c57cdb091cd90d82917e103711425888477b9da4359d2803aaf0015638294c7c0baa4ec77ba8fceff5ee7f15ea087a4174f58d518006dd", + "307702010104207f2c0fc4b0e418b2d4c72a63fdc27f158f6ad44c26d161f489714525b6a13db1a00a06082a8648ce3d030107a144034200041d83885672021e783d8bd995d187f407bbda2c6bed5e8fabc7c6c5cb304a85eaffa12dad7ba874ac45f4258fffe07534843ff7fe76075470f2c77104d781688f", + "30770201010420d3de828ac9742704d4e6981ce1fc8c473e508eda3a121cda420dacbdf39d48e9a00a06082a8648ce3d030107a14403420004c78abfc4a5c0eb3ee0c9817d1790b7ca9fd528d0bc727f9daf63f4212097538b6888b9de2ae4dff29895500be456fe0ccbee340aecb546d1558b08c3718aaa4a", + "30770201010420d9c4e477b56f2ff0b211acd82b450336276534b350747315152a4923e6e65294a00a06082a8648ce3d030107a14403420004fbd540966b03fe2c2314f20248d345e3e9b92d6a7cfea22d1b5367f01b32d616f317e00cea1f659437b4302610abba8abb0f2bfce0a91b952e9565159c1e464e", + "30770201010420fb84f4a426fa12920c2cf7c2d821280530c0fa93960ded8c20120511dc1d5069a00a06082a8648ce3d030107a14403420004c0177f13c6e00bb9029df089006a332192bdf12a782c60a8d00d110c53db67c344584f22677695a7f1629db1600b0559ced49ac931b08cc6a58e5ea436bde2f8", + "30770201010420653ce060214028f7aa584910f0925d702bde18d52d8e530f07dd5004076eb614a00a06082a8648ce3d030107a1440342000433668d0c9085feae4b285fe260a316e24f24c0bb8e442583e23284bf5a962cd0357cd63ac4d1cdda58afb201bceee911ebe7cf134652dc4390f4e328f6cb5d65", + "307702010104206123b7d5b8c53b2a2a95dd2e42fe550617b7520fe9bd94a99045addb828ad847a00a06082a8648ce3d030107a1440342000487c10fdeaabf8072dcea0dc5b18be4d72f2b8298bc891ea0a11d202438b7598ac588f16a9cd697f8220434d4e15ff4c82daaae63955525633335843069434aea", + "3077020101042000b793c9b8553ee7bec21cd966f5aaff59a07d1fa3fa86e0164bcd2f7f4dd586a00a06082a8648ce3d030107a1440342000419d4179dbeae7fa87e356f0406c327239d34e540cd7db5174a81bd6197738bc72e46fe4bd1512dc4b35950b2c1e78e6f8f54980193be78d45e4d97a837455777", + "307702010104200fb1a771004f6be6300eccd603b9c9e269fbdd69e5eb183d7acad51b0b205b88a00a06082a8648ce3d030107a14403420004d3b7fa62bacff49714ef28a955cdc30f4aef323293ac3aebab824892dfa3306f2ec319f5bca1771b956b4a9b1c2f565dc08b29c07ec84623932a5d6fb59be6c7", + "30770201010420fe6907b91407619fdc95153cd59df061e88095678801008d3901f29c7c434243a00a06082a8648ce3d030107a14403420004796fcea7889128f8060b04e9000381fd3d80fe68f000063b182fe9d8984e740c387c4ed4c6729e8c715c576fe355a9b7dda6890c55b15ae6013fd51e8858b2f2", + 
"30770201010420111eaff6db3b279d014b45b3da091909f054f37c350c237fe9d51b4342811299a00a06082a8648ce3d030107a144034200047d51f9178725c4134579ac6d0cb84745e0d2068ccf72d30c02dd431547f868d1cb93b5774c7e1eb9582e2151521ff16cdf80b3ba4646d64f7982066f9eb679f0", + "30770201010420631d01e6aaa68e6c36e3425b984df02bc5b54e81951479f7cea8fd1b804bab57a00a06082a8648ce3d030107a14403420004fa1b1ed9ff904f1f050577e05b5175e897d462598fdd323c8ef25f6072dfa43034baa0119e64092fb44f7a04d59d16ba8645f52cfb7775a6536c00f7fc2ee2f1", + "307702010104201ec553d14d45acdf147dba5fcbc3a42a1f763411d5c206d03600ed810b0cf106a00a06082a8648ce3d030107a14403420004e9a309a24d1061204087de10e5bc64b6d45369399a5a402d630ca2d04b34ae9d27d491e5fadd5d082e14454e6b2a572a24904ba2a8dc7430b20d361134188589", + "307702010104206d31e401bb20968106a058f8df70cd5fb8e9aaca0b01a176649712aa594ff600a00a06082a8648ce3d030107a144034200048555a2f9e7256c57b406c729d2d8da12c009f219e81cecb522cb3c494dcc1c76ac6d2f641dafe816065482fb88916e1a719672c82406556e16c32cf90752a92f", + "307702010104208ada3d6ea6000cecbfcc3eafc5d1b0674fabece2b4ed8e9192200021b8861da0a00a06082a8648ce3d030107a14403420004a99e7ed75a2e28e30d8bad1a779f2a48bded02db32b22715c804d8eeadfbf453d063f099874cb170a10d613f6b6b3be0dbdb44c79fc34f81f68aeff570193e78", + "30770201010420d066dfb8f6ba957e19656d5b2362df0fb27075836ec7141ce344f76aa364c3cea00a06082a8648ce3d030107a14403420004597fd2183c21f6d04fa686e813cf7f838594e2e9c95b86ce34b8871674d78cc685b0918fd623e3019d8c7b67104395b1f94fc3338d0772e306572236bab59c39", + "307702010104202c291b04d43060f4c2fd896b7a9b6b4f847fb590f6774b78a0dff2513b32f55ca00a06082a8648ce3d030107a14403420004e80bd7e6445ee6947616e235f59bbecbaa0a49737be3b969363ee8d3cfccbbc42a0a1282de0f27c135c34afad7e5c563c674e3d18f8abcad4a73c8c79dad3efa", + "3077020101042029af306b5c8e677768355076ba86113411023024189e687d8b9c4dee12f156fda00a06082a8648ce3d030107a144034200049d7d21e6e1e586b5868853a3751618de597241215fb2328331d2f273299a11295fe6ccd5d990bf33cf0cdcda9944bf34094d5ffa4e5512ee4a55c9f5a8c25294", + "3077020101042022e65c9fc484173b9c931261d54d2cf34b70deccb19ce0a84ce3b08bc2e0648ba00a06082a8648ce3d030107a14403420004ea9ee4ab7475ebaff6ea2a290fc77aafa4b893447d1a033f40400b4d62ee923a31d06fe5f28dbc2ebec467ebd2e002a9ea72057f0b0c60fe564584a6539376ad", + "307702010104205000583dc21cb6fd26df1c7d6e4efb9b47ceff73c0d94ed453bae0c13a9e5795a00a06082a8648ce3d030107a144034200045a6a5b5886b01f54dfa0788f15d3542aec160843a57e723008d1b984dd572ecb8935662daaba53d756d45442efbae067f52b0b151899a645afb663205babddd3", + "30770201010420997431e73eae00f476bb1a221b4cc9dfd18d787be207b7069141627f61ba752da00a06082a8648ce3d030107a144034200047c89dc8c46a27e20c37b0ecf1150e8b92c2dd4dc534a25545f87a5f0c44fdbf4dee2af5bcdc4012f0acee168aeb55bb4d24738fac105fc056928ff5870491047", + "307702010104207dc10db95a597a80e916d7f8e4e419b609d767538fe9732bcc5f9d783c605a2ba00a06082a8648ce3d030107a144034200042e2ae4fae087a11fcdf9565670164c229337ed87b5056687c6bceeb84108db9a88b9e5d96a0cf121255ceefce0bb5239608768bb841e6687dbd9626222eb5187", + "307702010104209056e22b347f5f1839f1a53f1250d098616ff04db0b49b1fddb18b987930cec7a00a06082a8648ce3d030107a1440342000427cc4c7fb5d7ac047161aee78e812ad264ba25dd878684637308674ea693817b20a5e3672de6a92dfbf82f641268052fa742e6f35ff91c617334f09f89bd1218", + "30770201010420554ea6cfeb2cc4f1e29c08e65317d72731ee03940af9ff6a141b761d5d054db6a00a06082a8648ce3d030107a14403420004a6121746c0553ede0944da8a7f304831fcefb51b40acf78016d41cc45cc5f7e9a1b22bbea028daab5cb4c39cadf84da442749cbfc04536d6f85c3254ec7a0805", + 
"30770201010420f53ff1c7db3c4e7c734bf7396a1a5364ac2dfe4b794b118aada6bab72cde8969a00a06082a8648ce3d030107a1440342000414b11ec158e3f9d558bd1da1ed0e38c92b1ad55834f3ce08e456747279dd9ed1143cff4f5e8d70189f4b114e3cd609105d6eb8f431f392487e4c9e16a152dba1", + "30770201010420b3f394090547f5dcb2e77cef65e03a3b7d1c953cd0e069553da2795ab0adc950a00a06082a8648ce3d030107a14403420004a1a9dbe5d6dfa2dfb039aebabe96b12faf97c994e1430323d074ecbd90ef075e0fe9dc7d5eef2483d485ffb0b4a01b01e131754fb38059a1365d342d5175397a", + "30770201010420bf13c42fa84c409161f9d73ce20fd85b20c5381914aa2a2375452b34cd352022a00a06082a8648ce3d030107a14403420004e0134214a5349a235cee406ad942ca105ef871a7e4c922ef4769466d8495c78b82f6c49270c8cd913e0cf407cdab679dd9914090ea91122ca9fb654ebcfce57d", + "30770201010420440d975b65bf585d0813137fe041461de59221856eaf255479b5e69721cfb30da00a06082a8648ce3d030107a14403420004935a9626ddb7bd6fbcd2ad9d9333851bbc64b9997cb8e43b1a17f8e9968ed6b0e5d2edf105fbabc9bd745fa2120ac527bbfefb6e8ed96844f80b8e27b6d9a549", + "307702010104209ea2dc59260408165d6c42205aa52e275f81c39d9bf5b1b9c8187ade875e8068a00a06082a8648ce3d030107a14403420004bc570aa24df0306cb761ee9fb22e61f59ae4f11e8804491d8651084f191c800d1e6b16e4bc3693b88f9bef82849f3cd6914a15cae60322c1f4822a2bdf426782", + "30770201010420505b596fb71a2e36c0ba07da03442a721f3f1832dcac19631d6c11b36ab81986a00a06082a8648ce3d030107a1440342000472cfb26cf07faa4e6e9d328214677b5eb51cd2e35717ac661d732115e592a07482bf966a31792cc993bdf816a732069ed423871b53fb3c7eabab2f4d3d272013", + "3077020101042089a9d5b397c521db4bb4a5f3e8f2043e43bb5617a2070e7bfa30dd2dbf1815a1a00a06082a8648ce3d030107a1440342000468d2aeaf641b839095644cfd4b72ab97d0bf3fae1ed36e9f81d9aff333b0123f7b846f6ca61dbbd4e10988e740463addef793994a1498987883ecf237f18bc40", + "307702010104200919a89aedb4e20cfcd2cb568c8de18b1b60b5da17aaea3be9804eb5bc3280f5a00a06082a8648ce3d030107a14403420004139812ec6bd62fd3ce71040d87cc07671948ff82300fae5f3af80dcd4e22c870c0102c4add460b2cbbeeb298f58037fc645da20aa8f5531a5ff56d3e5b2d1944", + "30770201010420b145fc69cfabff378f390f0a99fb98ddc8ba9228cb1adf9c7099c6393a24567aa00a06082a8648ce3d030107a14403420004b660084cb05e005fb163011663fee6946f354714565069968f16e89e9a7aac45610f05502ff9d9e3cd0fdc88083bd8840a518b71135e59a0f0f235636d5eb7c4", + "3077020101042082d39168f289e784ace49bfdd523297b524c494f83fe7d04dd2f055b48d636b9a00a06082a8648ce3d030107a14403420004ea4021da5eec4e7f333059625ecbad3969676cf625cbf0da316f55f50ccd40e6174fdb7023c07abdb3ca91203acbcb5e78e1601f1a9aa616c5019ac5b2222ff4", + "3077020101042066a1ebc23e993674bfdc3b9721c280b7f3c1599903063ea7899b848b942a6169a00a06082a8648ce3d030107a144034200046bdb182c6c0c1f9ea898c3847bc4b46014cb8da6a02d75b7bed3c4a9a4e9c8836d4ce22fe68b68ae56a91fb435c7ea8f05bca8e8fcb1d6b77770d419f99e51da", + "30770201010420fa2cda21b761c46fcc5b54d47b045e24affdb95425e859bb367a07950119ab6ba00a06082a8648ce3d030107a144034200044b9e4cee102ad23fea3357f8f5f95ab9d60d34086ba4b39d5f37cbc61998ac9658ec56033ad72977d41e449d449f5aac2bc653ea8038fc04a011ff02ec49e088", + "3077020101042028acfb3c41b7be1d9d0506ac3702c363ffd767dd738dc8ab581ad7add2ec8872a00a06082a8648ce3d030107a144034200047467dedfb8c9a7d9496d4898d6ace0fba063545ab0d345d8b63b90871927ed269645a745a7335ca511d86a366f24e7832477842b4041a9ab564c5fbce49e4df8", + "307702010104202e57b8b867bd95a8dfcdd2cb8f82ea41bff21610019afd6e2367e755dec5b944a00a06082a8648ce3d030107a144034200048f97eb2d6ee2d3da8746d8d4f84469ea765fb0d1412b167b6d8a916b5f968b4d64ede5ea6d6e08ec0de192262fcb3ebed49e9d17858261affed84827b38c6cc9", + 
"3077020101042021a904281e4c31386ce34a5b52af3a068caa65819fbcf0ca76ab6041ecdaf454a00a06082a8648ce3d030107a1440342000405f9b7894a97fcddfc3285b8e974718606616fe07c70b7ab2bfb28a85fb3014c2610ab9e8e6da8ae3da032837d3a14b1e791d2633bdd8551b4817a080b9aa697", + "3077020101042089c2c73d08bd03da4c3111aa0b78bb1edc5243d8e119513035d3741e851dec1ca00a06082a8648ce3d030107a14403420004ec9ebc34f45150334fd1d8c92274fe43c5b3b059f15cb1963f6cf7d54bc6b1b0b4ef1c5d56d2d06ab54ce2e7606e0fa5d2f188a2d593b22d9cf6a0098aa00cb6", +} + +// DecodeKey creates a test private key. +func DecodeKey(i int) *ecdsa.PrivateKey { + if i < 0 { + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + panic("could not generate uniq key") + } + + return key + } + + if current, size := i, len(Keys); current >= size { + panic("add more test keys, used " + strconv.Itoa(current) + " from " + strconv.Itoa(size)) + } + + buf, err := hex.DecodeString(Keys[i]) + if err != nil { + panic("could not hex.Decode: " + err.Error()) + } + + key, err := x509.ParseECPrivateKey(buf) + if err != nil { + panic("could x509.ParseECPrivateKey: " + err.Error()) + } + + return key +} diff --git a/lib/test/logger.go b/lib/test/logger.go new file mode 100644 index 000000000..1ba431371 --- /dev/null +++ b/lib/test/logger.go @@ -0,0 +1,30 @@ +package test + +import ( + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +const sampling = 1000 + +// NewTestLogger creates test logger. +func NewTestLogger(debug bool) *zap.Logger { + if debug { + cfg := zap.NewDevelopmentConfig() + cfg.Sampling = &zap.SamplingConfig{ + Initial: sampling, + Thereafter: sampling, + } + + cfg.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder + + log, err := cfg.Build() + if err != nil { + panic("could not prepare logger: " + err.Error()) + } + + return log + } + + return zap.L() +} diff --git a/lib/transformer/alias.go b/lib/transformer/alias.go new file mode 100644 index 000000000..a18098bf5 --- /dev/null +++ b/lib/transformer/alias.go @@ -0,0 +1,25 @@ +package transformer + +import ( + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-api-go/storagegroup" +) + +type ( + // Object is a type alias of + // Object from object package of neofs-api-go. + Object = object.Object + + // ObjectID is a type alias of + // ObjectID from refs package of neofs-api-go. + ObjectID = refs.ObjectID + + // CID is a type alias of + // CID from refs package of neofs-api-go. + CID = refs.CID + + // StorageGroup is a type alias of + // StorageGroup from storagegroup package of neofs-api-go. 
+ StorageGroup = storagegroup.StorageGroup +) diff --git a/lib/transformer/put_test.go b/lib/transformer/put_test.go new file mode 100644 index 000000000..ddd7affd3 --- /dev/null +++ b/lib/transformer/put_test.go @@ -0,0 +1,764 @@ +package transformer + +import ( + "bytes" + "context" + "crypto/sha256" + "io" + "math/rand" + "sort" + "testing" + + "github.com/nspcc-dev/neofs-api-go/hash" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-api-go/session" + "github.com/nspcc-dev/neofs-api-go/storagegroup" + crypto "github.com/nspcc-dev/neofs-crypto" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/nspcc-dev/neofs-node/lib/objutil" + "github.com/nspcc-dev/neofs-node/lib/test" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" +) + +type ( + // Entity for mocking interfaces. + // Implementation of any interface intercepts arguments via f (if not nil). + // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. + testPutEntity struct { + // Set of interfaces which entity must implement, but some methods from those does not call. + + // Argument interceptor. Used for ascertain of correct parameter passage between components. + f func(...interface{}) + // Mocked result of any interface. + res interface{} + // Mocked error of any interface. + err error + } +) + +var ( + _ io.Writer = (*testPutEntity)(nil) + _ EpochReceiver = (*testPutEntity)(nil) + _ Transformer = (*testPutEntity)(nil) + _ storagegroup.InfoReceiver = (*testPutEntity)(nil) + _ objutil.Verifier = (*testPutEntity)(nil) +) + +func (s *testPutEntity) Verify(_ context.Context, obj *Object) error { + if s.f != nil { + s.f(obj) + } + return s.err +} + +func (s *testPutEntity) Write(p []byte) (int, error) { + if s.f != nil { + s.f(p) + } + return 0, s.err +} + +func (s *testPutEntity) Transform(_ context.Context, u ProcUnit, h ...ProcUnitHandler) error { + if s.f != nil { + s.f(u, h) + } + return s.err +} + +func (s *testPutEntity) GetSGInfo(_ context.Context, cid CID, group []ObjectID) (*StorageGroup, error) { + if s.f != nil { + s.f(cid, group) + } + if s.err != nil { + return nil, s.err + } + return s.res.(*StorageGroup), nil +} + +func (s *testPutEntity) Epoch() uint64 { return s.res.(uint64) } + +func TestNewTransformer(t *testing.T) { + validParams := Params{ + SGInfoReceiver: new(testPutEntity), + EpochReceiver: new(testPutEntity), + SizeLimit: 1, + Verifier: new(testPutEntity), + } + + t.Run("valid params", func(t *testing.T) { + res, err := NewTransformer(validParams) + require.NoError(t, err) + require.NotNil(t, res) + }) + t.Run("non-positive size", func(t *testing.T) { + p := validParams + p.SizeLimit = 0 + _, err := NewTransformer(p) + require.EqualError(t, err, errors.Wrap(errInvalidSizeLimit, transformerInstanceFailMsg).Error()) + }) + t.Run("empty SG info receiver", func(t *testing.T) { + p := validParams + p.SGInfoReceiver = nil + _, err := NewTransformer(p) + require.EqualError(t, err, errors.Wrap(errEmptySGInfoRecv, transformerInstanceFailMsg).Error()) + }) + t.Run("empty epoch receiver", func(t *testing.T) { + p := validParams + p.EpochReceiver = nil + _, err := NewTransformer(p) + require.EqualError(t, err, errors.Wrap(errEmptyEpochReceiver, transformerInstanceFailMsg).Error()) + }) + t.Run("empty object verifier", func(t *testing.T) { + p := 
validParams + p.Verifier = nil + _, err := NewTransformer(p) + require.EqualError(t, err, errors.Wrap(errEmptyVerifier, transformerInstanceFailMsg).Error()) + }) +} + +func Test_transformer(t *testing.T) { + ctx := context.TODO() + + u := ProcUnit{ + Head: &Object{ + Payload: testData(t, 10), + }, + Payload: new(emptyReader), + } + + handlers := []ProcUnitHandler{func(context.Context, ProcUnit) error { return nil }} + + t.Run("preliminary transformation failure", func(t *testing.T) { + // create custom error for test + pErr := internal.Error("test error for prelim transformer") + + s := &transformer{ + tPrelim: &testPutEntity{ + f: func(items ...interface{}) { + t.Run("correct prelim transformer params", func(t *testing.T) { + require.Equal(t, u, items[0]) + require.Empty(t, items[1]) + }) + }, + err: pErr, // force Transformer to return pErr + }, + } + + // ascertain that error returns as expected + require.EqualError(t, s.Transform(ctx, u, handlers...), pErr.Error()) + }) + + t.Run("size limiter error/correct sign processing", func(t *testing.T) { + // create custom error for test + sErr := internal.Error("test error for signer") + lErr := internal.Error("test error for size limiter") + + s := &transformer{ + tPrelim: new(testPutEntity), + tSizeLim: &testPutEntity{ + f: func(items ...interface{}) { + t.Run("correct size limiter params", func(t *testing.T) { + require.Equal(t, u, items[0]) + hs := items[1].([]ProcUnitHandler) + require.Len(t, hs, 1) + require.EqualError(t, hs[0](ctx, u), sErr.Error()) + }) + }, + err: lErr, // force Transformer to return lErr + }, + tSign: &testPutEntity{ + f: func(items ...interface{}) { + t.Run("correct signer params", func(t *testing.T) { + require.Equal(t, u, items[0]) + require.Equal(t, handlers, items[1]) + }) + }, + err: sErr, // force Transformer to return sErr + }, + } + + // ascertain that error returns as expected + require.EqualError(t, s.Transform(ctx, u, handlers...), lErr.Error()) + }) +} + +func Test_preliminaryTransformer(t *testing.T) { + ctx := context.TODO() + + u := ProcUnit{ + Head: &Object{ + Payload: testData(t, 10), + }, + Payload: new(emptyReader), + } + + t.Run("field moulder failure", func(t *testing.T) { + // create custom error for test + mErr := internal.Error("test error for field moulder") + + s := &preliminaryTransformer{ + fMoulder: &testPutEntity{ + f: func(items ...interface{}) { + t.Run("correct field moulder params", func(t *testing.T) { + require.Equal(t, u, items[0]) + require.Empty(t, items[1]) + }) + }, + err: mErr, // force Transformer to return mErr + }, + } + + // ascertain that error returns as expected + require.EqualError(t, s.Transform(ctx, u), mErr.Error()) + }) + + t.Run("correct result", func(t *testing.T) { + // create custom error for test + sgErr := internal.Error("test error for SG moulder") + + s := &preliminaryTransformer{ + fMoulder: new(testPutEntity), + sgMoulder: &testPutEntity{ + f: func(items ...interface{}) { + t.Run("correct field moulder params", func(t *testing.T) { + require.Equal(t, u, items[0]) + require.Empty(t, items[1]) + }) + }, + err: sgErr, // force Transformer to return sgErr + }, + } + + // ascertain that error returns as expected + require.EqualError(t, s.Transform(ctx, u), sgErr.Error()) + }) +} + +func Test_readChunk(t *testing.T) { + t.Run("empty slice", func(t *testing.T) { + t.Run("missing checksum header", func(t *testing.T) { + obj := new(Object) + + _, h := obj.LastHeader(object.HeaderType(object.PayloadChecksumHdr)) + require.Nil(t, h) + + require.NoError(t, 
readChunk(ProcUnit{ + Head: obj, + Payload: bytes.NewBuffer(testData(t, 10)), + }, nil, nil, nil)) + + _, h = obj.LastHeader(object.HeaderType(object.PayloadChecksumHdr)) + + require.NotNil(t, h) + require.Equal(t, sha256.New().Sum(nil), h.Value.(*object.Header_PayloadChecksum).PayloadChecksum) + }) + + t.Run("existing checksum header", func(t *testing.T) { + h := &object.Header_PayloadChecksum{PayloadChecksum: testData(t, 10)} + + obj := &Object{Headers: []object.Header{{Value: h}}} + + require.NoError(t, readChunk(ProcUnit{ + Head: obj, + Payload: bytes.NewBuffer(testData(t, 10)), + }, nil, nil, nil)) + + require.NotNil(t, h) + require.Equal(t, sha256.New().Sum(nil), h.PayloadChecksum) + }) + }) + + t.Run("non-empty slice", func(t *testing.T) { + t.Run("non-full data", func(t *testing.T) { + var ( + size = 10 + buf = testData(t, size) + r = bytes.NewBuffer(buf[:size-1]) + ) + + require.EqualError(t, + readChunk(ProcUnit{Head: new(Object), Payload: r}, buf, nil, nil), + ErrPayloadEOF.Error(), + ) + }) + + t.Run("hash accumulator write", func(t *testing.T) { + var ( + d = testData(t, 10) + srcHash = sha256.Sum256(d) + hAcc = sha256.New() + buf = bytes.NewBuffer(d) + b = make([]byte, len(d)) + obj = new(Object) + + srcHomoHash = hash.Sum(d) + homoHashHdr = &object.Header_HomoHash{HomoHash: hash.Sum(make([]byte, 0))} + ) + + t.Run("failure", func(t *testing.T) { + hErr := internal.Error("test error for hash writer") + b := testData(t, len(d)) + + require.EqualError(t, readChunk(EmptyPayloadUnit(new(Object)), b, &testPutEntity{ + f: func(items ...interface{}) { + t.Run("correct accumulator params", func(t *testing.T) { + require.Equal(t, b, items[0]) + }) + }, + err: hErr, + }, nil), hErr.Error()) + }) + + require.NoError(t, readChunk(ProcUnit{Head: obj, Payload: buf}, b, hAcc, homoHashHdr)) + + _, h := obj.LastHeader(object.HeaderType(object.PayloadChecksumHdr)) + require.NotNil(t, h) + require.Equal(t, srcHash[:], h.Value.(*object.Header_PayloadChecksum).PayloadChecksum) + + require.Equal(t, srcHash[:], hAcc.Sum(nil)) + require.Equal(t, srcHomoHash, homoHashHdr.HomoHash) + }) + }) +} + +func Test_headSigner(t *testing.T) { + ctx := context.TODO() + + t.Run("invalid input", func(t *testing.T) { + t.Run("missing token", func(t *testing.T) { + u := ProcUnit{Head: new(Object)} + require.Error(t, u.Head.Verify()) + s := &headSigner{verifier: &testPutEntity{err: internal.Error("")}} + require.EqualError(t, s.Transform(ctx, u), errNoToken.Error()) + }) + + t.Run("with token", func(t *testing.T) { + u := ProcUnit{Head: new(Object)} + + verifier, err := implementations.NewLocalHeadIntegrityVerifier(core.NewNeoKeyVerifier()) + require.NoError(t, err) + + require.Error(t, u.Head.Verify()) + + privateToken, err := session.NewPrivateToken(0) + require.NoError(t, err) + ctx := context.WithValue(ctx, PrivateSessionToken, privateToken) + + s := &headSigner{ + verifier: &testPutEntity{ + err: internal.Error(""), + }, + } + + key := &privateToken.PrivateKey().PublicKey + + u.Head.SystemHeader.OwnerID, err = refs.NewOwnerID(key) + require.NoError(t, err) + u.Head.AddHeader(&object.Header{ + Value: &object.Header_PublicKey{ + PublicKey: &object.PublicKey{ + Value: crypto.MarshalPublicKey(key), + }, + }, + }) + + require.NoError(t, s.Transform(ctx, u, func(_ context.Context, unit ProcUnit) error { + require.NoError(t, verifier.Verify(ctx, unit.Head)) + _, h := unit.Head.LastHeader(object.HeaderType(object.IntegrityHdr)) + require.NotNil(t, h) + d, err := objutil.MarshalHeaders(unit.Head, 
len(unit.Head.Headers)-1) + require.NoError(t, err) + cs := sha256.Sum256(d) + require.Equal(t, cs[:], h.Value.(*object.Header_Integrity).Integrity.GetHeadersChecksum()) + return nil + })) + + t.Run("valid input", func(t *testing.T) { + s := &headSigner{verifier: new(testPutEntity)} + require.NoError(t, s.Transform(ctx, u, func(_ context.Context, unit ProcUnit) error { + require.Equal(t, u, unit) + return nil + })) + }) + }) + }) +} + +func Test_fieldMoulder(t *testing.T) { + ctx := context.TODO() + epoch := uint64(100) + + fMoulder := &fieldMoulder{epochRecv: &testPutEntity{res: epoch}} + + t.Run("no token", func(t *testing.T) { + require.EqualError(t, new(fieldMoulder).Transform(ctx, ProcUnit{}), errNoToken.Error()) + }) + + t.Run("with token", func(t *testing.T) { + token := new(service.Token) + token.SetID(service.TokenID{1, 2, 3}) + + ctx := context.WithValue(ctx, PublicSessionToken, token) + + u := ProcUnit{Head: new(Object)} + + _, h := u.Head.LastHeader(object.HeaderType(object.TokenHdr)) + require.Nil(t, h) + + require.NoError(t, fMoulder.Transform(ctx, u)) + + _, h = u.Head.LastHeader(object.HeaderType(object.TokenHdr)) + require.Equal(t, token, h.Value.(*object.Header_Token).Token) + + require.False(t, u.Head.SystemHeader.ID.Empty()) + require.NotZero(t, u.Head.SystemHeader.CreatedAt.UnixTime) + require.Equal(t, epoch, u.Head.SystemHeader.CreatedAt.Epoch) + require.Equal(t, uint64(1), u.Head.SystemHeader.Version) + }) +} + +func Test_sgMoulder(t *testing.T) { + ctx := context.TODO() + + t.Run("invalid SG linking", func(t *testing.T) { + t.Run("w/ header and w/o links", func(t *testing.T) { + obj := new(Object) + obj.SetStorageGroup(new(storagegroup.StorageGroup)) + require.EqualError(t, new(sgMoulder).Transform(ctx, ProcUnit{Head: obj}), ErrInvalidSGLinking.Error()) + }) + + t.Run("w/o header and w/ links", func(t *testing.T) { + obj := new(Object) + addLink(obj, object.Link_StorageGroup, ObjectID{}) + require.EqualError(t, new(sgMoulder).Transform(ctx, ProcUnit{Head: obj}), ErrInvalidSGLinking.Error()) + }) + }) + + t.Run("non-SG", func(t *testing.T) { + obj := new(Object) + require.NoError(t, new(sgMoulder).Transform(ctx, ProcUnit{Head: obj})) + }) + + t.Run("receive SG info", func(t *testing.T) { + cid := testObjectAddress(t).CID + group := make([]ObjectID, 5) + for i := range group { + group[i] = testObjectAddress(t).ObjectID + } + + t.Run("failure", func(t *testing.T) { + obj := &Object{SystemHeader: object.SystemHeader{CID: cid}} + + obj.SetStorageGroup(new(storagegroup.StorageGroup)) + for i := range group { + addLink(obj, object.Link_StorageGroup, group[i]) + } + + sgErr := internal.Error("test error for SG info receiver") + + mSG := &sgMoulder{ + sgInfoRecv: &testPutEntity{ + f: func(items ...interface{}) { + t.Run("correct SG info receiver params", func(t *testing.T) { + cp := make([]ObjectID, len(group)) + copy(cp, group) + sort.Sort(storagegroup.IDList(cp)) + require.Equal(t, cid, items[0]) + require.Equal(t, cp, items[1]) + }) + }, + err: sgErr, + }, + } + + require.EqualError(t, mSG.Transform(ctx, ProcUnit{Head: obj}), sgErr.Error()) + }) + }) + + t.Run("correct result", func(t *testing.T) { + obj := new(Object) + obj.SetStorageGroup(new(storagegroup.StorageGroup)) + addLink(obj, object.Link_StorageGroup, ObjectID{}) + + sgInfo := &storagegroup.StorageGroup{ + ValidationDataSize: 19, + ValidationHash: hash.Sum(testData(t, 10)), + } + + mSG := &sgMoulder{ + sgInfoRecv: &testPutEntity{ + res: sgInfo, + }, + } + + require.NoError(t, mSG.Transform(ctx, ProcUnit{Head: 
obj})) + + _, h := obj.LastHeader(object.HeaderType(object.StorageGroupHdr)) + require.NotNil(t, h) + require.Equal(t, sgInfo, h.Value.(*object.Header_StorageGroup).StorageGroup) + }) +} + +func Test_sizeLimiter(t *testing.T) { + ctx := context.TODO() + + t.Run("limit entry", func(t *testing.T) { + payload := testData(t, 10) + payloadSize := uint64(len(payload) - 1) + + u := ProcUnit{ + Head: &Object{SystemHeader: object.SystemHeader{ + PayloadLength: payloadSize, + }}, + Payload: bytes.NewBuffer(payload[:payloadSize]), + } + + sl := &sizeLimiter{limit: payloadSize} + + t.Run("cut payload", func(t *testing.T) { + require.Error(t, sl.Transform(ctx, ProcUnit{ + Head: &Object{SystemHeader: object.SystemHeader{PayloadLength: payloadSize}}, + Payload: bytes.NewBuffer(payload[:payloadSize-1]), + })) + }) + + require.NoError(t, sl.Transform(ctx, u, func(_ context.Context, unit ProcUnit) error { + _, err := unit.Payload.Read(make([]byte, 1)) + require.EqualError(t, err, io.EOF.Error()) + require.Equal(t, payload[:payloadSize], unit.Head.Payload) + _, h := unit.Head.LastHeader(object.HeaderType(object.HomoHashHdr)) + require.NotNil(t, h) + require.Equal(t, hash.Sum(payload[:payloadSize]), h.Value.(*object.Header_HomoHash).HomoHash) + return nil + })) + }) + + t.Run("limit exceed", func(t *testing.T) { + payload := testData(t, 100) + sizeLimit := uint64(len(payload)) / 13 + + pToken, err := session.NewPrivateToken(0) + require.NoError(t, err) + + srcObj := &object.Object{ + SystemHeader: object.SystemHeader{ + Version: 12, + PayloadLength: uint64(len(payload)), + ID: testObjectAddress(t).ObjectID, + OwnerID: object.OwnerID{1, 2, 3}, + CID: testObjectAddress(t).CID, + }, + Headers: []object.Header{ + {Value: &object.Header_UserHeader{UserHeader: &object.UserHeader{Key: "key", Value: "value"}}}, + }, + } + + u := ProcUnit{ + Head: srcObj, + Payload: bytes.NewBuffer(payload), + } + + epoch := uint64(77) + + sl := &sizeLimiter{ + limit: sizeLimit, + epochRecv: &testPutEntity{res: epoch}, + } + + t.Run("no token", func(t *testing.T) { + require.EqualError(t, sl.Transform(ctx, ProcUnit{ + Head: &Object{ + SystemHeader: object.SystemHeader{ + PayloadLength: uint64(len(payload)), + }, + }, + Payload: bytes.NewBuffer(payload), + }), errNoToken.Error()) + }) + + ctx := context.WithValue(ctx, PrivateSessionToken, pToken) + + t.Run("cut payload", func(t *testing.T) { + require.Error(t, sl.Transform(ctx, ProcUnit{ + Head: &Object{ + SystemHeader: object.SystemHeader{ + PayloadLength: uint64(len(payload)) + 1, + }, + }, + Payload: bytes.NewBuffer(payload), + })) + }) + + objs := make([]Object, 0) + + t.Run("handler error", func(t *testing.T) { + hErr := internal.Error("test error for handler") + + require.EqualError(t, sl.Transform(ctx, ProcUnit{ + Head: &Object{ + SystemHeader: object.SystemHeader{PayloadLength: uint64(len(payload))}, + Headers: make([]object.Header, 0), + }, + Payload: bytes.NewBuffer(payload), + }, func(context.Context, ProcUnit) error { return hErr }), hErr.Error()) + }) + + require.NoError(t, sl.Transform(ctx, u, func(_ context.Context, unit ProcUnit) error { + _, err := unit.Payload.Read(make([]byte, 1)) + require.EqualError(t, err, io.EOF.Error()) + objs = append(objs, *unit.Head.Copy()) + return nil + })) + + ln := len(objs) + + res := make([]byte, 0, len(payload)) + + zObj := objs[ln-1] + require.Zero(t, zObj.SystemHeader.PayloadLength) + require.Empty(t, zObj.Payload) + require.Empty(t, zObj.Links(object.Link_Next)) + require.Empty(t, zObj.Links(object.Link_Previous)) + require.Empty(t, 
zObj.Links(object.Link_Parent)) + children := zObj.Links(object.Link_Child) + require.Len(t, children, ln-1) + for i := range objs[:ln-1] { + require.Equal(t, objs[i].SystemHeader.ID, children[i]) + } + + for i := range objs[:ln-1] { + res = append(res, objs[i].Payload...) + if i == 0 { + require.Equal(t, objs[i].Links(object.Link_Next)[0], objs[i+1].SystemHeader.ID) + require.True(t, objs[i].Links(object.Link_Previous)[0].Empty()) + } else if i < ln-2 { + require.Equal(t, objs[i].Links(object.Link_Previous)[0], objs[i-1].SystemHeader.ID) + require.Equal(t, objs[i].Links(object.Link_Next)[0], objs[i+1].SystemHeader.ID) + } else { + _, h := objs[i].LastHeader(object.HeaderType(object.HomoHashHdr)) + require.NotNil(t, h) + require.Equal(t, hash.Sum(payload), h.Value.(*object.Header_HomoHash).HomoHash) + require.Equal(t, objs[i].Links(object.Link_Previous)[0], objs[i-1].SystemHeader.ID) + require.True(t, objs[i].Links(object.Link_Next)[0].Empty()) + } + } + + require.Equal(t, payload, res) + }) +} + +// testData returns size bytes of random data. +func testData(t *testing.T, size int) []byte { + res := make([]byte, size) + _, err := rand.Read(res) + require.NoError(t, err) + return res +} + +// testObjectAddress returns new random object address. +func testObjectAddress(t *testing.T) refs.Address { + oid, err := refs.NewObjectID() + require.NoError(t, err) + return refs.Address{CID: refs.CIDForBytes(testData(t, refs.CIDSize)), ObjectID: oid} +} + +func TestIntegration(t *testing.T) { + ownerKey := test.DecodeKey(1) + + ownerID, err := refs.NewOwnerID(&ownerKey.PublicKey) + require.NoError(t, err) + + privToken, err := session.NewPrivateToken(0) + require.NoError(t, err) + + pkBytes, err := session.PublicSessionToken(privToken) + require.NoError(t, err) + + ctx := context.WithValue(context.TODO(), PrivateSessionToken, privToken) + + pubToken := new(service.Token) + pubToken.SetID(service.TokenID{1, 2, 3}) + pubToken.SetSessionKey(pkBytes) + pubToken.SetOwnerID(ownerID) + pubToken.SetOwnerKey(crypto.MarshalPublicKey(&ownerKey.PublicKey)) + require.NoError(t, service.AddSignatureWithKey(ownerKey, service.NewSignedSessionToken(pubToken))) + + ctx = context.WithValue(ctx, PublicSessionToken, pubToken) + + t.Run("non-SG object", func(t *testing.T) { + t.Run("with split", func(t *testing.T) { + tr, err := NewTransformer(Params{ + SGInfoReceiver: new(testPutEntity), + EpochReceiver: &testPutEntity{res: uint64(1)}, + SizeLimit: 13, + Verifier: &testPutEntity{ + err: internal.Error(""), // force verifier to return non-nil error + }, + }) + require.NoError(t, err) + + payload := make([]byte, 20) + _, err = rand.Read(payload) + require.NoError(t, err) + + obj := &Object{ + SystemHeader: object.SystemHeader{ + PayloadLength: uint64(len(payload)), + CID: CID{3}, + }, + Headers: []object.Header{ + {Value: &object.Header_UserHeader{UserHeader: &object.UserHeader{Key: "key", Value: "value"}}}, + }, + } + + obj.SystemHeader.OwnerID = ownerID + + obj.SetHeader(&object.Header{ + Value: &object.Header_Token{ + Token: pubToken, + }, + }) + + testTransformer(t, ctx, ProcUnit{ + Head: obj, + Payload: bytes.NewBuffer(payload), + }, tr, payload) + }) + }) +} + +func testTransformer(t *testing.T, ctx context.Context, u ProcUnit, tr Transformer, src []byte) { + objList := make([]Object, 0) + verifier, err := implementations.NewLocalHeadIntegrityVerifier(core.NewNeoKeyVerifier()) + require.NoError(t, err) + + require.NoError(t, tr.Transform(ctx, u, func(_ context.Context, unit ProcUnit) error { + require.NoError(t, 
verifier.Verify(ctx, unit.Head)) + objList = append(objList, *unit.Head.Copy()) + return nil + })) + + reverse := NewRestorePipeline(SplitRestorer()) + + res, err := reverse.Restore(ctx, objList...) + require.NoError(t, err) + + integrityVerifier, err := implementations.NewLocalIntegrityVerifier(core.NewNeoKeyVerifier()) + require.NoError(t, err) + require.NoError(t, integrityVerifier.Verify(ctx, &res[0])) + + require.Equal(t, src, res[0].Payload) + _, h := res[0].LastHeader(object.HeaderType(object.HomoHashHdr)) + require.True(t, hash.Sum(src).Equal(h.Value.(*object.Header_HomoHash).HomoHash)) +} + +func addLink(o *Object, t object.Link_Type, id ObjectID) { + o.AddHeader(&object.Header{Value: &object.Header_Link{ + Link: &object.Link{Type: t, ID: id}, + }}) +} diff --git a/lib/transformer/restore.go b/lib/transformer/restore.go new file mode 100644 index 000000000..6242bb761 --- /dev/null +++ b/lib/transformer/restore.go @@ -0,0 +1,126 @@ +package transformer + +import ( + "context" + "sync" + + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/pkg/errors" +) + +type ( + // ObjectRestorer is an interface of object restorer. + ObjectRestorer interface { + Type() object.Transform_Type + Restore(context.Context, ...Object) ([]Object, error) + } + + restorePipeline struct { + ObjectRestorer + *sync.RWMutex + items map[object.Transform_Type]ObjectRestorer + } + + splitRestorer struct{} +) + +var errEmptyObjList = errors.New("object list is empty") + +var errMissingParentLink = errors.New("missing parent link") + +func (s *restorePipeline) Restore(ctx context.Context, srcObjs ...Object) ([]Object, error) { + if len(srcObjs) == 0 { + return nil, errEmptyInput + } + + s.RLock() + defer s.RUnlock() + + var ( + objs = srcObjs + err error + ) + + for { + _, th := objs[0].LastHeader(object.HeaderType(object.TransformHdr)) + if th == nil { + break + } + + transform := th.Value.(*object.Header_Transform).Transform + + tr, ok := s.items[transform.Type] + if !ok { + return nil, errors.Errorf("missing restorer (%s)", transform.Type) + } + + if objs, err = tr.Restore(ctx, objs...); err != nil { + return nil, errors.Wrapf(err, "restoration failed (%s)", transform.Type) + } + } + + return objs, nil +} + +// NewRestorePipeline is a constructor of the pipeline of object restorers. +func NewRestorePipeline(t ...ObjectRestorer) ObjectRestorer { + m := make(map[object.Transform_Type]ObjectRestorer, len(t)) + + for i := range t { + m[t[i].Type()] = t[i] + } + + return &restorePipeline{ + RWMutex: new(sync.RWMutex), + items: m, + } +} + +func (*splitRestorer) Type() object.Transform_Type { + return object.Transform_Split +} + +func (*splitRestorer) Restore(ctx context.Context, objs ...Object) ([]Object, error) { + if len(objs) == 0 { + return nil, errEmptyObjList + } + + chain, err := GetChain(objs...) + if err != nil { + return nil, errors.Wrap(err, "could not get chain of objects") + } + + obj := chain[len(chain)-1] + + var ( + size uint64 + p = make([]byte, 0, len(chain[0].Payload)*len(chain)) + ) + + for j := 0; j < len(chain); j++ { + p = append(p, chain[j].Payload...) 
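+		// Accumulate the declared payload length of each chain element;
+		// the sum becomes the payload length of the restored parent object below.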
+ size += chain[j].SystemHeader.PayloadLength + } + + obj.SystemHeader.PayloadLength = size + obj.Payload = p + + parent, err := lastLink(&obj, object.Link_Parent) + if err != nil { + return nil, errMissingParentLink + } + + obj.SystemHeader.ID = parent + + err = deleteTransformer(&obj, object.Transform_Split) + if err != nil { + return nil, err + } + + return []Object{obj}, nil +} + +// SplitRestorer is a splitted object restorer's constructor. +func SplitRestorer() ObjectRestorer { + return new(splitRestorer) +} diff --git a/lib/transformer/transformer.go b/lib/transformer/transformer.go new file mode 100644 index 000000000..0016035b3 --- /dev/null +++ b/lib/transformer/transformer.go @@ -0,0 +1,528 @@ +package transformer + +import ( + "context" + "crypto/sha256" + "io" + "sort" + "time" + + "github.com/nspcc-dev/neofs-api-go/hash" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-api-go/session" + "github.com/nspcc-dev/neofs-api-go/storagegroup" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/objutil" + "github.com/pkg/errors" +) + +type ( + // Type is a type alias of + // Type from object package of neofs-api-go. + Type = object.Transform_Type + + // ProcUnit groups the information about transforming unit. + ProcUnit struct { + Head *Object + Payload io.Reader + } + + // ProcUnitHandler is a handling ProcUnit function. + ProcUnitHandler func(context.Context, ProcUnit) error + + // Transformer is an interface of object transformer. + Transformer interface { + Transform(context.Context, ProcUnit, ...ProcUnitHandler) error + } + + // EpochReceiver is an interface of epoch number container with read access. + EpochReceiver interface { + Epoch() uint64 + } + + transformer struct { + tPrelim Transformer + tSizeLim Transformer + tSign Transformer + } + + preliminaryTransformer struct { + fMoulder Transformer + sgMoulder Transformer + } + + fieldMoulder struct { + epochRecv EpochReceiver + } + + sgMoulder struct { + sgInfoRecv storagegroup.InfoReceiver + } + + sizeLimiter struct { + limit uint64 + epochRecv EpochReceiver + } + + headSigner struct { + verifier objutil.Verifier + } + + emptyReader struct{} + + // Params groups the parameters of object transformer's constructor. + Params struct { + SGInfoReceiver storagegroup.InfoReceiver + EpochReceiver EpochReceiver + SizeLimit uint64 + Verifier objutil.Verifier + } +) + +// ErrPayloadEOF is returned by Transformer that +// received unexpected end of object payload. +const ErrPayloadEOF = internal.Error("payload EOF") + +const ( + verifyHeadersCount = 2 // payload checksum, integrity + splitHeadersCount = 4 // flag, parent, left, right + + errEmptyInput = internal.Error("empty input") + + transformerInstanceFailMsg = "could not create transformer instance" + errEmptySGInfoRecv = internal.Error("empty storage group info receivers") + errInvalidSizeLimit = internal.Error("non-positive object size limit") + errEmptyEpochReceiver = internal.Error("empty epoch receiver") + errEmptyVerifier = internal.Error("empty object verifier") + + // ErrInvalidSGLinking is returned by Transformer that received + // an object with broken storage group links. + ErrInvalidSGLinking = internal.Error("invalid storage group linking") + + // PrivateSessionToken is a context key for session.PrivateToken. + PrivateSessionToken = "private token" + + // PublicSessionToken is a context key for service.SessionToken. 
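+	// Both token values are read back from the request context: signWithToken
+	// expects a session.PrivateToken under PrivateSessionToken, and fieldMoulder
+	// expects a *service.Token under PublicSessionToken. A minimal setup sketch
+	// (variable names are illustrative):
+	//
+	//	ctx = context.WithValue(ctx, PrivateSessionToken, pToken)
+	//	ctx = context.WithValue(ctx, PublicSessionToken, pubToken)
+	//
+	// Without them Transform fails with errNoToken.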
+ PublicSessionToken = "public token" + + errNoToken = internal.Error("no token provided") +) + +var errChainNotFound = errors.New("chain not found") + +var errCutChain = errors.New("GetChain failed: chain is not full") + +var errMissingTransformHdr = errors.New("cannot find transformer header") + +// NewTransformer is an object transformer's constructor. +func NewTransformer(p Params) (Transformer, error) { + switch { + case p.SizeLimit <= 0: + return nil, errors.Wrap(errInvalidSizeLimit, transformerInstanceFailMsg) + case p.EpochReceiver == nil: + return nil, errors.Wrap(errEmptyEpochReceiver, transformerInstanceFailMsg) + case p.SGInfoReceiver == nil: + return nil, errors.Wrap(errEmptySGInfoRecv, transformerInstanceFailMsg) + case p.Verifier == nil: + return nil, errors.Wrap(errEmptyVerifier, transformerInstanceFailMsg) + } + + return &transformer{ + tPrelim: &preliminaryTransformer{ + fMoulder: &fieldMoulder{ + epochRecv: p.EpochReceiver, + }, + sgMoulder: &sgMoulder{ + sgInfoRecv: p.SGInfoReceiver, + }, + }, + tSizeLim: &sizeLimiter{ + limit: p.SizeLimit, + epochRecv: p.EpochReceiver, + }, + tSign: &headSigner{ + verifier: p.Verifier, + }, + }, nil +} + +func (s *transformer) Transform(ctx context.Context, unit ProcUnit, handlers ...ProcUnitHandler) error { + if err := s.tPrelim.Transform(ctx, unit); err != nil { + return err + } + + return s.tSizeLim.Transform(ctx, unit, func(ctx context.Context, unit ProcUnit) error { + return s.tSign.Transform(ctx, unit, handlers...) + }) +} + +func (s *preliminaryTransformer) Transform(ctx context.Context, unit ProcUnit, _ ...ProcUnitHandler) error { + if err := s.fMoulder.Transform(ctx, unit); err != nil { + return err + } + + return s.sgMoulder.Transform(ctx, unit) +} + +// TODO: simplify huge function. +func (s *sizeLimiter) Transform(ctx context.Context, unit ProcUnit, handlers ...ProcUnitHandler) error { + if unit.Head.SystemHeader.PayloadLength <= s.limit { + homoHashHdr := &object.Header_HomoHash{HomoHash: hash.Sum(make([]byte, 0))} + + unit.Head.AddHeader(&object.Header{Value: homoHashHdr}) + + buf := make([]byte, unit.Head.SystemHeader.PayloadLength) + + if err := readChunk(unit, buf, nil, homoHashHdr); err != nil { + return err + } + + unit.Head.Payload = buf + + return procHandlers(ctx, EmptyPayloadUnit(unit.Head), handlers...) + } + + var ( + err error + buf = make([]byte, s.limit) + hAcc = sha256.New() + srcHdrLen = len(unit.Head.Headers) + pObj = unit.Head + resObj = ProcUnit{ + Head: &Object{ + SystemHeader: object.SystemHeader{ + Version: pObj.SystemHeader.Version, + OwnerID: pObj.SystemHeader.OwnerID, + CID: pObj.SystemHeader.CID, + CreatedAt: object.CreationPoint{ + UnixTime: time.Now().Unix(), + Epoch: s.epochRecv.Epoch(), + }, + }, + }, + Payload: unit.Payload, + } + left, right = &object.Link{Type: object.Link_Previous}, &object.Link{Type: object.Link_Next} + hashAccHdr, hashHdr = new(object.Header_PayloadChecksum), new(object.Header_PayloadChecksum) + homoHashAccHdr = &object.Header_HomoHash{HomoHash: hash.Sum(make([]byte, 0))} + childCount = pObj.SystemHeader.PayloadLength/s.limit + 1 + ) + + if right.ID, err = refs.NewObjectID(); err != nil { + return err + } + + splitHeaders := make([]object.Header, 0, 3*verifyHeadersCount+splitHeadersCount+childCount) + + splitHeaders = append(splitHeaders, pObj.Headers...) 
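+	// The appended service headers follow a fixed layout: split transform marker,
+	// parent link, previous/next links, payload checksum, integrity header,
+	// homomorphic hash accumulator, accumulated checksum and a final integrity
+	// header; child links are appended to the tail of this slice as parts are cut.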
+ splitHeaders = append(splitHeaders, []object.Header{ + {Value: &object.Header_Transform{Transform: &object.Transform{Type: object.Transform_Split}}}, + {Value: &object.Header_Link{Link: &object.Link{ + Type: object.Link_Parent, + ID: unit.Head.SystemHeader.ID, + }}}, + {Value: &object.Header_Link{Link: left}}, + {Value: &object.Header_Link{Link: right}}, + {Value: hashHdr}, + {Value: &object.Header_Integrity{Integrity: new(object.IntegrityHeader)}}, + {Value: homoHashAccHdr}, + {Value: hashAccHdr}, + {Value: &object.Header_Integrity{Integrity: new(object.IntegrityHeader)}}, + }...) + + children := splitHeaders[srcHdrLen+2*verifyHeadersCount+splitHeadersCount+1:] + pObj.Headers = splitHeaders[:srcHdrLen+2*verifyHeadersCount+splitHeadersCount] + + for tail := pObj.SystemHeader.PayloadLength; tail > 0; tail -= min(tail, s.limit) { + size := min(tail, s.limit) + + resObj.Head.Headers = pObj.Headers[:len(pObj.Headers)-verifyHeadersCount-1] + if err = readChunk(resObj, buf[:size], hAcc, homoHashAccHdr); err != nil { + return err + } + + resObj.Head.SystemHeader.PayloadLength = size + resObj.Head.Payload = buf[:size] + left.ID, resObj.Head.SystemHeader.ID = resObj.Head.SystemHeader.ID, right.ID + + if tail <= s.limit { + right.ID = ObjectID{} + + temp := make([]object.Header, verifyHeadersCount+1) // +1 for homomorphic hash + + copy(temp, pObj.Headers[srcHdrLen:]) + + hashAccHdr.PayloadChecksum = hAcc.Sum(nil) + + copy(pObj.Headers[srcHdrLen:srcHdrLen+verifyHeadersCount+1], + pObj.Headers[len(pObj.Headers)-verifyHeadersCount:]) + + resObj.Head.Headers = pObj.Headers[:srcHdrLen+verifyHeadersCount] + + if err = signWithToken(ctx, &Object{ + SystemHeader: pObj.SystemHeader, + Headers: resObj.Head.Headers, + }); err != nil { + return err + } + + copy(pObj.Headers[srcHdrLen+2*(verifyHeadersCount+1):], + pObj.Headers[srcHdrLen+verifyHeadersCount+1:srcHdrLen+verifyHeadersCount+splitHeadersCount]) + + copy(pObj.Headers[srcHdrLen+verifyHeadersCount+1:], temp) + + resObj.Head.Headers = pObj.Headers[:len(pObj.Headers)] + } else if right.ID, err = refs.NewObjectID(); err != nil { + return err + } + + if err := procHandlers(ctx, EmptyPayloadUnit(resObj.Head), handlers...); err != nil { + return err + } + + children = append(children, object.Header{Value: &object.Header_Link{Link: &object.Link{ + Type: object.Link_Child, + ID: resObj.Head.SystemHeader.ID, + }}}) + } + + pObj.SystemHeader.PayloadLength = 0 + pObj.Headers = append(pObj.Headers[:srcHdrLen], children...) + + if err := readChunk(unit, nil, nil, nil); err != nil { + return err + } + + return procHandlers(ctx, EmptyPayloadUnit(pObj), handlers...) 
+} + +func readChunk(unit ProcUnit, buf []byte, hAcc io.Writer, homoHashAcc *object.Header_HomoHash) (err error) { + var csHdr *object.Header_PayloadChecksum + + if _, v := unit.Head.LastHeader(object.HeaderType(object.PayloadChecksumHdr)); v == nil { + csHdr = new(object.Header_PayloadChecksum) + + unit.Head.Headers = append(unit.Head.Headers, object.Header{Value: csHdr}) + } else { + csHdr = v.Value.(*object.Header_PayloadChecksum) + } + + if _, err = io.ReadFull(unit.Payload, buf); err != nil && err != io.EOF { + if errors.Is(err, io.ErrUnexpectedEOF) { + err = ErrPayloadEOF + } + + return + } else if hAcc != nil { + if _, err = hAcc.Write(buf); err != nil { + return + } + } + + if homoHashAcc != nil { + if homoHashAcc.HomoHash, err = hash.Concat([]hash.Hash{homoHashAcc.HomoHash, hash.Sum(buf)}); err != nil { + return + } + } + + h := sha256.Sum256(buf) + csHdr.PayloadChecksum = h[:] + + return nil +} + +func (s *headSigner) Transform(ctx context.Context, unit ProcUnit, handlers ...ProcUnitHandler) error { + if s.verifier.Verify(ctx, unit.Head) != nil { + if err := signWithToken(ctx, unit.Head); err != nil { + return err + } + } + + return procHandlers(ctx, unit, handlers...) +} + +func signWithToken(ctx context.Context, obj *Object) error { + integrityHdr := new(object.IntegrityHeader) + + if pToken, ok := ctx.Value(PrivateSessionToken).(session.PrivateToken); !ok { + return errNoToken + } else if hdrData, err := objutil.MarshalHeaders(obj, len(obj.Headers)); err != nil { + return err + } else { + cs := sha256.Sum256(hdrData) + integrityHdr.SetHeadersChecksum(cs[:]) + if err = service.AddSignatureWithKey(pToken.PrivateKey(), integrityHdr); err != nil { + return err + } + } + + obj.AddHeader(&object.Header{Value: &object.Header_Integrity{Integrity: integrityHdr}}) + + return nil +} + +func (s *fieldMoulder) Transform(ctx context.Context, unit ProcUnit, _ ...ProcUnitHandler) (err error) { + token, ok := ctx.Value(PublicSessionToken).(*service.Token) + if !ok { + return errNoToken + } + + unit.Head.AddHeader(&object.Header{ + Value: &object.Header_Token{ + Token: token, + }, + }) + + if unit.Head.SystemHeader.ID.Empty() { + if unit.Head.SystemHeader.ID, err = refs.NewObjectID(); err != nil { + return + } + } + + if unit.Head.SystemHeader.CreatedAt.UnixTime == 0 { + unit.Head.SystemHeader.CreatedAt.UnixTime = time.Now().Unix() + } + + if unit.Head.SystemHeader.CreatedAt.Epoch == 0 { + unit.Head.SystemHeader.CreatedAt.Epoch = s.epochRecv.Epoch() + } + + if unit.Head.SystemHeader.Version == 0 { + unit.Head.SystemHeader.Version = 1 + } + + return nil +} + +func (s *sgMoulder) Transform(ctx context.Context, unit ProcUnit, _ ...ProcUnitHandler) error { + sgLinks := unit.Head.Links(object.Link_StorageGroup) + + group, err := unit.Head.StorageGroup() + + if nonEmptyList := len(sgLinks) > 0; (err == nil) != nonEmptyList { + return ErrInvalidSGLinking + } else if err != nil || !group.Empty() { + return nil + } + + sort.Sort(storagegroup.IDList(sgLinks)) + + sgInfo, err := s.sgInfoRecv.GetSGInfo(ctx, unit.Head.SystemHeader.CID, sgLinks) + if err != nil { + return err + } + + unit.Head.SetStorageGroup(sgInfo) + + return nil +} + +func procHandlers(ctx context.Context, unit ProcUnit, handlers ...ProcUnitHandler) error { + for i := range handlers { + if err := handlers[i](ctx, unit); err != nil { + return err + } + } + + return nil +} + +func (*emptyReader) Read([]byte) (n int, err error) { return 0, io.EOF } + +// EmptyPayloadUnit returns ProcUnit with Object from argument and empty payload reader +// 
that always returns (0, io.EOF). +func EmptyPayloadUnit(head *Object) ProcUnit { return ProcUnit{Head: head, Payload: new(emptyReader)} } + +func min(a, b uint64) uint64 { + if a < b { + return a + } + + return b +} + +// GetChain builds a list of objects in the hereditary chain. +// In case of impossibility to do this, an error is returned. +func GetChain(srcObjs ...Object) ([]Object, error) { + var ( + err error + first, id ObjectID + res = make([]Object, 0, len(srcObjs)) + m = make(map[ObjectID]*Object, len(srcObjs)) + ) + + // Fill map with all objects + for i := range srcObjs { + m[srcObjs[i].SystemHeader.ID] = &srcObjs[i] + + prev, err := lastLink(&srcObjs[i], object.Link_Previous) + if err == nil && prev.Empty() { // then it is first + id, err = lastLink(&srcObjs[i], object.Link_Next) + if err != nil { + return nil, errors.Wrap(err, "GetChain failed: missing first object next links") + } + + first = srcObjs[i].SystemHeader.ID + } + } + + // Check first presence + if first.Empty() { + return nil, errChainNotFound + } + + res = append(res, *m[first]) + + // Iterate chain + for count := 0; !id.Empty() && count < len(srcObjs); count++ { + nextObj, ok := m[id] + if !ok { + return nil, errors.Errorf("GetChain failed: missing next object %s", id) + } + + id, err = lastLink(nextObj, object.Link_Next) + if err != nil { + return nil, errors.Wrap(err, "GetChain failed: missing object next links") + } + + res = append(res, *nextObj) + } + + // Check last chain element has empty next (prevent cut chain) + id, err = lastLink(&res[len(res)-1], object.Link_Next) + if err != nil { + return nil, errors.Wrap(err, "GetChain failed: missing object next links") + } else if !id.Empty() { + return nil, errCutChain + } + + return res, nil +} + +func deleteTransformer(o *Object, t object.Transform_Type) error { + n, th := o.LastHeader(object.HeaderType(object.TransformHdr)) + if th == nil || th.Value.(*object.Header_Transform).Transform.Type != t { + return errMissingTransformHdr + } + + o.Headers = o.Headers[:n] + + return nil +} + +func lastLink(o *Object, t object.Link_Type) (res ObjectID, err error) { + for i := len(o.Headers) - 1; i >= 0; i-- { + if v, ok := o.Headers[i].Value.(*object.Header_Link); ok { + if v.Link.GetType() == t { + res = v.Link.ID + return + } + } + } + + err = errors.Errorf("object.lastLink: links of type %s not found", t) + + return +} diff --git a/lib/transport/connection.go b/lib/transport/connection.go new file mode 100644 index 000000000..bb051b4a9 --- /dev/null +++ b/lib/transport/connection.go @@ -0,0 +1,39 @@ +package transport + +import ( + "sync/atomic" + + manet "github.com/multiformats/go-multiaddr-net" +) + +type ( + // Connection is an interface of network connection. + Connection interface { + manet.Conn + Closed() bool + } + + conn struct { + manet.Conn + closed *int32 + } +) + +func newConnection(con manet.Conn) Connection { + return &conn{ + Conn: con, + closed: new(int32), + } +} + +// Closed checks that connection closed. +func (c *conn) Closed() bool { return atomic.LoadInt32(c.closed) == 1 } + +// Close connection and write state. 
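+// Close is idempotent: the underlying connection is closed only by the first
+// call that flips the closed flag via compare-and-swap; later calls return nil.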
+func (c *conn) Close() error { + if atomic.CompareAndSwapInt32(c.closed, 0, 1) { + return c.Conn.Close() + } + + return nil +} diff --git a/lib/transport/object.go b/lib/transport/object.go new file mode 100644 index 000000000..0965265e1 --- /dev/null +++ b/lib/transport/object.go @@ -0,0 +1,107 @@ +package transport + +import ( + "context" + "io" + "time" + + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-api-go/service" +) + +type ( + // ObjectTransport is an interface of the executor of object remote operations. + ObjectTransport interface { + Transport(context.Context, ObjectTransportParams) + } + + // ObjectTransportParams groups the parameters of remote object operation. + ObjectTransportParams struct { + TransportInfo MetaInfo + TargetNode multiaddr.Multiaddr + ResultHandler ResultHandler + } + + // ResultHandler is an interface of remote object operation's result handler. + ResultHandler interface { + HandleResult(context.Context, multiaddr.Multiaddr, interface{}, error) + } + + // MetaInfo is an interface of the container of cross-operation values. + MetaInfo interface { + GetTTL() uint32 + GetTimeout() time.Duration + service.SessionTokenSource + GetRaw() bool + Type() object.RequestType + service.BearerTokenSource + service.ExtendedHeadersSource + } + + // SearchInfo is an interface of the container of object Search operation parameters. + SearchInfo interface { + MetaInfo + GetCID() refs.CID + GetQuery() []byte + } + + // PutInfo is an interface of the container of object Put operation parameters. + PutInfo interface { + MetaInfo + GetHead() *object.Object + Payload() io.Reader + CopiesNumber() uint32 + } + + // AddressInfo is an interface of the container of object request by Address. + AddressInfo interface { + MetaInfo + GetAddress() refs.Address + } + + // GetInfo is an interface of the container of object Get operation parameters. + GetInfo interface { + AddressInfo + } + + // HeadInfo is an interface of the container of object Head operation parameters. + HeadInfo interface { + GetInfo + GetFullHeaders() bool + } + + // RangeInfo is an interface of the container of object GetRange operation parameters. + RangeInfo interface { + AddressInfo + GetRange() object.Range + } + + // RangeHashInfo is an interface of the container of object GetRangeHash operation parameters. + RangeHashInfo interface { + AddressInfo + GetRanges() []object.Range + GetSalt() []byte + } +) + +const ( + // KeyID is a filter key to object ID field. + KeyID = "ID" + + // KeyTombstone is a filter key to tombstone header. + KeyTombstone = "TOMBSTONE" + + // KeyStorageGroup is a filter key to storage group link. + KeyStorageGroup = "STORAGE_GROUP" + + // KeyNoChildren is a filter key to objects w/o child links. + KeyNoChildren = "LEAF" + + // KeyParent is a filter key to parent link. + KeyParent = "PARENT" + + // KeyHasParent is a filter key to objects with parent link. + KeyHasParent = "HAS_PAR" +) diff --git a/lib/transport/transport.go b/lib/transport/transport.go new file mode 100644 index 000000000..4e06fedd3 --- /dev/null +++ b/lib/transport/transport.go @@ -0,0 +1,76 @@ +package transport + +import ( + "context" + "fmt" + "time" + + "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr-net" + circuit "github.com/rubyist/circuitbreaker" +) + +type ( + // Transport is an interface of network connection listener. 
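+	// A minimal usage sketch (assuming a prepared context ctx and a target
+	// multiaddr addr; names are illustrative only):
+	//
+	//	tr := New(5, time.Second) // 5 consecutive failures open the breaker
+	//	conn, err := tr.Dial(ctx, addr, false)
+	//	if err != nil {
+	//		// err may be circuit.ErrBreakerOpen after repeated failures
+	//	}
+	//	defer conn.Close()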
+ Transport interface { + Dial(context.Context, multiaddr.Multiaddr, bool) (Connection, error) + Listen(multiaddr.Multiaddr) (manet.Listener, error) + } + + transport struct { + threshold int64 + timeout time.Duration + panel *circuit.Panel + } +) + +const defaultBreakerName = "_NeoFS" + +func (t *transport) Dial(ctx context.Context, addr multiaddr.Multiaddr, reset bool) (Connection, error) { + var ( + con manet.Conn + breaker = t.breakerLookup(addr) + ) + + if reset { + breaker.Reset() + } + + err := breaker.CallContext(ctx, func() (errCall error) { + var d manet.Dialer + con, errCall = d.DialContext(ctx, addr) + return errCall + }, t.timeout) + + if err != nil { + return nil, err + } + + return newConnection(con), nil +} + +func (t *transport) Listen(addr multiaddr.Multiaddr) (manet.Listener, error) { + return manet.Listen(addr) +} + +func (t *transport) breakerLookup(addr fmt.Stringer) *circuit.Breaker { + panel := defaultBreakerName + addr.String() + + cb, ok := t.panel.Get(panel) + if !ok { + cb = circuit.NewConsecutiveBreaker(t.threshold) + t.panel.Add(panel, cb) + } + + return cb +} + +// New is a transport component's constructor. +func New(threshold int64, timeout time.Duration) Transport { + breaker := circuit.NewConsecutiveBreaker(threshold) + + panel := circuit.NewPanel() + panel.Add(defaultBreakerName, breaker) + + return &transport{panel: panel, threshold: threshold, timeout: timeout} +} diff --git a/lib/transport/transport_test.go b/lib/transport/transport_test.go new file mode 100644 index 000000000..bd3bd2838 --- /dev/null +++ b/lib/transport/transport_test.go @@ -0,0 +1,61 @@ +package transport + +import ( + "context" + "net" + "testing" + "time" + + manet "github.com/multiformats/go-multiaddr-net" + circuit "github.com/rubyist/circuitbreaker" + "github.com/stretchr/testify/require" +) + +func TestTransport(t *testing.T) { + var ( + attempts = int64(5) + lc net.ListenConfig + tr = New(attempts, time.Second) + ctx, cancel = context.WithCancel(context.TODO()) + ) + + defer cancel() + + lis1, err := lc.Listen(ctx, "tcp", ":0") + require.NoError(t, err) + + addr1, err := manet.FromNetAddr(lis1.Addr()) + require.NoError(t, err) + + _, err = tr.Dial(ctx, addr1, false) + require.NoError(t, err) + + lis2, err := lc.Listen(ctx, "tcp", ":0") + require.NoError(t, err) + + addr2, err := manet.FromNetAddr(lis2.Addr()) + require.NoError(t, err) + + _, err = tr.Dial(ctx, addr1, false) + require.NoError(t, err) + + require.NoError(t, lis1.Close()) + + for i := int64(0); i < 10; i++ { + _, err = tr.Dial(ctx, addr1, false) + require.Error(t, err) + + if i >= attempts { + require.EqualError(t, err, circuit.ErrBreakerOpen.Error()) + } + + _, err = tr.Dial(ctx, addr2, false) + require.NoError(t, err) + } + + time.Sleep(time.Second) + + _, err = tr.Dial(ctx, addr1, false) + require.Error(t, err) + require.NotContains(t, err.Error(), circuit.ErrBreakerOpen.Error()) +} diff --git a/misc/build.go b/misc/build.go new file mode 100644 index 000000000..f4dab3063 --- /dev/null +++ b/misc/build.go @@ -0,0 +1,18 @@ +package misc + +const ( + // NodeName is an application name. + NodeName = "neofs-node" + + // Prefix is an application prefix. + Prefix = "neofs" + + // Build is an application build time. + Build = "now" + + // Version is an application version. + Version = "dev" + + // Debug is an application debug mode flag. 
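+	// Like Build and Version above, this default is presumably overridden at
+	// build time (e.g. via -ldflags "-X"); the literals here are only
+	// compile-time placeholders.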
+ Debug = "true" +) diff --git a/modules/bootstrap/healthy.go b/modules/bootstrap/healthy.go new file mode 100644 index 000000000..bd93fd0fd --- /dev/null +++ b/modules/bootstrap/healthy.go @@ -0,0 +1,95 @@ +package bootstrap + +import ( + "crypto/ecdsa" + "sync" + + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/nspcc-dev/neofs-node/lib/placement" + "github.com/nspcc-dev/neofs-node/services/public/state" + "github.com/spf13/viper" + "go.uber.org/dig" + "go.uber.org/zap" +) + +type ( + healthyParams struct { + dig.In + + Logger *zap.Logger + Viper *viper.Viper + Place placement.Component + Checkers []state.HealthChecker `group:"healthy"` + + // for ChangeState + PrivateKey *ecdsa.PrivateKey + + MorphNetmapContract *implementations.MorphNetmapContract + } + + healthyResult struct { + dig.Out + + HealthyClient HealthyClient + + StateService state.Service + } + + // HealthyClient is an interface of healthiness checking tool. + HealthyClient interface { + Healthy() error + } + + healthyClient struct { + *sync.RWMutex + healthy func() error + } +) + +const ( + errUnhealthy = internal.Error("unhealthy") +) + +func (h *healthyClient) setHandler(handler func() error) { + if handler == nil { + return + } + + h.Lock() + h.healthy = handler + h.Unlock() +} + +func (h *healthyClient) Healthy() error { + if h.healthy == nil { + return errUnhealthy + } + + return h.healthy() +} + +func newHealthy(p healthyParams) (res healthyResult, err error) { + sp := state.Params{ + Stater: p.Place, + Logger: p.Logger, + Viper: p.Viper, + Checkers: p.Checkers, + PrivateKey: p.PrivateKey, + MorphNetmapContract: p.MorphNetmapContract, + } + + if res.StateService, err = state.New(sp); err != nil { + return + } + + healthyClient := &healthyClient{ + RWMutex: new(sync.RWMutex), + } + + healthyClient.setHandler(res.StateService.Healthy) + + res.HealthyClient = healthyClient + + return +} diff --git a/modules/bootstrap/module.go b/modules/bootstrap/module.go new file mode 100644 index 000000000..8b31ed2e8 --- /dev/null +++ b/modules/bootstrap/module.go @@ -0,0 +1,10 @@ +package bootstrap + +import ( + "github.com/nspcc-dev/neofs-node/lib/fix/module" +) + +// Module is a module of bootstrap component. 
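+// It registers the newHealthy constructor, whose result provides the
+// HealthyClient and the state.Service to the rest of the application.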
+var Module = module.Module{ + {Constructor: newHealthy}, +} diff --git a/modules/grpc/billing.go b/modules/grpc/billing.go new file mode 100644 index 000000000..d8500c265 --- /dev/null +++ b/modules/grpc/billing.go @@ -0,0 +1,141 @@ +package grpc + +import ( + "context" + + "github.com/gogo/protobuf/proto" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/prometheus/client_golang/prometheus" + "google.golang.org/grpc" +) + +type ( + billingStream struct { + grpc.ServerStream + *grpc.StreamServerInfo + + input int + output int + cid string + } + + cider interface { + CID() refs.CID + } +) + +const ( + typeInput = "input" + typeOutput = "output" + + labelType = "type" + labelMethod = "method" + labelContainer = "container" +) + +var ( + serviceBillingBytes = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: "neofs", + Name: "billing_bytes", + Help: "Count of bytes received / sent for method and container", + }, []string{labelType, labelMethod, labelContainer}) + + serviceBillingCalls = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: "neofs", + Name: "billing_calls", + Help: "Count of calls for api methods", + }, []string{labelMethod, labelContainer}) +) + +func init() { + // Register billing metrics + prometheus.MustRegister(serviceBillingBytes) + prometheus.MustRegister(serviceBillingCalls) +} + +func getProtoSize(val interface{}) int { + if msg, ok := val.(proto.Message); ok && msg != nil { + return proto.Size(msg) + } + + return 0 +} + +func getProtoContainer(val interface{}) string { + if t, ok := val.(cider); ok && t != nil { + return t.CID().String() + } + + return "" +} + +func (b *billingStream) RecvMsg(msg interface{}) error { + err := b.ServerStream.RecvMsg(msg) + b.input += getProtoSize(msg) + + if cid := getProtoContainer(msg); cid != "" { + b.cid = cid + } + + return err +} + +func (b *billingStream) SendMsg(msg interface{}) error { + b.output += getProtoSize(msg) + + return b.ServerStream.SendMsg(msg) +} + +func (b *billingStream) report() { + labels := prometheus.Labels{ + labelMethod: b.FullMethod, + labelContainer: b.cid, + } + + serviceBillingCalls.With(labels).Inc() + + labels[labelType] = typeInput + serviceBillingBytes.With(labels).Add(float64(b.input)) + + labels[labelType] = typeOutput + serviceBillingBytes.With(labels).Add(float64(b.output)) +} + +func streamBilling(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + stream := &billingStream{ + ServerStream: ss, + StreamServerInfo: info, + } + + err := handler(srv, stream) + + stream.report() + + return err +} + +func unaryBilling(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (res interface{}, err error) { + input := getProtoSize(req) + cid := getProtoContainer(req) + + labels := prometheus.Labels{ + labelMethod: info.FullMethod, + labelContainer: cid, + } + + serviceBillingCalls.With(labels).Inc() + + if res, err = handler(ctx, req); err != nil { + return + } + + output := getProtoSize(res) + + labels[labelType] = typeInput + serviceBillingBytes.With(labels).Add(float64(input)) + + labels[labelType] = typeOutput + serviceBillingBytes.With(labels).Add(float64(output)) + + return +} diff --git a/modules/grpc/module.go b/modules/grpc/module.go new file mode 100644 index 000000000..7e2660391 --- /dev/null +++ b/modules/grpc/module.go @@ -0,0 +1,10 @@ +package grpc + +import ( + "github.com/nspcc-dev/neofs-node/lib/fix/module" +) + +// Module is a gRPC layer module. 
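+// It provides the routing constructor, which builds a *grpc.Server with
+// billing, logging and metrics interceptors enabled according to the
+// node.grpc.* options in Viper.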
+var Module = module.Module{ + {Constructor: routing}, +} diff --git a/modules/grpc/routing.go b/modules/grpc/routing.go new file mode 100644 index 000000000..d0fc6fca6 --- /dev/null +++ b/modules/grpc/routing.go @@ -0,0 +1,118 @@ +// About "github.com/nspcc-dev/neofs-node/lib/grpc" +// there's just alias for "google.golang.org/grpc" +// with Service-interface + +package grpc + +import ( + middleware "github.com/grpc-ecosystem/go-grpc-middleware" + gZap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap" + prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "github.com/spf13/viper" + "go.uber.org/dig" + "go.uber.org/zap" + "google.golang.org/grpc" +) + +type ( + // Service interface + Service interface { + Name() string + Register(*grpc.Server) + } + + // ServerParams to create gRPC-server + // and provide service-handlers + ServerParams struct { + dig.In + + Services []Service + Logger *zap.Logger + Viper *viper.Viper + } + + // ServicesResult ... + ServicesResult struct { + dig.Out + + Services []Service + } + + // Server type-alias + Server = grpc.Server + + // CallOption type-alias + CallOption = grpc.CallOption + + // ClientConn type-alias + ClientConn = grpc.ClientConn + + // ServerOption type-alias + ServerOption = grpc.ServerOption +) + +var ( + // DialContext func-alias + DialContext = grpc.DialContext + + // WithBlock func-alias + WithBlock = grpc.WithBlock + + // WithInsecure func-alias + WithInsecure = grpc.WithInsecure +) + +// NewServer creates a gRPC server which has no service registered and has not +// started to accept requests yet. +func NewServer(opts ...ServerOption) *Server { + return grpc.NewServer(opts...) +} + +// creates new gRPC server and attach handlers. +func routing(p ServerParams) *grpc.Server { + var ( + options []ServerOption + stream []grpc.StreamServerInterceptor + unary []grpc.UnaryServerInterceptor + ) + + if p.Viper.GetBool("node.grpc.billing") { + unary = append(unary, unaryBilling) + stream = append(stream, streamBilling) + } + + if p.Viper.GetBool("node.grpc.logging") { + stream = append(stream, gZap.StreamServerInterceptor(p.Logger)) + unary = append(unary, gZap.UnaryServerInterceptor(p.Logger)) + } + + if p.Viper.GetBool("node.grpc.metrics") { + stream = append(stream, prometheus.StreamServerInterceptor) + unary = append(unary, prometheus.UnaryServerInterceptor) + } + + // Add stream options: + if len(stream) > 0 { + options = append(options, + grpc.StreamInterceptor(middleware.ChainStreamServer(stream...)), + ) + } + + // Add unary options: + if len(unary) > 0 { + options = append(options, + grpc.UnaryInterceptor(middleware.ChainUnaryServer(unary...)), + ) + } + + g := grpc.NewServer(options...) + + // Service services here: + for _, service := range p.Services { + p.Logger.Info("register gRPC service", + zap.String("service", service.Name())) + service.Register(g) + } + + return g +} diff --git a/modules/morph/balance.go b/modules/morph/balance.go new file mode 100644 index 000000000..df3964421 --- /dev/null +++ b/modules/morph/balance.go @@ -0,0 +1,67 @@ +package morph + +import ( + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/nspcc-dev/neofs-node/services/public/accounting" + "github.com/pkg/errors" + "go.uber.org/dig" +) + +type balanceContractResult struct { + dig.Out + + BalanceContract implementations.MorphBalanceContract + + AccountingService accounting.Service +} + +// BalanceContractName is a name of Balance contract config sub-section. 
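+// With the "morph" prefix this yields option paths such as
+// "morph.balance.balance_of_method", assuming optPath joins its
+// segments with dots.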
+const BalanceContractName = "balance" + +const ( + balanceContractBalanceOfOpt = "balance_of_method" + + balanceContractDecimalsOfOpt = "decimals_method" +) + +// BalanceContractBalanceOfOptPath is a path to balanceOf method name option. +func BalanceContractBalanceOfOptPath() string { + return optPath(prefix, BalanceContractName, balanceContractBalanceOfOpt) +} + +// BalanceContractDecimalsOfOptPath is a path to decimals method name option. +func BalanceContractDecimalsOfOptPath() string { + return optPath(prefix, BalanceContractName, balanceContractDecimalsOfOpt) +} + +func newBalanceContract(p contractParams) (res balanceContractResult, err error) { + client, ok := p.MorphContracts[BalanceContractName] + if !ok { + err = errors.Errorf("missing %s contract client", BalanceContractName) + return + } + + morphClient := implementations.MorphBalanceContract{} + morphClient.SetBalanceContractClient(client) + + morphClient.SetBalanceOfMethodName( + p.Viper.GetString( + BalanceContractBalanceOfOptPath(), + ), + ) + morphClient.SetDecimalsMethodName( + p.Viper.GetString( + BalanceContractDecimalsOfOptPath(), + ), + ) + + if res.AccountingService, err = accounting.New(accounting.Params{ + MorphBalanceContract: morphClient, + }); err != nil { + return + } + + res.BalanceContract = morphClient + + return +} diff --git a/modules/morph/common.go b/modules/morph/common.go new file mode 100644 index 000000000..6584f7ae6 --- /dev/null +++ b/modules/morph/common.go @@ -0,0 +1,140 @@ +package morph + +import ( + "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/nspcc-dev/neofs-api-go/bootstrap" + "github.com/nspcc-dev/neofs-node/lib/blockchain/event" + "github.com/nspcc-dev/neofs-node/lib/blockchain/goclient" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/spf13/viper" + "go.uber.org/dig" + "go.uber.org/zap" +) + +// SmartContracts maps smart contract name to contract client. +type SmartContracts map[string]implementations.StaticContractClient + +// EventHandlers maps notification event name to handler information. 
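+// In newMorphContracts the map is keyed by the contract event option path
+// returned by ContractEventOptPath rather than by the raw event type.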
+type EventHandlers map[string]event.HandlerInfo + +type morphContractsParams struct { + dig.In + + Viper *viper.Viper + + GoClient *goclient.Client + + Listener event.Listener +} + +type contractParams struct { + dig.In + + Viper *viper.Viper + + Logger *zap.Logger + + MorphContracts SmartContracts + + NodeInfo bootstrap.NodeInfo +} + +func newMorphContracts(p morphContractsParams) (SmartContracts, EventHandlers, error) { + mContracts := make(map[string]implementations.StaticContractClient, len(ContractNames)) + mHandlers := make(map[string]event.HandlerInfo) + + for _, contractName := range ContractNames { + scHash, err := util.Uint160DecodeStringLE( + p.Viper.GetString( + ScriptHashOptPath(contractName), + ), + ) + if err != nil { + return nil, nil, err + } + + fee := util.Fixed8FromInt64( + p.Viper.GetInt64( + InvocationFeeOptPath(contractName), + ), + ) + + mContracts[contractName], err = implementations.NewStaticContractClient(p.GoClient, scHash, fee) + if err != nil { + return nil, nil, err + } + + // set event parsers + parserInfo := event.ParserInfo{} + parserInfo.SetScriptHash(scHash) + + handlerInfo := event.HandlerInfo{} + handlerInfo.SetScriptHash(scHash) + + for _, item := range mParsers[contractName] { + parserInfo.SetParser(item.parser) + + optPath := ContractEventOptPath(contractName, item.typ) + + typEvent := event.TypeFromString( + p.Viper.GetString(optPath), + ) + + parserInfo.SetType(typEvent) + handlerInfo.SetType(typEvent) + + p.Listener.SetParser(parserInfo) + + mHandlers[optPath] = handlerInfo + } + } + + return mContracts, mHandlers, nil +} + +const prefix = "morph" + +const ( + endpointOpt = "endpoint" + + dialTimeoutOpt = "dial_timeout" + + magicNumberOpt = "magic_number" + + scriptHashOpt = "script_hash" + + invocationFeeOpt = "invocation_fee" +) + +// ContractNames is a list of smart contract names. +var ContractNames = []string{ + containerContractName, + reputationContractName, + NetmapContractName, + BalanceContractName, +} + +// EndpointOptPath returns the config path to goclient endpoint. +func EndpointOptPath() string { + return optPath(prefix, endpointOpt) +} + +// MagicNumberOptPath returns the config path to goclient magic number. +func MagicNumberOptPath() string { + return optPath(prefix, magicNumberOpt) +} + +// DialTimeoutOptPath returns the config path to goclient dial timeout. +func DialTimeoutOptPath() string { + return optPath(prefix, dialTimeoutOpt) +} + +// ScriptHashOptPath calculates the config path to script hash config of particular contract. +func ScriptHashOptPath(name string) string { + return optPath(prefix, name, scriptHashOpt) +} + +// InvocationFeeOptPath calculates the config path to invocation fee config of particular contract. 
+func InvocationFeeOptPath(name string) string { + return optPath(prefix, name, invocationFeeOpt) +} diff --git a/modules/morph/container.go b/modules/morph/container.go new file mode 100644 index 000000000..770bf4b74 --- /dev/null +++ b/modules/morph/container.go @@ -0,0 +1,122 @@ +package morph + +import ( + "github.com/nspcc-dev/neofs-node/lib/acl" + "github.com/nspcc-dev/neofs-node/lib/container" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/pkg/errors" + "go.uber.org/dig" +) + +type containerContractResult struct { + dig.Out + + ContainerContract *implementations.MorphContainerContract + + BinaryExtendedACLStore acl.BinaryExtendedACLStore + + ExtendedACLSource acl.ExtendedACLSource + + ContainerStorage container.Storage +} + +const ( + containerContractName = "container" + + containerContractSetEACLOpt = "set_eacl_method" + + containerContractEACLOpt = "get_eacl_method" + + containerContractPutOpt = "put_method" + + containerContractGetOpt = "get_method" + + containerContractDelOpt = "delete_method" + + containerContractListOpt = "list_method" +) + +// ContainerContractSetEACLOptPath returns the config path to set eACL method name of Container contract. +func ContainerContractSetEACLOptPath() string { + return optPath(prefix, containerContractName, containerContractSetEACLOpt) +} + +// ContainerContractEACLOptPath returns the config path to get eACL method name of Container contract. +func ContainerContractEACLOptPath() string { + return optPath(prefix, containerContractName, containerContractEACLOpt) +} + +// ContainerContractPutOptPath returns the config path to put container method name of Container contract. +func ContainerContractPutOptPath() string { + return optPath(prefix, containerContractName, containerContractPutOpt) +} + +// ContainerContractGetOptPath returns the config path to get container method name of Container contract. +func ContainerContractGetOptPath() string { + return optPath(prefix, containerContractName, containerContractGetOpt) +} + +// ContainerContractDelOptPath returns the config path to delete container method name of Container contract. +func ContainerContractDelOptPath() string { + return optPath(prefix, containerContractName, containerContractDelOpt) +} + +// ContainerContractListOptPath returns the config path to list containers method name of Container contract. 
+func ContainerContractListOptPath() string { + return optPath(prefix, containerContractName, containerContractListOpt) +} + +func newContainerContract(p contractParams) (res containerContractResult, err error) { + client, ok := p.MorphContracts[containerContractName] + if !ok { + err = errors.Errorf("missing %s contract client", containerContractName) + return + } + + morphClient := new(implementations.MorphContainerContract) + morphClient.SetContainerContractClient(client) + + morphClient.SetEACLSetMethodName( + p.Viper.GetString( + ContainerContractSetEACLOptPath(), + ), + ) + morphClient.SetEACLGetMethodName( + p.Viper.GetString( + ContainerContractEACLOptPath(), + ), + ) + morphClient.SetContainerGetMethodName( + p.Viper.GetString( + ContainerContractGetOptPath(), + ), + ) + morphClient.SetContainerPutMethodName( + p.Viper.GetString( + ContainerContractPutOptPath(), + ), + ) + morphClient.SetContainerDeleteMethodName( + p.Viper.GetString( + ContainerContractDelOptPath(), + ), + ) + morphClient.SetContainerListMethodName( + p.Viper.GetString( + ContainerContractListOptPath(), + ), + ) + + res.ContainerContract = morphClient + + res.BinaryExtendedACLStore = morphClient + + res.ExtendedACLSource, err = implementations.ExtendedACLSourceFromBinary(res.BinaryExtendedACLStore) + if err != nil { + return + } + + res.ContainerStorage = morphClient + + return res, nil +} diff --git a/modules/morph/event.go b/modules/morph/event.go new file mode 100644 index 000000000..4df3f486c --- /dev/null +++ b/modules/morph/event.go @@ -0,0 +1,28 @@ +package morph + +import ( + "github.com/nspcc-dev/neofs-node/lib/blockchain/event" + "github.com/nspcc-dev/neofs-node/lib/blockchain/event/netmap" +) + +const eventOpt = "event" + +// NewEpochEventType is a config section of new epoch notification event. +const NewEpochEventType = "new_epoch" + +// ContractEventOptPath returns the config path to notification event name of particular contract. 
+func ContractEventOptPath(contract, event string) string { + return optPath(prefix, contract, eventOpt, event) +} + +var mParsers = map[string][]struct { + typ string + parser event.Parser +}{ + NetmapContractName: { + { + typ: NewEpochEventType, + parser: netmap.ParseNewEpoch, + }, + }, +} diff --git a/modules/morph/goclient.go b/modules/morph/goclient.go new file mode 100644 index 000000000..dd0359f2c --- /dev/null +++ b/modules/morph/goclient.go @@ -0,0 +1,32 @@ +package morph + +import ( + "context" + "crypto/ecdsa" + + "github.com/nspcc-dev/neo-go/pkg/config/netmode" + "github.com/nspcc-dev/neofs-node/lib/blockchain/goclient" + "github.com/spf13/viper" + "go.uber.org/dig" + "go.uber.org/zap" +) + +type morphClientParams struct { + dig.In + + Viper *viper.Viper + + Logger *zap.Logger + + Key *ecdsa.PrivateKey +} + +func newMorphClient(p morphClientParams) (*goclient.Client, error) { + return goclient.New(context.Background(), &goclient.Params{ + Log: p.Logger, + Key: p.Key, + Endpoint: p.Viper.GetString(optPath(prefix, endpointOpt)), + DialTimeout: p.Viper.GetDuration(optPath(prefix, dialTimeoutOpt)), + Magic: netmode.Magic(p.Viper.GetUint32(optPath(prefix, magicNumberOpt))), + }) +} diff --git a/modules/morph/listener.go b/modules/morph/listener.go new file mode 100644 index 000000000..4c334ced9 --- /dev/null +++ b/modules/morph/listener.go @@ -0,0 +1,53 @@ +package morph + +import ( + "context" + + "github.com/nspcc-dev/neofs-node/lib/blockchain/event" + "github.com/nspcc-dev/neofs-node/lib/blockchain/subscriber" + "github.com/spf13/viper" + "go.uber.org/dig" + "go.uber.org/zap" +) + +type eventListenerParams struct { + dig.In + + Viper *viper.Viper + + Logger *zap.Logger +} + +var listenerPrefix = optPath(prefix, "listener") + +const ( + listenerEndpointOpt = "endpoint" + + listenerDialTimeoutOpt = "dial_timeout" +) + +// ListenerEndpointOptPath returns the config path to event listener's endpoint. +func ListenerEndpointOptPath() string { + return optPath(listenerPrefix, listenerEndpointOpt) +} + +// ListenerDialTimeoutOptPath returns the config path to event listener's dial timeout. +func ListenerDialTimeoutOptPath() string { + return optPath(listenerPrefix, listenerDialTimeoutOpt) +} + +func newEventListener(p eventListenerParams) (event.Listener, error) { + sub, err := subscriber.New(context.Background(), &subscriber.Params{ + Log: p.Logger, + Endpoint: p.Viper.GetString(ListenerEndpointOptPath()), + DialTimeout: p.Viper.GetDuration(ListenerDialTimeoutOptPath()), + }) + if err != nil { + return nil, err + } + + return event.NewListener(event.ListenerParams{ + Logger: p.Logger, + Subscriber: sub, + }) +} diff --git a/modules/morph/module.go b/modules/morph/module.go new file mode 100644 index 000000000..c2ae26378 --- /dev/null +++ b/modules/morph/module.go @@ -0,0 +1,22 @@ +package morph + +import ( + "strings" + + "github.com/nspcc-dev/neofs-node/lib/fix/module" +) + +// Module is a Neo:Morph module. 
+var Module = module.Module{ + {Constructor: newMorphClient}, + {Constructor: newMorphContracts}, + {Constructor: newContainerContract}, + {Constructor: newReputationContract}, + {Constructor: newNetmapContract}, + {Constructor: newEventListener}, + {Constructor: newBalanceContract}, +} + +func optPath(sections ...string) string { + return strings.Join(sections, ".") +} diff --git a/modules/morph/netmap.go b/modules/morph/netmap.go new file mode 100644 index 000000000..3c5e4f66a --- /dev/null +++ b/modules/morph/netmap.go @@ -0,0 +1,115 @@ +package morph + +import ( + "github.com/nspcc-dev/neofs-node/lib/boot" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/nspcc-dev/neofs-node/lib/ir" + "github.com/nspcc-dev/neofs-node/lib/netmap" + "github.com/pkg/errors" + "go.uber.org/dig" +) + +type netmapContractResult struct { + dig.Out + + NetmapContract *implementations.MorphNetmapContract + + NetMapStorage netmap.Storage + + IRStorage ir.Storage + + StorageBootController boot.StorageBootController +} + +const ( + // NetmapContractName is a Netmap contract's config section name. + NetmapContractName = "netmap" + + netmapContractAddPeerOpt = "add_peer_method" + + netmapContractNewEpochOpt = "new_epoch_method" + + netmapContractNetmapOpt = "netmap_method" + + netmapContractUpdStateOpt = "update_state_method" + + netmapContractIRListOpt = "ir_list_method" +) + +// NetmapContractAddPeerOptPath returns the config path to add peer method of Netmap contract. +func NetmapContractAddPeerOptPath() string { + return optPath(prefix, NetmapContractName, netmapContractAddPeerOpt) +} + +// NetmapContractNewEpochOptPath returns the config path to new epoch method of Netmap contract. +func NetmapContractNewEpochOptPath() string { + return optPath(prefix, NetmapContractName, netmapContractNewEpochOpt) +} + +// NetmapContractNetmapOptPath returns the config path to get netmap method of Netmap contract. +func NetmapContractNetmapOptPath() string { + return optPath(prefix, NetmapContractName, netmapContractNetmapOpt) +} + +// NetmapContractUpdateStateOptPath returns the config path to update state method of Netmap contract. +func NetmapContractUpdateStateOptPath() string { + return optPath(prefix, NetmapContractName, netmapContractUpdStateOpt) +} + +// NetmapContractIRListOptPath returns the config path to inner ring list method of Netmap contract. 
+func NetmapContractIRListOptPath() string { + return optPath(prefix, NetmapContractName, netmapContractIRListOpt) +} + +func newNetmapContract(p contractParams) (res netmapContractResult, err error) { + client, ok := p.MorphContracts[NetmapContractName] + if !ok { + err = errors.Errorf("missing %s contract client", NetmapContractName) + return + } + + morphClient := new(implementations.MorphNetmapContract) + morphClient.SetNetmapContractClient(client) + + morphClient.SetAddPeerMethodName( + p.Viper.GetString( + NetmapContractAddPeerOptPath(), + ), + ) + morphClient.SetNewEpochMethodName( + p.Viper.GetString( + NetmapContractNewEpochOptPath(), + ), + ) + morphClient.SetNetMapMethodName( + p.Viper.GetString( + NetmapContractNetmapOptPath(), + ), + ) + morphClient.SetUpdateStateMethodName( + p.Viper.GetString( + NetmapContractUpdateStateOptPath(), + ), + ) + morphClient.SetIRListMethodName( + p.Viper.GetString( + NetmapContractIRListOptPath(), + ), + ) + + bootCtrl := boot.StorageBootController{} + bootCtrl.SetPeerBootstrapper(morphClient) + bootCtrl.SetLogger(p.Logger) + + bootPrm := boot.StorageBootParams{} + bootPrm.SetNodeInfo(&p.NodeInfo) + + bootCtrl.SetBootParams(bootPrm) + + res.StorageBootController = bootCtrl + res.NetmapContract = morphClient + res.NetMapStorage = morphClient + res.IRStorage = morphClient + + return res, nil +} diff --git a/modules/morph/reputation.go b/modules/morph/reputation.go new file mode 100644 index 000000000..e8c12434c --- /dev/null +++ b/modules/morph/reputation.go @@ -0,0 +1,59 @@ +package morph + +import ( + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/nspcc-dev/neofs-node/lib/peers" + "github.com/pkg/errors" + "go.uber.org/dig" +) + +type reputationContractResult struct { + dig.Out + + ReputationContract implementations.MorphReputationContract +} + +const ( + reputationContractName = "reputation" + + reputationContractPutOpt = "put_method" + + reputationContractListOpt = "list_method" +) + +// ReputationContractPutOptPath returns the config path to put method of Reputation contract. +func ReputationContractPutOptPath() string { + return optPath(prefix, reputationContractName, reputationContractPutOpt) +} + +// ReputationContractListOptPath returns the config path to list method of Reputation contract. 
+func ReputationContractListOptPath() string { + return optPath(prefix, reputationContractName, reputationContractListOpt) +} + +func newReputationContract(p contractParams, ps peers.Store) (res reputationContractResult, err error) { + cli, ok := p.MorphContracts[reputationContractName] + if !ok { + err = errors.Errorf("missing %s contract client", reputationContractName) + return + } + + morphClient := implementations.MorphReputationContract{} + morphClient.SetReputationContractClient(cli) + morphClient.SetPublicKeyStore(ps) + + morphClient.SetPutMethodName( + p.Viper.GetString( + ReputationContractPutOptPath(), + ), + ) + morphClient.SetListMethodName( + p.Viper.GetString( + ReputationContractListOptPath(), + ), + ) + + res.ReputationContract = morphClient + + return +} diff --git a/modules/network/http.go b/modules/network/http.go new file mode 100644 index 000000000..21fbd7226 --- /dev/null +++ b/modules/network/http.go @@ -0,0 +1,49 @@ +package network + +import ( + "github.com/fasthttp/router" + svc "github.com/nspcc-dev/neofs-node/modules/bootstrap" + "github.com/valyala/fasthttp" + "go.uber.org/dig" +) + +type ( + handlerParams struct { + dig.In + + Healthy svc.HealthyClient + } +) + +const ( + healthyState = "NeoFS node is " + defaultContentType = "text/plain; charset=utf-8" +) + +func newHTTPHandler(p handlerParams) (fasthttp.RequestHandler, error) { + r := router.New() + r.RedirectTrailingSlash = true + + r.GET("/-/ready/", func(c *fasthttp.RequestCtx) { + c.SetStatusCode(fasthttp.StatusOK) + c.SetBodyString(healthyState + "ready") + }) + + r.GET("/-/healthy/", func(c *fasthttp.RequestCtx) { + code := fasthttp.StatusOK + msg := "healthy" + + err := p.Healthy.Healthy() + if err != nil { + code = fasthttp.StatusBadRequest + msg = "unhealthy: " + err.Error() + } + + c.Response.Reset() + c.SetStatusCode(code) + c.SetContentType(defaultContentType) + c.SetBodyString(healthyState + msg) + }) + + return r.Handler, nil +} diff --git a/modules/network/module.go b/modules/network/module.go new file mode 100644 index 000000000..95c6041f3 --- /dev/null +++ b/modules/network/module.go @@ -0,0 +1,20 @@ +package network + +import ( + "github.com/nspcc-dev/neofs-node/lib/fix/module" + "github.com/nspcc-dev/neofs-node/lib/fix/web" +) + +// Module is a network layer module. 
+var Module = module.Module{ + {Constructor: newMuxer}, + {Constructor: newPeers}, + {Constructor: newPlacement}, + {Constructor: newTransport}, + + // Metrics is prometheus handler + {Constructor: web.NewMetrics}, + // Profiler is pprof handler + {Constructor: web.NewProfiler}, + {Constructor: newHTTPHandler}, +} diff --git a/modules/network/muxer.go b/modules/network/muxer.go new file mode 100644 index 000000000..63ad8fc5b --- /dev/null +++ b/modules/network/muxer.go @@ -0,0 +1,57 @@ +package network + +import ( + "time" + + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-node/lib/muxer" + "github.com/nspcc-dev/neofs-node/lib/peers" + "github.com/spf13/viper" + "github.com/valyala/fasthttp" + "go.uber.org/dig" + "go.uber.org/zap" + "google.golang.org/grpc" +) + +type muxerParams struct { + dig.In + + Logger *zap.Logger + P2P *grpc.Server + + Peers peers.Interface + + Address multiaddr.Multiaddr + ShutdownTTL time.Duration `name:"shutdown_ttl"` + API fasthttp.RequestHandler + Viper *viper.Viper +} + +const appName = "neofs-node" + +func newFastHTTPServer(p muxerParams) *fasthttp.Server { + srv := new(fasthttp.Server) + srv.Name = appName + srv.ReadBufferSize = p.Viper.GetInt("muxer.http.read_buffer_size") + srv.WriteBufferSize = p.Viper.GetInt("muxer.http.write_buffer_size") + srv.ReadTimeout = p.Viper.GetDuration("muxer.http.read_timeout") + srv.WriteTimeout = p.Viper.GetDuration("muxer.http.write_timeout") + srv.GetOnly = true + srv.DisableHeaderNamesNormalizing = true + srv.NoDefaultServerHeader = true + srv.NoDefaultContentType = true + srv.Handler = p.API + + return srv +} + +func newMuxer(p muxerParams) muxer.Mux { + return muxer.New(muxer.Params{ + P2P: p.P2P, + Peers: p.Peers, + Logger: p.Logger, + Address: p.Address, + ShutdownTTL: p.ShutdownTTL, + API: newFastHTTPServer(p), + }) +} diff --git a/modules/network/peers.go b/modules/network/peers.go new file mode 100644 index 000000000..f9af19c0a --- /dev/null +++ b/modules/network/peers.go @@ -0,0 +1,41 @@ +package network + +import ( + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-node/lib/peers" + "github.com/nspcc-dev/neofs-node/lib/transport" + "github.com/spf13/viper" + "go.uber.org/dig" + "go.uber.org/zap" +) + +type peersParams struct { + dig.In + + Viper *viper.Viper + Logger *zap.Logger + Address multiaddr.Multiaddr + Transport transport.Transport +} + +func newTransport(v *viper.Viper) transport.Transport { + return transport.New( + v.GetInt64("transport.attempts_count"), + v.GetDuration("transport.attempts_ttl"), + ) +} + +func newPeers(p peersParams) (peers.Interface, error) { + return peers.New(peers.Params{ + Logger: p.Logger, + Address: p.Address, + Transport: p.Transport, + Attempts: p.Viper.GetInt64("peers.attempts_count"), + AttemptsTTL: p.Viper.GetDuration("peers.attempts_ttl"), + ConnectionTTL: p.Viper.GetDuration("peers.connections_ttl"), + ConnectionIDLE: p.Viper.GetDuration("peers.connections_idle"), + MetricsTimeout: p.Viper.GetDuration("peers.metrics_timeout"), + KeepAliveTTL: p.Viper.GetDuration("peers.keep_alive.ttl"), + KeepAlivePingTTL: p.Viper.GetDuration("peers.keep_alive.ping"), + }) +} diff --git a/modules/network/placement.go b/modules/network/placement.go new file mode 100644 index 000000000..36959efdf --- /dev/null +++ b/modules/network/placement.go @@ -0,0 +1,79 @@ +package network + +import ( + "github.com/nspcc-dev/neofs-node/lib/blockchain/event" + netmapevent "github.com/nspcc-dev/neofs-node/lib/blockchain/event/netmap" + libcnr 
"github.com/nspcc-dev/neofs-node/lib/container" + "github.com/nspcc-dev/neofs-node/lib/netmap" + "github.com/nspcc-dev/neofs-node/lib/peers" + "github.com/nspcc-dev/neofs-node/lib/placement" + "github.com/nspcc-dev/neofs-node/modules/morph" + "github.com/nspcc-dev/neofs-node/services/public/state" + "go.uber.org/dig" + "go.uber.org/zap" +) + +type ( + placementParams struct { + dig.In + + Log *zap.Logger + Peers peers.Store + Fetcher libcnr.Storage + + MorphEventListener event.Listener + + NetMapStorage netmap.Storage + + MorphEventHandlers morph.EventHandlers + } + + placementOutput struct { + dig.Out + + Placement placement.Component + Healthy state.HealthChecker `group:"healthy"` + } +) + +const defaultChronologyDuraion = 2 + +func newPlacement(p placementParams) placementOutput { + place := placement.New(placement.Params{ + Log: p.Log, + Peerstore: p.Peers, + Fetcher: p.Fetcher, + ChronologyDuration: defaultChronologyDuraion, + }) + + if handlerInfo, ok := p.MorphEventHandlers[morph.ContractEventOptPath( + morph.NetmapContractName, + morph.NewEpochEventType, + )]; ok { + handlerInfo.SetHandler(func(ev event.Event) { + nmRes, err := p.NetMapStorage.GetNetMap(netmap.GetParams{}) + if err != nil { + p.Log.Error("could not get network map", + zap.String("error", err.Error()), + ) + return + } + + if err := place.Update( + ev.(netmapevent.NewEpoch).EpochNumber(), + nmRes.NetMap(), + ); err != nil { + p.Log.Error("could not update network map in placement component", + zap.String("error", err.Error()), + ) + } + }) + + p.MorphEventListener.RegisterHandler(handlerInfo) + } + + return placementOutput{ + Placement: place, + Healthy: place.(state.HealthChecker), + } +} diff --git a/modules/node/audit.go b/modules/node/audit.go new file mode 100644 index 000000000..a2c02b288 --- /dev/null +++ b/modules/node/audit.go @@ -0,0 +1,63 @@ +package node + +import ( + "crypto/ecdsa" + + "github.com/nspcc-dev/neofs-api-go/session" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/nspcc-dev/neofs-node/lib/peers" + "github.com/nspcc-dev/neofs-node/services/public/object" + "github.com/spf13/viper" + "go.uber.org/zap" +) + +type ( + cnrHandlerParams struct { + *viper.Viper + *zap.Logger + Placer implementations.ObjectPlacer + PeerStore peers.Store + Peers peers.Interface + TimeoutsPrefix string + Key *ecdsa.PrivateKey + + TokenStore session.PrivateTokenStore + } +) + +func newObjectsContainerHandler(p cnrHandlerParams) (implementations.SelectiveContainerExecutor, error) { + as, err := implementations.NewAddressStore(p.PeerStore, p.Logger) + if err != nil { + return nil, err + } + + multiTransport, err := object.NewMultiTransport(object.MultiTransportParams{ + AddressStore: as, + EpochReceiver: p.Placer, + RemoteService: object.NewRemoteService(p.Peers), + Logger: p.Logger, + Key: p.Key, + PutTimeout: p.Viper.GetDuration(p.TimeoutsPrefix + ".timeouts.put"), + GetTimeout: p.Viper.GetDuration(p.TimeoutsPrefix + ".timeouts.get"), + HeadTimeout: p.Viper.GetDuration(p.TimeoutsPrefix + ".timeouts.head"), + SearchTimeout: p.Viper.GetDuration(p.TimeoutsPrefix + ".timeouts.search"), + RangeHashTimeout: p.Viper.GetDuration(p.TimeoutsPrefix + ".timeouts.range_hash"), + DialTimeout: p.Viper.GetDuration("object.dial_timeout"), + + PrivateTokenStore: p.TokenStore, + }) + if err != nil { + return nil, err + } + + exec, err := implementations.NewContainerTraverseExecutor(multiTransport) + if err != nil { + return nil, err + } + + return 
implementations.NewObjectContainerHandler(implementations.ObjectContainerHandlerParams{ + NodeLister: p.Placer, + Executor: exec, + Logger: p.Logger, + }) +} diff --git a/modules/node/container.go b/modules/node/container.go new file mode 100644 index 000000000..af081cb4c --- /dev/null +++ b/modules/node/container.go @@ -0,0 +1,31 @@ +package node + +import ( + "github.com/nspcc-dev/neofs-node/lib/acl" + libcnr "github.com/nspcc-dev/neofs-node/lib/container" + svc "github.com/nspcc-dev/neofs-node/modules/bootstrap" + "github.com/nspcc-dev/neofs-node/services/public/container" + "go.uber.org/dig" + "go.uber.org/zap" +) + +type cnrParams struct { + dig.In + + Logger *zap.Logger + + Healthy svc.HealthyClient + + ExtendedACLStore acl.BinaryExtendedACLStore + + ContainerStorage libcnr.Storage +} + +func newContainerService(p cnrParams) (container.Service, error) { + return container.New(container.Params{ + Logger: p.Logger, + Healthy: p.Healthy, + Store: p.ContainerStorage, + ExtendedACLStore: p.ExtendedACLStore, + }) +} diff --git a/modules/node/core.go b/modules/node/core.go new file mode 100644 index 000000000..665836eee --- /dev/null +++ b/modules/node/core.go @@ -0,0 +1,29 @@ +package node + +import ( + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/nspcc-dev/neofs-node/lib/storage" + "github.com/spf13/viper" + "go.uber.org/zap" +) + +func listBuckets(v *viper.Viper) []core.BucketType { + var ( + items = v.GetStringMap("storage") + result = make([]core.BucketType, 0, len(items)) + ) + + for name := range items { + result = append(result, core.BucketType(name)) + } + + return result +} + +func newStorage(l *zap.Logger, v *viper.Viper) (core.Storage, error) { + return storage.New(storage.Params{ + Viper: v, + Logger: l, + Buckets: listBuckets(v), + }) +} diff --git a/modules/node/localstore.go b/modules/node/localstore.go new file mode 100644 index 000000000..7be10bed0 --- /dev/null +++ b/modules/node/localstore.go @@ -0,0 +1,64 @@ +package node + +import ( + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/nspcc-dev/neofs-node/lib/localstore" + "github.com/nspcc-dev/neofs-node/lib/meta" + "github.com/nspcc-dev/neofs-node/lib/metrics" + "go.uber.org/atomic" + "go.uber.org/dig" + "go.uber.org/zap" +) + +type ( + localstoreParams struct { + dig.In + + Logger *zap.Logger + Storage core.Storage + Counter *atomic.Float64 + Collector metrics.Collector + } + + metaIterator struct { + iter localstore.Iterator + } +) + +func newMetaIterator(iter localstore.Iterator) meta.Iterator { + return &metaIterator{iter: iter} +} + +func (m *metaIterator) Iterate(handler meta.IterateFunc) error { + return m.iter.Iterate(nil, func(objMeta *localstore.ObjectMeta) bool { + return handler == nil || handler(objMeta.Object) != nil + }) +} + +func newLocalstore(p localstoreParams) (localstore.Localstore, error) { + metaBucket, err := p.Storage.GetBucket(core.MetaStore) + if err != nil { + return nil, err + } + + blobBucket, err := p.Storage.GetBucket(core.BlobStore) + if err != nil { + return nil, err + } + + local, err := localstore.New(localstore.Params{ + BlobBucket: blobBucket, + MetaBucket: metaBucket, + Logger: p.Logger, + Collector: p.Collector, + }) + if err != nil { + return nil, err + } + + iter := newMetaIterator(local) + p.Collector.SetCounter(local) + p.Collector.SetIterator(iter) + + return local, nil +} diff --git a/modules/node/metrics.go b/modules/node/metrics.go new file mode 100644 index 000000000..0faad5d1c --- /dev/null +++ b/modules/node/metrics.go @@ -0,0 +1,52 @@ +package node 
+ +import ( + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/nspcc-dev/neofs-node/lib/metrics" + mService "github.com/nspcc-dev/neofs-node/services/metrics" + "github.com/spf13/viper" + "go.uber.org/atomic" + "go.uber.org/dig" + "go.uber.org/zap" +) + +type ( + metricsParams struct { + dig.In + + Logger *zap.Logger + Options []string `name:"node_options"` + Viper *viper.Viper + Store core.Storage + } + + metricsServiceParams struct { + dig.In + + Logger *zap.Logger + Collector metrics.Collector + } +) + +func newObjectCounter() *atomic.Float64 { return atomic.NewFloat64(0) } + +func newMetricsService(p metricsServiceParams) (mService.Service, error) { + return mService.New(mService.Params{ + Logger: p.Logger, + Collector: p.Collector, + }) +} + +func newMetricsCollector(p metricsParams) (metrics.Collector, error) { + store, err := p.Store.GetBucket(core.SpaceMetricsStore) + if err != nil { + return nil, err + } + + return metrics.New(metrics.Params{ + Options: p.Options, + Logger: p.Logger, + Interval: p.Viper.GetDuration("metrics_collector.interval"), + MetricsStore: store, + }) +} diff --git a/modules/node/module.go b/modules/node/module.go new file mode 100644 index 000000000..83a81b484 --- /dev/null +++ b/modules/node/module.go @@ -0,0 +1,91 @@ +package node + +import ( + "github.com/nspcc-dev/neofs-api-go/session" + "github.com/nspcc-dev/neofs-node/lib/blockchain/event" + "github.com/nspcc-dev/neofs-node/lib/boot" + "github.com/nspcc-dev/neofs-node/lib/fix/module" + "github.com/nspcc-dev/neofs-node/lib/fix/worker" + "github.com/nspcc-dev/neofs-node/lib/metrics" + "github.com/nspcc-dev/neofs-node/lib/netmap" + "github.com/nspcc-dev/neofs-node/lib/peers" + "github.com/nspcc-dev/neofs-node/lib/replication" + "github.com/nspcc-dev/neofs-node/modules/bootstrap" + "github.com/nspcc-dev/neofs-node/modules/grpc" + "github.com/nspcc-dev/neofs-node/modules/morph" + "github.com/nspcc-dev/neofs-node/modules/network" + "github.com/nspcc-dev/neofs-node/modules/settings" + "github.com/nspcc-dev/neofs-node/modules/workers" + "github.com/spf13/viper" + "go.uber.org/dig" + "go.uber.org/zap" +) + +type jobParams struct { + dig.In + + Logger *zap.Logger + Viper *viper.Viper + Peers peers.Store + + Replicator replication.Manager + PeersInterface peers.Interface + Metrics metrics.Collector + + MorphEventListener event.Listener + + StorageBootController boot.StorageBootController +} + +// Module is a NeoFS node module. 
+var Module = module.Module{ + {Constructor: attachJobs}, + {Constructor: newPeerstore}, + {Constructor: attachServices}, + {Constructor: netmap.NewNetmap}, + {Constructor: newStorage}, + {Constructor: newMetricsCollector}, + {Constructor: newObjectCounter}, + + // -- Container gRPC handlers -- // + {Constructor: newContainerService}, + + // -- gRPC Services -- // + + // -- Local store -- // + {Constructor: newLocalstore}, + + // -- Object manager -- // + {Constructor: newObjectManager}, + + // -- Replication manager -- // + {Constructor: newReplicationManager}, + + // -- Session service -- // + {Constructor: session.NewMapTokenStore}, + {Constructor: newSessionService}, + + // -- Placement tool -- // + {Constructor: newPlacementTool}, + + // metrics service -- // + {Constructor: newMetricsService}, +}.Append( + // app specific modules: + grpc.Module, + network.Module, + workers.Module, + settings.Module, + bootstrap.Module, + morph.Module, +) + +func attachJobs(p jobParams) worker.Jobs { + return worker.Jobs{ + "peers": p.PeersInterface.Job, + "metrics": p.Metrics.Start, + "event_listener": p.MorphEventListener.Listen, + "replicator": p.Replicator.Process, + "boot": p.StorageBootController.Bootstrap, + } +} diff --git a/modules/node/objectmanager.go b/modules/node/objectmanager.go new file mode 100644 index 000000000..6d96f5c71 --- /dev/null +++ b/modules/node/objectmanager.go @@ -0,0 +1,219 @@ +package node + +import ( + "crypto/ecdsa" + + "github.com/nspcc-dev/neofs-api-go/bootstrap" + "github.com/nspcc-dev/neofs-api-go/hash" + apiobj "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/session" + libacl "github.com/nspcc-dev/neofs-node/lib/acl" + "github.com/nspcc-dev/neofs-node/lib/container" + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/nspcc-dev/neofs-node/lib/ir" + "github.com/nspcc-dev/neofs-node/lib/localstore" + "github.com/nspcc-dev/neofs-node/lib/peers" + "github.com/nspcc-dev/neofs-node/lib/placement" + "github.com/nspcc-dev/neofs-node/lib/transformer" + "github.com/nspcc-dev/neofs-node/services/public/object" + "github.com/spf13/viper" + "go.uber.org/dig" + "go.uber.org/zap" +) + +type ( + objectManagerParams struct { + dig.In + + Logger *zap.Logger + Viper *viper.Viper + LocalStore localstore.Localstore + + PeersInterface peers.Interface + + Peers peers.Store + Placement placement.Component + TokenStore session.PrivateTokenStore + Options []string `name:"node_options"` + Key *ecdsa.PrivateKey + + IRStorage ir.Storage + + EpochReceiver implementations.EpochReceiver + + Placer implementations.ObjectPlacer + + ExtendedACLStore libacl.ExtendedACLSource + + ContainerStorage container.Storage + } +) + +const ( + transformersSectionPath = "object.transformers." 
+ + aclMandatorySetBits = 0x04040444 +) + +const xorSalitor = "xor" + +func newObjectManager(p objectManagerParams) (object.Service, error) { + var sltr object.Salitor + + if p.Viper.GetString("object.salitor") == xorSalitor { + sltr = hash.SaltXOR + } + + as, err := implementations.NewAddressStore(p.Peers, p.Logger) + if err != nil { + return nil, err + } + + rs := object.NewRemoteService(p.PeersInterface) + + pto := p.Viper.GetDuration("object.put.timeout") + gto := p.Viper.GetDuration("object.get.timeout") + hto := p.Viper.GetDuration("object.head.timeout") + sto := p.Viper.GetDuration("object.search.timeout") + rhto := p.Viper.GetDuration("object.range_hash.timeout") + dto := p.Viper.GetDuration("object.dial_timeout") + + tr, err := object.NewMultiTransport(object.MultiTransportParams{ + AddressStore: as, + EpochReceiver: p.EpochReceiver, + RemoteService: rs, + Logger: p.Logger, + Key: p.Key, + PutTimeout: pto, + GetTimeout: gto, + HeadTimeout: hto, + SearchTimeout: sto, + RangeHashTimeout: rhto, + DialTimeout: dto, + + PrivateTokenStore: p.TokenStore, + }) + if err != nil { + return nil, err + } + + exec, err := implementations.NewContainerTraverseExecutor(tr) + if err != nil { + return nil, err + } + + selectiveExec, err := implementations.NewObjectContainerHandler(implementations.ObjectContainerHandlerParams{ + NodeLister: p.Placer, + Executor: exec, + Logger: p.Logger, + }) + if err != nil { + return nil, err + } + + sgInfoRecv, err := implementations.NewStorageGroupInfoReceiver(implementations.StorageGroupInfoReceiverParams{ + SelectiveContainerExecutor: selectiveExec, + Logger: p.Logger, + }) + if err != nil { + return nil, err + } + + verifier, err := implementations.NewLocalIntegrityVerifier( + core.NewNeoKeyVerifier(), + ) + if err != nil { + return nil, err + } + + trans, err := transformer.NewTransformer(transformer.Params{ + SGInfoReceiver: sgInfoRecv, + EpochReceiver: p.EpochReceiver, + SizeLimit: uint64(p.Viper.GetInt64(transformersSectionPath+"payload_limiter.max_payload_size") * apiobj.UnitsKB), + Verifier: verifier, + }) + if err != nil { + return nil, err + } + + aclChecker := libacl.NewMaskedBasicACLChecker(aclMandatorySetBits, libacl.DefaultAndFilter) + + aclHelper, err := implementations.NewACLHelper(p.ContainerStorage) + if err != nil { + return nil, err + } + + verifier, err = implementations.NewLocalHeadIntegrityVerifier( + core.NewNeoKeyVerifier(), + ) + if err != nil { + return nil, err + } + + return object.New(&object.Params{ + Verifier: verifier, + Salitor: sltr, + LocalStore: p.LocalStore, + MaxProcessingSize: p.Viper.GetUint64("object.max_processing_size") * uint64(apiobj.UnitsMB), + StorageCapacity: bootstrap.NodeInfo{Options: p.Options}.Capacity() * uint64(apiobj.UnitsGB), + PoolSize: p.Viper.GetInt("object.workers_count"), + Placer: p.Placer, + Transformer: trans, + ObjectRestorer: transformer.NewRestorePipeline( + transformer.SplitRestorer(), + ), + RemoteService: rs, + AddressStore: as, + Logger: p.Logger, + TokenStore: p.TokenStore, + EpochReceiver: p.EpochReceiver, + ContainerNodesLister: p.Placer, + Key: p.Key, + CheckACL: p.Viper.GetBool("object.check_acl"), + DialTimeout: p.Viper.GetDuration("object.dial_timeout"), + MaxPayloadSize: p.Viper.GetUint64("object.transformers.payload_limiter.max_payload_size") * uint64(apiobj.UnitsKB), + PutParams: object.OperationParams{ + Timeout: pto, + LogErrors: p.Viper.GetBool("object.put.log_errs"), + }, + GetParams: object.OperationParams{ + Timeout: gto, + LogErrors: p.Viper.GetBool("object.get.log_errs"), + 
}, + HeadParams: object.OperationParams{ + Timeout: hto, + LogErrors: p.Viper.GetBool("object.head.log_errs"), + }, + DeleteParams: object.OperationParams{ + Timeout: p.Viper.GetDuration("object.delete.timeout"), + LogErrors: p.Viper.GetBool("object.get.log_errs"), + }, + SearchParams: object.OperationParams{ + Timeout: sto, + LogErrors: p.Viper.GetBool("object.search.log_errs"), + }, + RangeParams: object.OperationParams{ + Timeout: p.Viper.GetDuration("object.range.timeout"), + LogErrors: p.Viper.GetBool("object.range.log_errs"), + }, + RangeHashParams: object.OperationParams{ + Timeout: rhto, + LogErrors: p.Viper.GetBool("object.range_hash.log_errs"), + }, + Assembly: p.Viper.GetBool("object.assembly"), + + WindowSize: p.Viper.GetInt("object.window_size"), + + ACLHelper: aclHelper, + BasicACLChecker: aclChecker, + IRStorage: p.IRStorage, + ContainerLister: p.Placer, + + SGInfoReceiver: sgInfoRecv, + + OwnerKeyVerifier: core.NewNeoKeyVerifier(), + + ExtendedACLSource: p.ExtendedACLStore, + }) +} diff --git a/modules/node/peerstore.go b/modules/node/peerstore.go new file mode 100644 index 000000000..1ccd1f1d6 --- /dev/null +++ b/modules/node/peerstore.go @@ -0,0 +1,28 @@ +package node + +import ( + "crypto/ecdsa" + + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-node/lib/peers" + "go.uber.org/dig" + "go.uber.org/zap" +) + +type peerstoreParams struct { + dig.In + + Logger *zap.Logger + PrivateKey *ecdsa.PrivateKey + Address multiaddr.Multiaddr + Store peers.Storage `optional:"true"` +} + +func newPeerstore(p peerstoreParams) (peers.Store, error) { + return peers.NewStore(peers.StoreParams{ + Storage: p.Store, + Logger: p.Logger, + Addr: p.Address, + Key: p.PrivateKey, + }) +} diff --git a/modules/node/placement.go b/modules/node/placement.go new file mode 100644 index 000000000..9834f7b60 --- /dev/null +++ b/modules/node/placement.go @@ -0,0 +1,33 @@ +package node + +import ( + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/nspcc-dev/neofs-node/lib/placement" + "go.uber.org/dig" +) + +type ( + placementToolParams struct { + dig.In + + Placement placement.Component + } + + placementToolResult struct { + dig.Out + + Placer implementations.ObjectPlacer + + Receiver implementations.EpochReceiver + } +) + +func newPlacementTool(p placementToolParams) (res placementToolResult, err error) { + if res.Placer, err = implementations.NewObjectPlacer(p.Placement); err != nil { + return + } + + res.Receiver = res.Placer + + return +} diff --git a/modules/node/replication.go b/modules/node/replication.go new file mode 100644 index 000000000..546fdda9b --- /dev/null +++ b/modules/node/replication.go @@ -0,0 +1,394 @@ +package node + +import ( + "context" + "crypto/ecdsa" + + "github.com/nspcc-dev/neofs-api-go/hash" + "github.com/nspcc-dev/neofs-api-go/session" + "github.com/nspcc-dev/neofs-node/lib/blockchain/event" + "github.com/nspcc-dev/neofs-node/lib/blockchain/event/netmap" + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/nspcc-dev/neofs-node/lib/ir" + "github.com/nspcc-dev/neofs-node/lib/localstore" + "github.com/nspcc-dev/neofs-node/lib/peers" + "github.com/nspcc-dev/neofs-node/lib/placement" + "github.com/nspcc-dev/neofs-node/lib/replication" + "github.com/nspcc-dev/neofs-node/modules/morph" + "github.com/pkg/errors" + "github.com/spf13/viper" + "go.uber.org/dig" + "go.uber.org/zap" +) + +type ( + replicationManagerParams struct { + dig.In + + Viper *viper.Viper + + PeersInterface 
peers.Interface + + LocalStore localstore.Localstore + Peers peers.Store + Placement placement.Component + Logger *zap.Logger + Lister ir.Storage + Key *ecdsa.PrivateKey + + Placer implementations.ObjectPlacer + + TokenStore session.PrivateTokenStore + + MorphEventListener event.Listener + MorphEventHandlers morph.EventHandlers + } +) + +const ( + mainReplicationPrefix = "replication" + managerPrefix = "manager" + placementHonorerPrefix = "placement_honorer" + locationDetectorPrefix = "location_detector" + storageValidatorPrefix = "storage_validator" + replicatorPrefix = "replicator" + restorerPrefix = "restorer" +) + +func newReplicationManager(p replicationManagerParams) (replication.Manager, error) { + as, err := implementations.NewAddressStore(p.Peers, p.Logger) + if err != nil { + return nil, err + } + + ms, err := replication.NewMultiSolver(replication.MultiSolverParams{ + AddressStore: as, + Placement: p.Placement, + }) + if err != nil { + return nil, err + } + + op := replication.NewObjectPool() + + schd, err := replication.NewReplicationScheduler(replication.SchedulerParams{ + ContainerActualityChecker: ms, + Iterator: p.LocalStore, + }) + if err != nil { + return nil, err + } + + integrityVerifier, err := implementations.NewLocalIntegrityVerifier( + core.NewNeoKeyVerifier(), + ) + if err != nil { + return nil, err + } + + verifier, err := implementations.NewObjectValidator(&implementations.ObjectValidatorParams{ + AddressStore: ms, + Localstore: p.LocalStore, + Logger: p.Logger, + Verifier: integrityVerifier, + }) + if err != nil { + return nil, err + } + + placementHonorer, err := newPlacementHonorer(p, ms) + if err != nil { + return nil, err + } + + locationDetector, err := newLocationDetector(p, ms) + if err != nil { + return nil, err + } + + storageValidator, err := newStorageValidator(p, ms) + if err != nil { + return nil, err + } + + replicator, err := newObjectReplicator(p, ms) + if err != nil { + return nil, err + } + + restorer, err := newRestorer(p, ms) + if err != nil { + return nil, err + } + + prefix := mainReplicationPrefix + "." + managerPrefix + "." + capPrefix := prefix + "capacities." 
+ + mngr, err := replication.NewManager(replication.ManagerParams{ + Interval: p.Viper.GetDuration(prefix + "read_pool_interval"), + PushTaskTimeout: p.Viper.GetDuration(prefix + "push_task_timeout"), + InitPoolSize: p.Viper.GetInt(prefix + "pool_size"), + ExpansionRate: p.Viper.GetFloat64(prefix + "pool_expansion_rate"), + PlacementHonorerEnabled: p.Viper.GetBool(prefix + "placement_honorer_enabled"), + ReplicateTaskChanCap: p.Viper.GetInt(capPrefix + "replicate"), + RestoreTaskChanCap: p.Viper.GetInt(capPrefix + "restore"), + GarbageChanCap: p.Viper.GetInt(capPrefix + "garbage"), + ObjectPool: op, + ObjectVerifier: verifier, + PlacementHonorer: placementHonorer, + ObjectLocationDetector: locationDetector, + StorageValidator: storageValidator, + ObjectReplicator: replicator, + ObjectRestorer: restorer, + Scheduler: schd, + Logger: p.Logger, + }) + if err != nil { + return nil, err + } + + if handlerInfo, ok := p.MorphEventHandlers[morph.ContractEventOptPath( + morph.NetmapContractName, + morph.NewEpochEventType, + )]; ok { + handlerInfo.SetHandler(func(ev event.Event) { + mngr.HandleEpoch( + context.Background(), + ev.(netmap.NewEpoch).EpochNumber(), + ) + }) + + p.MorphEventListener.RegisterHandler(handlerInfo) + } + + return mngr, nil +} + +func newPlacementHonorer(p replicationManagerParams, rss replication.RemoteStorageSelector) (replication.PlacementHonorer, error) { + prefix := mainReplicationPrefix + "." + placementHonorerPrefix + + och, err := newObjectsContainerHandler(cnrHandlerParams{ + Viper: p.Viper, + Logger: p.Logger, + Placer: p.Placer, + PeerStore: p.Peers, + Peers: p.PeersInterface, + TimeoutsPrefix: prefix, + Key: p.Key, + + TokenStore: p.TokenStore, + }) + if err != nil { + return nil, err + } + + storage, err := implementations.NewObjectStorage(implementations.ObjectStorageParams{ + Localstore: p.LocalStore, + SelectiveContainerExecutor: och, + Logger: p.Logger, + }) + if err != nil { + return nil, err + } + + return replication.NewPlacementHonorer(replication.PlacementHonorerParams{ + ObjectSource: storage, + ObjectReceptacle: storage, + RemoteStorageSelector: rss, + PresenceChecker: p.LocalStore, + Logger: p.Logger, + TaskChanCap: p.Viper.GetInt(prefix + ".chan_capacity"), + ResultTimeout: p.Viper.GetDuration(prefix + ".result_timeout"), + }) +} + +func newLocationDetector(p replicationManagerParams, ms replication.MultiSolver) (replication.ObjectLocationDetector, error) { + prefix := mainReplicationPrefix + "." + locationDetectorPrefix + + och, err := newObjectsContainerHandler(cnrHandlerParams{ + Viper: p.Viper, + Logger: p.Logger, + Placer: p.Placer, + PeerStore: p.Peers, + Peers: p.PeersInterface, + TimeoutsPrefix: prefix, + Key: p.Key, + + TokenStore: p.TokenStore, + }) + if err != nil { + return nil, err + } + + locator, err := implementations.NewObjectLocator(implementations.LocatorParams{ + SelectiveContainerExecutor: och, + Logger: p.Logger, + }) + if err != nil { + return nil, err + } + + return replication.NewLocationDetector(&replication.LocationDetectorParams{ + WeightComparator: ms, + ObjectLocator: locator, + ReservationRatioReceiver: ms, + PresenceChecker: p.LocalStore, + Logger: p.Logger, + TaskChanCap: p.Viper.GetInt(prefix + ".chan_capacity"), + ResultTimeout: p.Viper.GetDuration(prefix + ".result_timeout"), + }) +} + +func newStorageValidator(p replicationManagerParams, as replication.AddressStore) (replication.StorageValidator, error) { + prefix := mainReplicationPrefix + "." 
+ storageValidatorPrefix + + var sltr implementations.Salitor + + switch v := p.Viper.GetString(prefix + ".salitor"); v { + case xorSalitor: + sltr = hash.SaltXOR + default: + return nil, errors.Errorf("unsupported salitor: %s", v) + } + + och, err := newObjectsContainerHandler(cnrHandlerParams{ + Viper: p.Viper, + Logger: p.Logger, + Placer: p.Placer, + PeerStore: p.Peers, + Peers: p.PeersInterface, + TimeoutsPrefix: prefix, + Key: p.Key, + + TokenStore: p.TokenStore, + }) + if err != nil { + return nil, err + } + + headVerifier, err := implementations.NewLocalHeadIntegrityVerifier( + core.NewNeoKeyVerifier(), + ) + if err != nil { + return nil, err + } + + verifier, err := implementations.NewObjectValidator(&implementations.ObjectValidatorParams{ + AddressStore: as, + Localstore: p.LocalStore, + SelectiveContainerExecutor: och, + Logger: p.Logger, + Salitor: sltr, + SaltSize: p.Viper.GetInt(prefix + ".salt_size"), + MaxPayloadRangeSize: p.Viper.GetUint64(prefix + ".max_payload_range_size"), + PayloadRangeCount: p.Viper.GetInt(prefix + ".payload_range_count"), + Verifier: headVerifier, + }) + if err != nil { + return nil, err + } + + return replication.NewStorageValidator(replication.StorageValidatorParams{ + ObjectVerifier: verifier, + PresenceChecker: p.LocalStore, + Logger: p.Logger, + TaskChanCap: p.Viper.GetInt(prefix + ".chan_capacity"), + ResultTimeout: p.Viper.GetDuration(prefix + ".result_timeout"), + AddrStore: as, + }) +} + +func newObjectReplicator(p replicationManagerParams, rss replication.RemoteStorageSelector) (replication.ObjectReplicator, error) { + prefix := mainReplicationPrefix + "." + replicatorPrefix + + och, err := newObjectsContainerHandler(cnrHandlerParams{ + Viper: p.Viper, + Logger: p.Logger, + Placer: p.Placer, + PeerStore: p.Peers, + Peers: p.PeersInterface, + TimeoutsPrefix: prefix, + Key: p.Key, + + TokenStore: p.TokenStore, + }) + if err != nil { + return nil, err + } + + storage, err := implementations.NewObjectStorage(implementations.ObjectStorageParams{ + Localstore: p.LocalStore, + SelectiveContainerExecutor: och, + Logger: p.Logger, + }) + if err != nil { + return nil, err + } + + return replication.NewReplicator(replication.ObjectReplicatorParams{ + RemoteStorageSelector: rss, + ObjectSource: storage, + ObjectReceptacle: storage, + PresenceChecker: p.LocalStore, + Logger: p.Logger, + TaskChanCap: p.Viper.GetInt(prefix + ".chan_capacity"), + ResultTimeout: p.Viper.GetDuration(prefix + ".result_timeout"), + }) +} + +func newRestorer(p replicationManagerParams, ms replication.MultiSolver) (replication.ObjectRestorer, error) { + prefix := mainReplicationPrefix + "." 
+ restorerPrefix + + och, err := newObjectsContainerHandler(cnrHandlerParams{ + Viper: p.Viper, + Logger: p.Logger, + Placer: p.Placer, + PeerStore: p.Peers, + Peers: p.PeersInterface, + TimeoutsPrefix: prefix, + Key: p.Key, + + TokenStore: p.TokenStore, + }) + if err != nil { + return nil, err + } + + integrityVerifier, err := implementations.NewLocalIntegrityVerifier( + core.NewNeoKeyVerifier(), + ) + if err != nil { + return nil, err + } + + verifier, err := implementations.NewObjectValidator(&implementations.ObjectValidatorParams{ + AddressStore: ms, + Localstore: p.LocalStore, + SelectiveContainerExecutor: och, + Logger: p.Logger, + Verifier: integrityVerifier, + }) + if err != nil { + return nil, err + } + + storage, err := implementations.NewObjectStorage(implementations.ObjectStorageParams{ + Localstore: p.LocalStore, + Logger: p.Logger, + }) + if err != nil { + return nil, err + } + + return replication.NewObjectRestorer(&replication.ObjectRestorerParams{ + ObjectVerifier: verifier, + ObjectReceptacle: storage, + EpochReceiver: ms, + RemoteStorageSelector: ms, + PresenceChecker: p.LocalStore, + Logger: p.Logger, + TaskChanCap: p.Viper.GetInt(prefix + ".chan_capacity"), + ResultTimeout: p.Viper.GetDuration(prefix + ".result_timeout"), + }) +} diff --git a/modules/node/services.go b/modules/node/services.go new file mode 100644 index 000000000..d6c3cadca --- /dev/null +++ b/modules/node/services.go @@ -0,0 +1,36 @@ +package node + +import ( + "github.com/nspcc-dev/neofs-node/modules/grpc" + "github.com/nspcc-dev/neofs-node/services/metrics" + "github.com/nspcc-dev/neofs-node/services/public/accounting" + "github.com/nspcc-dev/neofs-node/services/public/container" + "github.com/nspcc-dev/neofs-node/services/public/object" + "github.com/nspcc-dev/neofs-node/services/public/session" + "github.com/nspcc-dev/neofs-node/services/public/state" + "go.uber.org/dig" +) + +type servicesParams struct { + dig.In + + Status state.Service + Container container.Service + Object object.Service + Session session.Service + Accounting accounting.Service + Metrics metrics.Service +} + +func attachServices(p servicesParams) grpc.ServicesResult { + return grpc.ServicesResult{ + Services: []grpc.Service{ + p.Status, + p.Container, + p.Accounting, + p.Metrics, + p.Session, + p.Object, + }, + } +} diff --git a/modules/node/session.go b/modules/node/session.go new file mode 100644 index 000000000..aaa252779 --- /dev/null +++ b/modules/node/session.go @@ -0,0 +1,26 @@ +package node + +import ( + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/nspcc-dev/neofs-node/services/public/session" + "go.uber.org/dig" + "go.uber.org/zap" +) + +type sessionParams struct { + dig.In + + Logger *zap.Logger + + TokenStore session.TokenStore + + EpochReceiver implementations.EpochReceiver +} + +func newSessionService(p sessionParams) (session.Service, error) { + return session.New(session.Params{ + TokenStore: p.TokenStore, + Logger: p.Logger, + EpochReceiver: p.EpochReceiver, + }), nil +} diff --git a/modules/settings/address.go b/modules/settings/address.go new file mode 100644 index 000000000..c1c9722c4 --- /dev/null +++ b/modules/settings/address.go @@ -0,0 +1,109 @@ +package settings + +import ( + "net" + "strconv" + "strings" + + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/pkg/errors" +) + +const ( + protoTCP = "tcp" + protoUDP = "udp" + protoQUIC = "quic" +) + +const emptyAddr = "0.0.0.0" + +const ip4ColonCount = 1 + +var ( + errEmptyAddress = 
internal.Error("`node.address` could not be empty") + errEmptyProtocol = internal.Error("`node.protocol` could not be empty") + errUnknownProtocol = internal.Error("`node.protocol` unknown protocol") + errEmptyShutdownTTL = internal.Error("`node.shutdown_ttl` could not be empty") +) + +func ipVersion(address string) string { + if strings.Count(address, ":") > ip4ColonCount { + return "ip6" + } + + return "ip4" +} + +func prepareAddress(address string) (string, error) { + host, port, err := net.SplitHostPort(address) + if err != nil { + return "", errors.Wrapf(err, "could not fetch host/port: %s", address) + } else if host == "" { + host = emptyAddr + } + + addr, err := net.ResolveIPAddr("ip", host) + if err != nil { + return "", errors.Wrapf(err, "could not resolve address: %s:%s", host, port) + } + + return net.JoinHostPort(addr.IP.String(), port), nil +} + +func resolveAddress(proto, address string) (string, string, error) { + var ( + ip net.IP + host, port string + ) + + switch proto { + case protoTCP: + addr, err := net.ResolveTCPAddr(protoTCP, address) + if err != nil { + return "", "", errors.Wrapf(err, "could not parse address: '%s'", address) + } + + ip = addr.IP + port = strconv.Itoa(addr.Port) + case protoUDP, protoQUIC: + addr, err := net.ResolveUDPAddr(protoUDP, address) + if err != nil { + return "", "", errors.Wrapf(err, "could not parse address: '%s'", address) + } + + ip = addr.IP + port = strconv.Itoa(addr.Port) + default: + return "", "", errors.Wrapf(errUnknownProtocol, "unknown protocol: '%s'", proto) + } + + if host = ip.String(); ip == nil { + host = emptyAddr + } + + return host, port, nil +} + +func multiAddressFromProtoAddress(proto, addr string) (multiaddr.Multiaddr, error) { + var ( + err error + host, port string + ipVer = ipVersion(addr) + ) + + if host, port, err = resolveAddress(proto, addr); err != nil { + return nil, errors.Wrapf(err, "could not resolve address: (%s) '%s'", proto, addr) + } + + items := []string{ + ipVer, + host, + proto, + port, + } + + addr = "/" + strings.Join(items, "/") + + return multiaddr.NewMultiaddr(addr) +} diff --git a/modules/settings/module.go b/modules/settings/module.go new file mode 100644 index 000000000..1e075103d --- /dev/null +++ b/modules/settings/module.go @@ -0,0 +1,10 @@ +package settings + +import ( + "github.com/nspcc-dev/neofs-node/lib/fix/module" +) + +// Module is a node settings module. 
+var Module = module.Module{
+ {Constructor: newNodeSettings},
+}
diff --git a/modules/settings/node.go b/modules/settings/node.go
new file mode 100644
index 000000000..47b940e69
--- /dev/null
+++ b/modules/settings/node.go
@@ -0,0 +1,149 @@
+package settings
+
+import (
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/multiformats/go-multiaddr"
+ "github.com/nspcc-dev/neofs-api-go/bootstrap"
+ crypto "github.com/nspcc-dev/neofs-crypto"
+ "github.com/nspcc-dev/neofs-node/lib/peers"
+ "github.com/pkg/errors"
+ "github.com/spf13/viper"
+ "go.uber.org/dig"
+ "go.uber.org/zap"
+)
+
+type (
+ nodeSettings struct {
+ dig.Out
+
+ Address multiaddr.Multiaddr
+ PrivateKey *ecdsa.PrivateKey
+ NodeOpts []string `name:"node_options"`
+ ShutdownTTL time.Duration `name:"shutdown_ttl"`
+
+ NodeInfo bootstrap.NodeInfo
+ }
+)
+
+const generateKey = "generated"
+
+var errEmptyNodeSettings = errors.New("node settings could not be empty")
+
+func newNodeSettings(v *viper.Viper, l *zap.Logger) (cfg nodeSettings, err error) {
+ // check that node settings are present in the provided config
+ if !v.IsSet("node") {
+ err = errEmptyNodeSettings
+ return
+ }
+
+ // try to load and set up the ecdsa.PrivateKey
+ key := v.GetString("node.private_key")
+ switch key {
+ case "":
+ err = crypto.ErrEmptyPrivateKey
+ return cfg, err
+ case generateKey:
+ if cfg.PrivateKey, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader); err != nil {
+ return cfg, err
+ }
+ default:
+ if cfg.PrivateKey, err = crypto.LoadPrivateKey(key); err != nil {
+ return cfg, errors.Wrap(err, "cannot unmarshal private key")
+ }
+ }
+
+ id := peers.IDFromPublicKey(&cfg.PrivateKey.PublicKey)
+ pub := crypto.MarshalPublicKey(&cfg.PrivateKey.PublicKey)
+ l.Debug("private key loaded successfully",
+ zap.String("file", v.GetString("node.private_key")),
+ zap.Binary("public", pub),
+ zap.Stringer("node-id", id))
+
+ var (
+ addr string
+ proto string
+ )
+
+ // fetch shutdown timeout from settings
+ if cfg.ShutdownTTL = v.GetDuration("node.shutdown_ttl"); cfg.ShutdownTTL == 0 {
+ return cfg, errEmptyShutdownTTL
+ }
+
+ // fetch address and protocol from settings
+ if addr = v.GetString("node.address"); addr == "" {
+ return cfg, errors.Wrapf(errEmptyAddress, "given '%s'", addr)
+ } else if addr, err := prepareAddress(addr); err != nil {
+ return cfg, err
+ } else if proto = v.GetString("node.proto"); proto == "" {
+ return cfg, errors.Wrapf(errEmptyProtocol, "given '%s'", proto)
+ } else if cfg.Address, err = multiAddressFromProtoAddress(proto, addr); err != nil {
+ return cfg, errors.Wrapf(err, "given '%s' '%s'", proto, addr)
+ }
+
+ // add well-known options
+ items := map[string]string{
+ "Capacity": "capacity",
+ "Price": "price",
+ "Location": "location",
+ "Country": "country",
+ "City": "city",
+ }
+
+ // TODO: use const namings
+ prefix := "node."
+
+ for opt, path := range items {
+ val := v.GetString(prefix + path)
+ if len(val) == 0 {
+ err = errors.Errorf("node option %s must be set explicitly", opt)
+ return
+ }
+
+ cfg.NodeOpts = append(cfg.NodeOpts,
+ fmt.Sprintf("/%s:%s",
+ opt,
+ val,
+ ),
+ )
+ }
+
+ // add other options
+
+ var (
+ i int
+ val string
+ )
+loop:
+ for ; ; i++ {
+ val = v.GetString("node.options."
+ strconv.Itoa(i)) + if val == "" { + break + } + + for opt := range items { + if strings.Contains(val, "/"+opt) { + continue loop + } + } + + cfg.NodeOpts = append(cfg.NodeOpts, val) + } + + cfg.NodeInfo = bootstrap.NodeInfo{ + Address: cfg.Address.String(), + PubKey: crypto.MarshalPublicKey(&cfg.PrivateKey.PublicKey), + Options: cfg.NodeOpts, + } + + l.Debug("loaded node options", + zap.Strings("options", cfg.NodeOpts)) + + return cfg, err +} diff --git a/modules/workers/module.go b/modules/workers/module.go new file mode 100644 index 000000000..275a5faf2 --- /dev/null +++ b/modules/workers/module.go @@ -0,0 +1,10 @@ +package workers + +import ( + "github.com/nspcc-dev/neofs-node/lib/fix/module" +) + +// Module is a workers module. +var Module = module.Module{ + {Constructor: prepare}, +} diff --git a/modules/workers/prepare.go b/modules/workers/prepare.go new file mode 100644 index 000000000..ea5411fbf --- /dev/null +++ b/modules/workers/prepare.go @@ -0,0 +1,132 @@ +package workers + +import ( + "context" + "time" + + "github.com/nspcc-dev/neofs-node/lib/fix/worker" + "github.com/spf13/viper" + "go.uber.org/dig" + "go.uber.org/zap" +) + +type ( + // Result returns wrapped workers group for DI. + Result struct { + dig.Out + + Workers []*worker.Job + } + + // Params is dependencies for create workers slice. + Params struct { + dig.In + + Jobs worker.Jobs + Viper *viper.Viper + Logger *zap.Logger + } +) + +func prepare(p Params) worker.Workers { + w := worker.New() + + for name, handler := range p.Jobs { + if job := byConfig(name, handler, p.Logger, p.Viper); job != nil { + p.Logger.Debug("worker: add new job", + zap.String("name", name)) + + w.Add(job) + } + } + + return w +} + +func byTicker(d time.Duration, h worker.Handler) worker.Handler { + return func(ctx context.Context) { + ticker := time.NewTicker(d) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + default: + select { + case <-ctx.Done(): + return + case <-ticker.C: + h(ctx) + } + } + } + } +} + +func byTimer(d time.Duration, h worker.Handler) worker.Handler { + return func(ctx context.Context) { + timer := time.NewTimer(d) + defer timer.Stop() + + for { + select { + case <-ctx.Done(): + return + default: + select { + case <-ctx.Done(): + return + case <-timer.C: + h(ctx) + timer.Reset(d) + } + } + } + } +} + +func byConfig(name string, h worker.Handler, l *zap.Logger, v *viper.Viper) worker.Handler { + var job worker.Handler + + if !v.IsSet("workers." + name) { + l.Info("worker: has no configuration", + zap.String("worker", name)) + return nil + } + + if v.GetBool("workers." + name + ".disabled") { + l.Info("worker: disabled", + zap.String("worker", name)) + return nil + } + + if ticker := v.GetDuration("workers." + name + ".ticker"); ticker > 0 { + job = byTicker(ticker, h) + } + + if timer := v.GetDuration("workers." + name + ".timer"); timer > 0 { + job = byTimer(timer, h) + } + + if v.GetBool("workers." 
+ name + ".immediately") { + return func(ctx context.Context) { + h(ctx) + + if job == nil { + return + } + + // check context before run immediately job again + select { + case <-ctx.Done(): + return + default: + } + + job(ctx) + } + } + + return job +} diff --git a/services/metrics/service.go b/services/metrics/service.go new file mode 100644 index 000000000..1acd9970e --- /dev/null +++ b/services/metrics/service.go @@ -0,0 +1,60 @@ +package metrics + +import ( + "context" + + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/metrics" + "github.com/nspcc-dev/neofs-node/modules/grpc" + "go.uber.org/zap" +) + +type ( + // Service is an interface of the server of Metrics service. + Service interface { + MetricsServer + grpc.Service + } + + // Params groups the parameters of Metrics service server's constructor. + Params struct { + Logger *zap.Logger + Collector metrics.Collector + } + + serviceMetrics struct { + log *zap.Logger + col metrics.Collector + } +) + +const ( + errEmptyLogger = internal.Error("empty logger") + errEmptyCollector = internal.Error("empty metrics collector") +) + +// New is a Metrics service server's constructor. +func New(p Params) (Service, error) { + switch { + case p.Logger == nil: + return nil, errEmptyLogger + case p.Collector == nil: + return nil, errEmptyCollector + } + + return &serviceMetrics{ + log: p.Logger, + col: p.Collector, + }, nil +} + +func (s *serviceMetrics) ResetSpaceCounter(_ context.Context, _ *ResetSpaceRequest) (*ResetSpaceResponse, error) { + s.col.UpdateSpaceUsage() + return &ResetSpaceResponse{}, nil +} + +func (s *serviceMetrics) Name() string { return "metrics" } + +func (s *serviceMetrics) Register(srv *grpc.Server) { + RegisterMetricsServer(srv, s) +} diff --git a/services/metrics/service.pb.go b/services/metrics/service.pb.go new file mode 100644 index 000000000..14062bfba --- /dev/null +++ b/services/metrics/service.pb.go @@ -0,0 +1,496 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: services/metrics/service.proto + +package metrics + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type ResetSpaceRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResetSpaceRequest) Reset() { *m = ResetSpaceRequest{} } +func (m *ResetSpaceRequest) String() string { return proto.CompactTextString(m) } +func (*ResetSpaceRequest) ProtoMessage() {} +func (*ResetSpaceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cf31ac6ef743be7f, []int{0} +} +func (m *ResetSpaceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResetSpaceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResetSpaceRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResetSpaceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResetSpaceRequest.Merge(m, src) +} +func (m *ResetSpaceRequest) XXX_Size() int { + return m.Size() +} +func (m *ResetSpaceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ResetSpaceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ResetSpaceRequest proto.InternalMessageInfo + +type ResetSpaceResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResetSpaceResponse) Reset() { *m = ResetSpaceResponse{} } +func (m *ResetSpaceResponse) String() string { return proto.CompactTextString(m) } +func (*ResetSpaceResponse) ProtoMessage() {} +func (*ResetSpaceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cf31ac6ef743be7f, []int{1} +} +func (m *ResetSpaceResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResetSpaceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResetSpaceResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResetSpaceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResetSpaceResponse.Merge(m, src) +} +func (m *ResetSpaceResponse) XXX_Size() int { + return m.Size() +} +func (m *ResetSpaceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ResetSpaceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ResetSpaceResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ResetSpaceRequest)(nil), "metrics.ResetSpaceRequest") + proto.RegisterType((*ResetSpaceResponse)(nil), "metrics.ResetSpaceResponse") +} + +func init() { proto.RegisterFile("services/metrics/service.proto", fileDescriptor_cf31ac6ef743be7f) } + +var fileDescriptor_cf31ac6ef743be7f = []byte{ + // 182 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2b, 0x4e, 0x2d, 0x2a, + 0xcb, 0x4c, 0x4e, 0x2d, 0xd6, 0xcf, 0x4d, 0x2d, 0x29, 0xca, 0x4c, 0x2e, 0xd6, 0x87, 0x0a, 0xe8, + 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xb1, 0x43, 0x85, 0x95, 0x84, 0xb9, 0x04, 0x83, 0x52, 0x8b, + 0x53, 0x4b, 0x82, 0x0b, 0x12, 0x93, 0x53, 0x83, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x94, 0x44, + 0xb8, 0x84, 0x90, 0x05, 0x8b, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x8d, 0xc2, 0xb9, 0xd8, 0x7d, 0x21, + 0xba, 0x84, 0x7c, 0x90, 0x75, 0x39, 0xe7, 0x97, 0xe6, 0x95, 0xa4, 0x16, 0x09, 0x49, 0xe9, 0x41, + 0x0d, 0xd5, 0xc3, 0x30, 0x51, 0x4a, 0x1a, 0xab, 0x1c, 0xc4, 
0x60, 0x27, 0xc7, 0x13, 0x8f, 0xe4, + 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc6, 0x63, 0x39, 0x86, 0x28, 0xfd, + 0xf4, 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0xfd, 0xbc, 0xe2, 0x82, 0xe4, 0x64, + 0xdd, 0x94, 0xd4, 0x32, 0xfd, 0xbc, 0xd4, 0xfc, 0xb4, 0x62, 0xdd, 0xbc, 0xfc, 0x94, 0x54, 0x98, + 0x6f, 0x60, 0xbe, 0x4b, 0x62, 0x03, 0x7b, 0xcb, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x44, 0xfc, + 0x9a, 0x7c, 0xf8, 0x00, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MetricsClient is the client API for Metrics service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MetricsClient interface { + ResetSpaceCounter(ctx context.Context, in *ResetSpaceRequest, opts ...grpc.CallOption) (*ResetSpaceResponse, error) +} + +type metricsClient struct { + cc *grpc.ClientConn +} + +func NewMetricsClient(cc *grpc.ClientConn) MetricsClient { + return &metricsClient{cc} +} + +func (c *metricsClient) ResetSpaceCounter(ctx context.Context, in *ResetSpaceRequest, opts ...grpc.CallOption) (*ResetSpaceResponse, error) { + out := new(ResetSpaceResponse) + err := c.cc.Invoke(ctx, "/metrics.Metrics/ResetSpaceCounter", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MetricsServer is the server API for Metrics service. +type MetricsServer interface { + ResetSpaceCounter(context.Context, *ResetSpaceRequest) (*ResetSpaceResponse, error) +} + +// UnimplementedMetricsServer can be embedded to have forward compatible implementations. 
+type UnimplementedMetricsServer struct { +} + +func (*UnimplementedMetricsServer) ResetSpaceCounter(ctx context.Context, req *ResetSpaceRequest) (*ResetSpaceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ResetSpaceCounter not implemented") +} + +func RegisterMetricsServer(s *grpc.Server, srv MetricsServer) { + s.RegisterService(&_Metrics_serviceDesc, srv) +} + +func _Metrics_ResetSpaceCounter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResetSpaceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricsServer).ResetSpaceCounter(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/metrics.Metrics/ResetSpaceCounter", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricsServer).ResetSpaceCounter(ctx, req.(*ResetSpaceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Metrics_serviceDesc = grpc.ServiceDesc{ + ServiceName: "metrics.Metrics", + HandlerType: (*MetricsServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ResetSpaceCounter", + Handler: _Metrics_ResetSpaceCounter_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "services/metrics/service.proto", +} + +func (m *ResetSpaceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResetSpaceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResetSpaceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *ResetSpaceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResetSpaceResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResetSpaceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func encodeVarintService(dAtA []byte, offset int, v uint64) int { + offset -= sovService(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ResetSpaceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ResetSpaceResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovService(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozService(x uint64) (n int) { + return sovService(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ResetSpaceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var 
wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResetSpaceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResetSpaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResetSpaceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResetSpaceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResetSpaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipService(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthService + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupService + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthService + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthService = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowService = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupService = fmt.Errorf("proto: unexpected end of group") +) diff --git a/services/metrics/service.proto b/services/metrics/service.proto new file mode 100644 index 000000000..6d4b29f63 --- /dev/null +++ b/services/metrics/service.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; +package metrics; +option go_package = "github.com/nspcc-dev/neofs-node/service/metrics"; + +service Metrics { + rpc ResetSpaceCounter(ResetSpaceRequest) returns (ResetSpaceResponse); +} + +message ResetSpaceRequest {} +message ResetSpaceResponse {} \ No newline at end of file diff --git a/services/public/accounting/service.go b/services/public/accounting/service.go new file mode 100644 index 000000000..e6baee30b --- /dev/null +++ b/services/public/accounting/service.go @@ -0,0 +1,85 @@ +package accounting + +import ( + "context" + + "github.com/nspcc-dev/neofs-api-go/accounting" + "github.com/nspcc-dev/neofs-api-go/decimal" + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/nspcc-dev/neofs-node/modules/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type ( + // Service is an interface of the server of Accounting service. + Service interface { + grpc.Service + accounting.AccountingServer + } + + // Params groups the parameters of Accounting service server's constructor. + Params struct { + MorphBalanceContract implementations.MorphBalanceContract + } + + accService struct { + balanceContract implementations.MorphBalanceContract + } +) + +var requestVerifyFunc = core.VerifyRequestWithSignatures + +// New is an Accounting service server's constructor. 
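+//
+// A minimal usage sketch, assuming balanceContract is an already configured
+// implementations.MorphBalanceContract and srv is the node's grpc.Server
+// (both names are illustrative):
+//
+//	svc, err := New(Params{MorphBalanceContract: balanceContract})
+//	if err != nil {
+//		// handle the constructor error
+//	}
+//
+//	svc.Register(srv) // exposes the Balance RPC as part of AccountingService
+//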
+func New(p Params) (Service, error) { + return &accService{ + balanceContract: p.MorphBalanceContract, + }, nil +} + +func (accService) Name() string { return "AccountingService" } + +func (s accService) Register(g *grpc.Server) { accounting.RegisterAccountingServer(g, s) } + +func (s accService) Balance(ctx context.Context, req *accounting.BalanceRequest) (*accounting.BalanceResponse, error) { + // verify request structure + if err := requestVerifyFunc(req); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + // prepare balanceOf parameters + p := implementations.BalanceOfParams{} + p.SetOwnerID(req.GetOwnerID()) + + // get balance of + bRes, err := s.balanceContract.BalanceOf(p) + if err != nil { + return nil, status.Error(codes.Aborted, err.Error()) + } + + // get decimals + + // TODO: Reconsider the approach of getting decimals. + // + // Decimals value does not seem to be frequently changing. + // In this case service can work in static decimals mode and + // the value can be received once to facilitate call flow. + // + // In a true dynamic value installation it is advisable to get + // a balance with decimals through a single call. Variations: + // - add decimal value stack parameter of balanceOf method; + // - create a new method entitled smth like balanceWithDecimals. + decRes, err := s.balanceContract.Decimals(implementations.DecimalsParams{}) + if err != nil { + return nil, status.Error(codes.Aborted, err.Error()) + } + + res := new(accounting.BalanceResponse) + res.Balance = decimal.NewWithPrecision( + bRes.Amount(), + uint32(decRes.Decimals()), + ) + + return res, nil +} diff --git a/services/public/accounting/service_test.go b/services/public/accounting/service_test.go new file mode 100644 index 000000000..d59995eff --- /dev/null +++ b/services/public/accounting/service_test.go @@ -0,0 +1,3 @@ +package accounting + +// TODO: write unit tests diff --git a/services/public/container/acl.go b/services/public/container/acl.go new file mode 100644 index 000000000..3b96e5a6b --- /dev/null +++ b/services/public/container/acl.go @@ -0,0 +1,64 @@ +package container + +import ( + "context" + + "github.com/nspcc-dev/neofs-api-go/container" + "github.com/nspcc-dev/neofs-node/lib/acl" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (s cnrService) SetExtendedACL(ctx context.Context, req *container.SetExtendedACLRequest) (*container.SetExtendedACLResponse, error) { + // check healthiness + if err := s.healthy.Healthy(); err != nil { + return nil, status.Error(codes.Unavailable, err.Error()) + } + + // verify request structure + if err := requestVerifyFunc(req); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + // store binary EACL + key := acl.BinaryEACLKey{} + key.SetCID(req.GetID()) + + val := acl.BinaryEACLValue{} + val.SetEACL(req.GetEACL()) + val.SetSignature(req.GetSignature()) + + if err := s.aclStore.PutBinaryEACL(ctx, key, val); err != nil { + return nil, status.Error(codes.Aborted, err.Error()) + } + + return new(container.SetExtendedACLResponse), nil +} + +func (s cnrService) GetExtendedACL(ctx context.Context, req *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error) { + // check healthiness + if err := s.healthy.Healthy(); err != nil { + return nil, status.Error(codes.Unavailable, err.Error()) + } + + // verify request structure + if err := requestVerifyFunc(req); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + // receive 
binary EACL + key := acl.BinaryEACLKey{} + key.SetCID(req.GetID()) + + val, err := s.aclStore.GetBinaryEACL(ctx, key) + if err != nil { + return nil, status.Error(codes.NotFound, err.Error()) + } + + // fill the response + res := new(container.GetExtendedACLResponse) + res.SetEACL(val.EACL()) + res.SetSignature(val.Signature()) + + return res, nil +} diff --git a/services/public/container/acl_test.go b/services/public/container/acl_test.go new file mode 100644 index 000000000..7c7621d6e --- /dev/null +++ b/services/public/container/acl_test.go @@ -0,0 +1,211 @@ +package container + +import ( + "context" + "errors" + "testing" + + "github.com/nspcc-dev/neofs-api-go/container" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-node/lib/acl" + "github.com/nspcc-dev/neofs-node/lib/test" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// Entity for mocking interfaces. +// Implementation of any interface intercepts arguments via f (if not nil). +// If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. +type testEACLEntity struct { + // Set of interfaces which entity must implement, but some methods from those does not call. + + // Argument interceptor. Used for ascertain of correct parameter passage between components. + f func(...interface{}) + // Mocked result of any interface. + res interface{} + // Mocked error of any interface. + err error +} + +var requestSignFunc = service.SignRequestData + +func (s testEACLEntity) GetBinaryEACL(_ context.Context, key acl.BinaryEACLKey) (acl.BinaryEACLValue, error) { + if s.f != nil { + s.f(key) + } + + if s.err != nil { + return acl.BinaryEACLValue{}, s.err + } + + return s.res.(acl.BinaryEACLValue), nil +} + +func (s testEACLEntity) PutBinaryEACL(_ context.Context, key acl.BinaryEACLKey, val acl.BinaryEACLValue) error { + if s.f != nil { + s.f(key, val) + } + + return s.err +} + +func TestCnrService_SetExtendedACL(t *testing.T) { + ctx := context.TODO() + + t.Run("unhealthy", func(t *testing.T) { + s := cnrService{ + healthy: &testCommonEntity{ + err: errors.New("some error"), + }, + } + + _, err := s.SetExtendedACL(ctx, new(container.SetExtendedACLRequest)) + require.Error(t, err) + }) + + t.Run("invalid request structure", func(t *testing.T) { + s := cnrService{ + healthy: new(testCommonEntity), + } + + // create unsigned request + req := new(container.SetExtendedACLRequest) + require.Error(t, requestVerifyFunc(req)) + + _, err := s.SetExtendedACL(ctx, req) + require.Error(t, err) + + st, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.InvalidArgument, st.Code()) + }) + + t.Run("binary EACL storage failure", func(t *testing.T) { + req := new(container.SetExtendedACLRequest) + req.SetID(CID{1, 2, 3}) + req.SetEACL([]byte{4, 5, 6}) + req.SetSignature([]byte{7, 8, 9}) + + require.NoError(t, requestSignFunc(test.DecodeKey(0), req)) + + s := cnrService{ + healthy: new(testCommonEntity), + aclStore: &testEACLEntity{ + f: func(items ...interface{}) { + key := items[0].(acl.BinaryEACLKey) + require.Equal(t, req.GetID(), key.CID()) + + val := items[1].(acl.BinaryEACLValue) + require.Equal(t, req.GetEACL(), val.EACL()) + require.Equal(t, req.GetSignature(), val.Signature()) + }, + err: errors.New("storage error"), + }, + } + + _, err := s.SetExtendedACL(ctx, req) + require.Error(t, err) + + st, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.Aborted, st.Code()) + }) + + 
t.Run("correct result", func(t *testing.T) { + req := new(container.SetExtendedACLRequest) + + require.NoError(t, requestSignFunc(test.DecodeKey(0), req)) + + s := cnrService{ + healthy: new(testCommonEntity), + aclStore: new(testEACLEntity), + } + + res, err := s.SetExtendedACL(ctx, req) + require.NoError(t, err) + require.NotNil(t, res) + }) +} + +func TestCnrService_GetExtendedACL(t *testing.T) { + ctx := context.TODO() + + t.Run("unhealthy", func(t *testing.T) { + s := cnrService{ + healthy: &testCommonEntity{ + err: errors.New("some error"), + }, + } + + _, err := s.GetExtendedACL(ctx, new(container.GetExtendedACLRequest)) + require.Error(t, err) + }) + + t.Run("invalid request structure", func(t *testing.T) { + s := cnrService{ + healthy: new(testCommonEntity), + } + + // create unsigned request + req := new(container.GetExtendedACLRequest) + require.Error(t, requestVerifyFunc(req)) + + _, err := s.GetExtendedACL(ctx, req) + require.Error(t, err) + + st, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.InvalidArgument, st.Code()) + }) + + t.Run("binary EACL storage failure", func(t *testing.T) { + req := new(container.GetExtendedACLRequest) + req.SetID(CID{1, 2, 3}) + + require.NoError(t, requestSignFunc(test.DecodeKey(0), req)) + + s := cnrService{ + healthy: new(testCommonEntity), + aclStore: &testEACLEntity{ + f: func(items ...interface{}) { + key := items[0].(acl.BinaryEACLKey) + require.Equal(t, req.GetID(), key.CID()) + }, + err: errors.New("storage error"), + }, + } + + _, err := s.GetExtendedACL(ctx, req) + require.Error(t, err) + + st, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.NotFound, st.Code()) + }) + + t.Run("correct result", func(t *testing.T) { + req := new(container.GetExtendedACLRequest) + + require.NoError(t, requestSignFunc(test.DecodeKey(0), req)) + + eacl := []byte{1, 2, 3} + sig := []byte{4, 5, 6} + + val := acl.BinaryEACLValue{} + val.SetEACL(eacl) + val.SetSignature(sig) + + s := cnrService{ + healthy: new(testCommonEntity), + aclStore: &testEACLEntity{ + res: val, + }, + } + + res, err := s.GetExtendedACL(ctx, req) + require.NoError(t, err) + require.Equal(t, eacl, res.GetEACL()) + require.Equal(t, sig, res.GetSignature()) + }) +} diff --git a/services/public/container/alias.go b/services/public/container/alias.go new file mode 100644 index 000000000..138f09a5b --- /dev/null +++ b/services/public/container/alias.go @@ -0,0 +1,15 @@ +package container + +import ( + "github.com/nspcc-dev/neofs-api-go/container" + "github.com/nspcc-dev/neofs-api-go/refs" +) + +// CID is a type alias of CID. +type CID = refs.CID + +// OwnerID is a type alias of OwnerID. +type OwnerID = refs.OwnerID + +// Container is a type alias of Container. +type Container = container.Container diff --git a/services/public/container/common_test.go b/services/public/container/common_test.go new file mode 100644 index 000000000..8d218c7b1 --- /dev/null +++ b/services/public/container/common_test.go @@ -0,0 +1,19 @@ +package container + +// Entity for mocking interfaces. +// Implementation of any interface intercepts arguments via f (if not nil). +// If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. +type testCommonEntity struct { + // Set of interfaces which entity must implement, but some methods from those does not call. + + // Argument interceptor. Used for ascertain of correct parameter passage between components. + f func(...interface{}) + // Mocked result of any interface. 
+ res interface{} + // Mocked error of any interface. + err error +} + +func (s testCommonEntity) Healthy() error { + return s.err +} diff --git a/services/public/container/delete.go b/services/public/container/delete.go new file mode 100644 index 000000000..1df1c88ca --- /dev/null +++ b/services/public/container/delete.go @@ -0,0 +1,37 @@ +package container + +import ( + "context" + + "github.com/nspcc-dev/neofs-api-go/container" + libcnr "github.com/nspcc-dev/neofs-node/lib/container" + "github.com/pkg/errors" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (s cnrService) Delete(ctx context.Context, req *container.DeleteRequest) (*container.DeleteResponse, error) { + // check healthiness + if err := s.healthy.Healthy(); err != nil { + return nil, errors.Wrap(err, "try again later") + } + + // verify request structure + if err := requestVerifyFunc(req); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + p := libcnr.DeleteParams{} + p.SetContext(ctx) + p.SetCID(req.GetCID()) + // TODO: add owner ID and CID signature + + if _, err := s.cnrStore.DeleteContainer(p); err != nil { + return nil, status.Error( + codes.Aborted, + errors.Wrapf(err, "could not remove container %d", req.CID).Error(), + ) + } + + return new(container.DeleteResponse), nil +} diff --git a/services/public/container/delete_test.go b/services/public/container/delete_test.go new file mode 100644 index 000000000..935e87c70 --- /dev/null +++ b/services/public/container/delete_test.go @@ -0,0 +1,118 @@ +package container + +import ( + "context" + "errors" + "testing" + + "github.com/nspcc-dev/neofs-api-go/container" + libcnr "github.com/nspcc-dev/neofs-node/lib/container" + "github.com/nspcc-dev/neofs-node/lib/test" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// Entity for mocking interfaces. +// Implementation of any interface intercepts arguments via f (if not nil). +// If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. +type testDeleteEntity struct { + // Set of interfaces which entity must implement, but some methods from those does not call. + libcnr.Storage + + // Argument interceptor. Used for ascertain of correct parameter passage between components. + f func(...interface{}) + // Mocked result of any interface. + res interface{} + // Mocked error of any interface. 
+ err error +} + +func (s testDeleteEntity) DeleteContainer(p libcnr.DeleteParams) (*libcnr.DeleteResult, error) { + if s.f != nil { + s.f(p) + } + + if s.err != nil { + return nil, s.err + } + + return s.res.(*libcnr.DeleteResult), nil +} + +func TestCnrService_Delete(t *testing.T) { + ctx := context.TODO() + + t.Run("unhealthy", func(t *testing.T) { + s := cnrService{ + healthy: &testCommonEntity{ + err: errors.New("some error"), + }, + } + + _, err := s.Delete(ctx, new(container.DeleteRequest)) + require.Error(t, err) + }) + + t.Run("invalid request structure", func(t *testing.T) { + s := cnrService{ + healthy: new(testCommonEntity), + } + + // create unsigned request + req := new(container.DeleteRequest) + require.Error(t, requestVerifyFunc(req)) + + _, err := s.Delete(ctx, req) + require.Error(t, err) + + st, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.InvalidArgument, st.Code()) + }) + + t.Run("container storage failure", func(t *testing.T) { + req := new(container.DeleteRequest) + req.SetCID(CID{1, 2, 3}) + + require.NoError(t, requestSignFunc(test.DecodeKey(0), req)) + + s := cnrService{ + healthy: new(testCommonEntity), + cnrStore: &testDeleteEntity{ + f: func(items ...interface{}) { + p := items[0].(libcnr.DeleteParams) + require.Equal(t, ctx, p.Context()) + require.Equal(t, req.GetCID(), p.CID()) + }, + err: errors.New("storage error"), + }, + } + + _, err := s.Delete(ctx, req) + require.Error(t, err) + + st, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.Aborted, st.Code()) + }) + + t.Run("correct result", func(t *testing.T) { + req := new(container.DeleteRequest) + + require.NoError(t, requestSignFunc(test.DecodeKey(0), req)) + + delRes := new(libcnr.DeleteResult) + + s := cnrService{ + healthy: new(testCommonEntity), + cnrStore: &testDeleteEntity{ + res: delRes, + }, + } + + res, err := s.Delete(ctx, req) + require.NoError(t, err) + require.NotNil(t, res) + }) +} diff --git a/services/public/container/get.go b/services/public/container/get.go new file mode 100644 index 000000000..8e48bae2d --- /dev/null +++ b/services/public/container/get.go @@ -0,0 +1,38 @@ +package container + +import ( + "context" + + "github.com/nspcc-dev/neofs-api-go/container" + libcnr "github.com/nspcc-dev/neofs-node/lib/container" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (s cnrService) Get(ctx context.Context, req *container.GetRequest) (*container.GetResponse, error) { + // check healthiness + if err := s.healthy.Healthy(); err != nil { + return nil, status.Error(codes.Unavailable, err.Error()) + } + + // verify request structure + if err := requestVerifyFunc(req); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + // get container + p := libcnr.GetParams{} + p.SetContext(ctx) + p.SetCID(req.GetCID()) + + gRes, err := s.cnrStore.GetContainer(p) + if err != nil { + return nil, status.Error(codes.NotFound, err.Error()) + } + + // fill the response + res := new(container.GetResponse) + res.Container = gRes.Container() + + return res, nil +} diff --git a/services/public/container/get_test.go b/services/public/container/get_test.go new file mode 100644 index 000000000..875a14d74 --- /dev/null +++ b/services/public/container/get_test.go @@ -0,0 +1,123 @@ +package container + +import ( + "context" + "errors" + "testing" + + "github.com/nspcc-dev/neofs-api-go/container" + libcnr "github.com/nspcc-dev/neofs-node/lib/container" + "github.com/nspcc-dev/neofs-node/lib/test" + 
"github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// Entity for mocking interfaces. +// Implementation of any interface intercepts arguments via f (if not nil). +// If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. +type testGetEntity struct { + // Set of interfaces which entity must implement, but some methods from those does not call. + libcnr.Storage + + // Argument interceptor. Used for ascertain of correct parameter passage between components. + f func(...interface{}) + // Mocked result of any interface. + res interface{} + // Mocked error of any interface. + err error +} + +func (s testGetEntity) GetContainer(p libcnr.GetParams) (*libcnr.GetResult, error) { + if s.f != nil { + s.f(p) + } + + if s.err != nil { + return nil, s.err + } + + return s.res.(*libcnr.GetResult), nil +} + +func TestCnrService_Get(t *testing.T) { + ctx := context.TODO() + + t.Run("unhealthy", func(t *testing.T) { + s := cnrService{ + healthy: &testCommonEntity{ + err: errors.New("some error"), + }, + } + + _, err := s.Get(ctx, new(container.GetRequest)) + require.Error(t, err) + }) + + t.Run("invalid request structure", func(t *testing.T) { + s := cnrService{ + healthy: new(testCommonEntity), + } + + // create unsigned request + req := new(container.GetRequest) + require.Error(t, requestVerifyFunc(req)) + + _, err := s.Get(ctx, req) + require.Error(t, err) + + st, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.InvalidArgument, st.Code()) + }) + + t.Run("container storage failure", func(t *testing.T) { + req := new(container.GetRequest) + req.SetCID(CID{1, 2, 3}) + + require.NoError(t, requestSignFunc(test.DecodeKey(0), req)) + + s := cnrService{ + healthy: new(testCommonEntity), + cnrStore: &testGetEntity{ + f: func(items ...interface{}) { + p := items[0].(libcnr.GetParams) + require.Equal(t, ctx, p.Context()) + require.Equal(t, req.GetCID(), p.CID()) + }, + err: errors.New("storage error"), + }, + } + + _, err := s.Get(ctx, req) + require.Error(t, err) + + st, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.NotFound, st.Code()) + }) + + t.Run("correct result", func(t *testing.T) { + req := new(container.GetRequest) + + require.NoError(t, requestSignFunc(test.DecodeKey(0), req)) + + cnr := &Container{ + Capacity: 1, + } + + getRes := new(libcnr.GetResult) + getRes.SetContainer(cnr) + + s := cnrService{ + healthy: new(testCommonEntity), + cnrStore: &testGetEntity{ + res: getRes, + }, + } + + res, err := s.Get(ctx, req) + require.NoError(t, err) + require.Equal(t, cnr, res.GetContainer()) + }) +} diff --git a/services/public/container/list.go b/services/public/container/list.go new file mode 100644 index 000000000..abbe641a8 --- /dev/null +++ b/services/public/container/list.go @@ -0,0 +1,39 @@ +package container + +import ( + "context" + + "github.com/nspcc-dev/neofs-api-go/container" + libcnr "github.com/nspcc-dev/neofs-node/lib/container" + "github.com/pkg/errors" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (s cnrService) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) { + // check healthiness + if err := s.healthy.Healthy(); err != nil { + return nil, errors.Wrap(err, "try again later") + } + + // verify request structure + if err := requestVerifyFunc(req); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + // list containers + p := 
libcnr.ListParams{} + p.SetContext(ctx) + p.SetOwnerIDList(req.GetOwnerID()) + + lRes, err := s.cnrStore.ListContainers(p) + if err != nil { + return nil, status.Error(codes.NotFound, err.Error()) + } + + // fill the response + res := new(container.ListResponse) + res.CID = lRes.CIDList() + + return res, nil +} diff --git a/services/public/container/list_test.go b/services/public/container/list_test.go new file mode 100644 index 000000000..38123ece0 --- /dev/null +++ b/services/public/container/list_test.go @@ -0,0 +1,124 @@ +package container + +import ( + "context" + "errors" + "testing" + + "github.com/nspcc-dev/neofs-api-go/container" + libcnr "github.com/nspcc-dev/neofs-node/lib/container" + "github.com/nspcc-dev/neofs-node/lib/test" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// Entity for mocking interfaces. +// Implementation of any interface intercepts arguments via f (if not nil). +// If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. +type testListEntity struct { + // Set of interfaces which entity must implement, but some methods from those does not call. + libcnr.Storage + + // Argument interceptor. Used for ascertain of correct parameter passage between components. + f func(...interface{}) + // Mocked result of any interface. + res interface{} + // Mocked error of any interface. + err error +} + +func (s testListEntity) ListContainers(p libcnr.ListParams) (*libcnr.ListResult, error) { + if s.f != nil { + s.f(p) + } + + if s.err != nil { + return nil, s.err + } + + return s.res.(*libcnr.ListResult), nil +} + +func TestCnrService_List(t *testing.T) { + ctx := context.TODO() + + t.Run("unhealthy", func(t *testing.T) { + s := cnrService{ + healthy: &testCommonEntity{ + err: errors.New("some error"), + }, + } + + _, err := s.List(ctx, new(container.ListRequest)) + require.Error(t, err) + }) + + t.Run("invalid request structure", func(t *testing.T) { + s := cnrService{ + healthy: new(testCommonEntity), + } + + // create unsigned request + req := new(container.ListRequest) + require.Error(t, requestVerifyFunc(req)) + + _, err := s.List(ctx, req) + require.Error(t, err) + + st, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.InvalidArgument, st.Code()) + }) + + t.Run("container storage failure", func(t *testing.T) { + req := new(container.ListRequest) + req.SetOwnerID(OwnerID{1, 2, 3}) + + require.NoError(t, requestSignFunc(test.DecodeKey(0), req)) + + s := cnrService{ + healthy: new(testCommonEntity), + cnrStore: &testListEntity{ + f: func(items ...interface{}) { + p := items[0].(libcnr.ListParams) + require.Equal(t, ctx, p.Context()) + require.Equal(t, req.GetOwnerID(), p.OwnerIDList()[0]) + }, + err: errors.New("storage error"), + }, + } + + _, err := s.List(ctx, req) + require.Error(t, err) + + st, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.NotFound, st.Code()) + }) + + t.Run("correct result", func(t *testing.T) { + req := new(container.ListRequest) + + require.NoError(t, requestSignFunc(test.DecodeKey(0), req)) + + cidList := []CID{ + {1, 2, 3}, + {4, 5, 6}, + } + + listRes := new(libcnr.ListResult) + listRes.SetCIDList(cidList) + + s := cnrService{ + healthy: new(testCommonEntity), + cnrStore: &testListEntity{ + res: listRes, + }, + } + + res, err := s.List(ctx, req) + require.NoError(t, err) + require.Equal(t, cidList, res.CID) + }) +} diff --git a/services/public/container/put.go 
b/services/public/container/put.go new file mode 100644 index 000000000..9ed642ace --- /dev/null +++ b/services/public/container/put.go @@ -0,0 +1,54 @@ +package container + +import ( + "context" + + "github.com/nspcc-dev/neofs-api-go/container" + "github.com/nspcc-dev/neofs-api-go/refs" + libcnr "github.com/nspcc-dev/neofs-node/lib/container" + "github.com/pkg/errors" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// TODO verify MessageID. +func (s cnrService) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) { + // check healthiness + if err := s.healthy.Healthy(); err != nil { + return nil, errors.Wrap(err, "try again later") + } + + // verify request structure + if err := requestVerifyFunc(req); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + // create container structure + cnr := new(container.Container) + cnr.OwnerID = req.GetOwnerID() + cnr.Capacity = req.GetCapacity() + cnr.Rules = req.GetRules() + cnr.BasicACL = req.GetBasicACL() + + var err error + if cnr.Salt, err = refs.NewUUID(); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + // put the container to storage + p := libcnr.PutParams{} + p.SetContext(ctx) + p.SetContainer(cnr) + // TODO: add user signature + + pRes, err := s.cnrStore.PutContainer(p) + if err != nil { + return nil, status.Error(codes.Aborted, err.Error()) + } + + // fill the response + res := new(container.PutResponse) + res.CID = pRes.CID() + + return res, nil +} diff --git a/services/public/container/put_test.go b/services/public/container/put_test.go new file mode 100644 index 000000000..a777c23a0 --- /dev/null +++ b/services/public/container/put_test.go @@ -0,0 +1,132 @@ +package container + +import ( + "context" + "errors" + "testing" + + "github.com/nspcc-dev/neofs-api-go/container" + libcnr "github.com/nspcc-dev/neofs-node/lib/container" + "github.com/nspcc-dev/neofs-node/lib/netmap" + "github.com/nspcc-dev/neofs-node/lib/test" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// Entity for mocking interfaces. +// Implementation of any interface intercepts arguments via f (if not nil). +// If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. +type testPutEntity struct { + // Set of interfaces which entity must implement, but some methods from those does not call. + libcnr.Storage + + // Argument interceptor. Used for ascertain of correct parameter passage between components. + f func(...interface{}) + // Mocked result of any interface. + res interface{} + // Mocked error of any interface. 
+ err error +} + +func (s testPutEntity) PutContainer(p libcnr.PutParams) (*libcnr.PutResult, error) { + if s.f != nil { + s.f(p) + } + + if s.err != nil { + return nil, s.err + } + + return s.res.(*libcnr.PutResult), nil +} + +func TestCnrService_Put(t *testing.T) { + ctx := context.TODO() + + t.Run("unhealthy", func(t *testing.T) { + s := cnrService{ + healthy: &testCommonEntity{ + err: errors.New("some error"), + }, + } + + _, err := s.Put(ctx, new(container.PutRequest)) + require.Error(t, err) + }) + + t.Run("invalid request structure", func(t *testing.T) { + s := cnrService{ + healthy: new(testCommonEntity), + } + + // create unsigned request + req := new(container.PutRequest) + require.Error(t, requestVerifyFunc(req)) + + _, err := s.Put(ctx, req) + require.Error(t, err) + + st, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.InvalidArgument, st.Code()) + }) + + t.Run("container storage failure", func(t *testing.T) { + req := new(container.PutRequest) + req.SetCapacity(1) + req.SetBasicACL(2) + req.SetOwnerID(OwnerID{1, 2, 3}) + req.SetRules(netmap.PlacementRule{ + ReplFactor: 3, + }) + + require.NoError(t, requestSignFunc(test.DecodeKey(0), req)) + + s := cnrService{ + healthy: new(testCommonEntity), + cnrStore: &testPutEntity{ + f: func(items ...interface{}) { + p := items[0].(libcnr.PutParams) + require.Equal(t, ctx, p.Context()) + + cnr := p.Container() + require.Equal(t, req.GetCapacity(), cnr.GetCapacity()) + require.Equal(t, req.GetBasicACL(), cnr.GetBasicACL()) + require.Equal(t, req.GetRules(), cnr.GetRules()) + require.Equal(t, req.GetOwnerID(), cnr.OwnerID) + }, + err: errors.New("storage error"), + }, + } + + _, err := s.Put(ctx, req) + require.Error(t, err) + + st, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.Aborted, st.Code()) + }) + + t.Run("correct result", func(t *testing.T) { + req := new(container.PutRequest) + + require.NoError(t, requestSignFunc(test.DecodeKey(0), req)) + + cid := CID{1, 2, 3} + + putRes := new(libcnr.PutResult) + putRes.SetCID(cid) + + s := cnrService{ + healthy: new(testCommonEntity), + cnrStore: &testPutEntity{ + res: putRes, + }, + } + + res, err := s.Put(ctx, req) + require.NoError(t, err) + require.Equal(t, cid, res.CID) + }) +} diff --git a/services/public/container/service.go b/services/public/container/service.go new file mode 100644 index 000000000..446406c7b --- /dev/null +++ b/services/public/container/service.go @@ -0,0 +1,78 @@ +package container + +import ( + "github.com/nspcc-dev/neofs-api-go/container" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/acl" + libcnr "github.com/nspcc-dev/neofs-node/lib/container" + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/nspcc-dev/neofs-node/modules/grpc" + "go.uber.org/zap" +) + +type ( + // Service is an interface of the server of Container service. + Service interface { + grpc.Service + container.ServiceServer + } + + // HealthChecker is an interface of node healthiness checking tool. + HealthChecker interface { + Healthy() error + } + + // Params groups the parameters of Container service server's constructor. 
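+	//
+	// A minimal construction sketch, assuming log, health, cnrStorage and
+	// eaclStore satisfy the field types below (all four names are illustrative):
+	//
+	//	svc, err := New(Params{
+	//		Logger:           log,
+	//		Healthy:          health,
+	//		Store:            cnrStorage,
+	//		ExtendedACLStore: eaclStore,
+	//	})
+	//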
+ Params struct { + Logger *zap.Logger + + Healthy HealthChecker + + Store libcnr.Storage + + ExtendedACLStore acl.BinaryExtendedACLStore + } + + cnrService struct { + log *zap.Logger + + healthy HealthChecker + + cnrStore libcnr.Storage + + aclStore acl.BinaryExtendedACLStore + } +) + +const ( + errEmptyLogger = internal.Error("empty log component") + errEmptyStore = internal.Error("empty store component") + errEmptyHealthChecker = internal.Error("empty healthy component") +) + +var requestVerifyFunc = core.VerifyRequestWithSignatures + +// New is an Container service server's constructor. +func New(p Params) (Service, error) { + switch { + case p.Logger == nil: + return nil, errEmptyLogger + case p.Store == nil: + return nil, errEmptyStore + case p.Healthy == nil: + return nil, errEmptyHealthChecker + case p.ExtendedACLStore == nil: + return nil, acl.ErrNilBinaryExtendedACLStore + } + + return &cnrService{ + log: p.Logger, + healthy: p.Healthy, + cnrStore: p.Store, + aclStore: p.ExtendedACLStore, + }, nil +} + +func (cnrService) Name() string { return "ContainerService" } + +func (s cnrService) Register(g *grpc.Server) { container.RegisterServiceServer(g, s) } diff --git a/services/public/object/acl.go b/services/public/object/acl.go new file mode 100644 index 000000000..26264f28d --- /dev/null +++ b/services/public/object/acl.go @@ -0,0 +1,428 @@ +package object + +import ( + "bytes" + "context" + "crypto/ecdsa" + + "github.com/nspcc-dev/neofs-api-go/acl" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-api-go/service" + crypto "github.com/nspcc-dev/neofs-crypto" + libacl "github.com/nspcc-dev/neofs-node/lib/acl" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/nspcc-dev/neofs-node/lib/ir" + "github.com/nspcc-dev/neofs-node/lib/localstore" + "github.com/nspcc-dev/neofs-node/lib/transport" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +type ( + // RequestTargeter is an interface of request's ACL target calculator. + RequestTargeter interface { + Target(context.Context, serviceRequest) acl.Target + } + + // aclPreProcessor is an implementation of requestPreProcessor interface. 
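+	//
+	// preProcess (defined below) applies its checks in the following order:
+	//   1. resolve the basic ACL rule and the request target via aclInfoReceiver;
+	//   2. check the basic ACL action for the request type and target;
+	//   3. for non-system targets with the sticky bit set, require the request
+	//      owner to match the object owner carried in Put/Delete payloads;
+	//   4. when bearer and/or extended ACL checks apply, verify the bearer token
+	//      and compute the extended ACL action, denying access unless it is
+	//      ActionAllow.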
+ aclPreProcessor struct { + log *zap.Logger + + aclInfoReceiver aclInfoReceiver + + basicChecker libacl.BasicChecker + + reqActionCalc requestActionCalculator + + localStore localstore.Localstore + + extACLSource libacl.ExtendedACLSource + + bearerVerifier bearerTokenVerifier + } + + targetFinder struct { + log *zap.Logger + + irStorage ir.Storage + cnrLister implementations.ContainerNodesLister + cnrOwnerChecker implementations.ContainerOwnerChecker + } +) + +type objectHeadersSource interface { + getHeaders() (*Object, bool) +} + +type requestActionCalculator interface { + calculateRequestAction(context.Context, requestActionParams) acl.ExtendedACLAction +} + +type aclInfoReceiver struct { + basicACLGetter implementations.BasicACLGetter + + basicChecker libacl.BasicChecker + + targetFinder RequestTargeter +} + +type aclInfo struct { + rule uint32 + + checkExtended bool + + checkBearer bool + + target acl.Target +} + +type reqActionCalc struct { + extACLChecker libacl.ExtendedACLChecker + + log *zap.Logger +} + +type serviceRequestInfo struct { + target acl.Target + + req serviceRequest + + objHdrSrc objectHeadersSource +} + +type requestObjHdrSrc struct { + req serviceRequest + + ls localstore.Localstore +} + +type eaclFromBearer struct { + bearer service.BearerToken +} + +var _ requestPreProcessor = (*aclPreProcessor)(nil) + +var errMissingSignatures = errors.New("empty signature list") + +func (p *aclPreProcessor) preProcess(ctx context.Context, req serviceRequest) error { + if req == nil { + panic(pmEmptyServiceRequest) + } + + // fetch ACL info + aclInfo, err := p.aclInfoReceiver.getACLInfo(ctx, req) + if err != nil { + p.log.Warn("can't get acl of the container", zap.Stringer("cid", req.CID())) + return errAccessDenied + } + + // check basic ACL permissions + allow, err := p.basicChecker.Action(aclInfo.rule, req.Type(), aclInfo.target) + if err != nil || !allow { + return errAccessDenied + } + + if aclInfo.target != acl.Target_System && + p.basicChecker.Sticky(aclInfo.rule) && + !checkObjectRequestOwnerMatch(req) { + return errAccessDenied + } + + if !aclInfo.checkBearer && !aclInfo.checkExtended { + return nil + } + + actionParams := requestActionParams{ + eaclSrc: p.extACLSource, + request: req, + objHdrSrc: &requestObjHdrSrc{ + req: req, + ls: p.localStore, + }, + target: aclInfo.target, + } + + if aclInfo.checkBearer { + bearer := req.GetBearerToken() + + if err := p.bearerVerifier.verifyBearerToken(ctx, req.CID(), bearer); err != nil { + p.log.Warn("bearer token verification failure", + zap.String("error", err.Error()), + ) + + return errAccessDenied + } + + actionParams.eaclSrc = eaclFromBearer{ + bearer: bearer, + } + } + + if p.reqActionCalc.calculateRequestAction(ctx, actionParams) != acl.ActionAllow { + return errAccessDenied + } + + return nil +} + +func (t *targetFinder) Target(ctx context.Context, req serviceRequest) acl.Target { + ownerID, ownerKey, err := requestOwner(req) + if err != nil { + t.log.Warn("could not get request owner", + zap.String("error", err.Error()), + ) + + return acl.Target_Unknown + } else if ownerKey == nil { + t.log.Warn("signature with nil public key detected") + return acl.Target_Unknown + } + + // if request from container owner then return Target_User + isOwner, err := t.cnrOwnerChecker.IsContainerOwner(ctx, req.CID(), ownerID) + if err != nil { + t.log.Warn("can't check container owner", zap.String("err", err.Error())) + return acl.Target_Unknown + } else if isOwner { + return acl.Target_User + } + + ownerKeyBytes := 
crypto.MarshalPublicKey(ownerKey) + + // if request from inner ring then return Target_System + isIRKey, err := ir.IsInnerRingKey(t.irStorage, ownerKeyBytes) + if err != nil { + t.log.Warn("could not verify the key belongs to the node", zap.String("err", err.Error())) + return acl.Target_Unknown + } else if isIRKey { + return acl.Target_System + } + + // if request from current container node then return Target_System + cnr, err := t.cnrLister.ContainerNodesInfo(ctx, req.CID(), 0) + if err != nil { + t.log.Warn("can't get current container list", zap.String("err", err.Error())) + return acl.Target_Unknown + } + + for i := range cnr { + if bytes.Equal(cnr[i].PubKey, ownerKeyBytes) { + return acl.Target_System + } + } + + // if request from previous container node then return Target_System + cnr, err = t.cnrLister.ContainerNodesInfo(ctx, req.CID(), 1) + if err != nil { + t.log.Warn("can't get previous container list", zap.String("err", err.Error())) + return acl.Target_Unknown + } + + for i := range cnr { + if bytes.Equal(cnr[i].PubKey, ownerKeyBytes) { + return acl.Target_System + } + } + + // if none of the above return Target_Others + return acl.Target_Others +} + +func checkObjectRequestOwnerMatch(req serviceRequest) bool { + rt := req.Type() + + // ignore all request types except Put and Delete + if rt != object.RequestPut && rt != object.RequestDelete { + return true + } + + // get request owner + reqOwner, _, err := requestOwner(req) + if err != nil { + return false + } + + var payloadOwner OwnerID + + // get owner from request payload + if rt == object.RequestPut { + obj := req.(transport.PutInfo).GetHead() + if obj == nil { + return false + } + + payloadOwner = obj.GetSystemHeader().OwnerID + } else { + payloadOwner = req.(*object.DeleteRequest).OwnerID + } + + return reqOwner.Equal(payloadOwner) +} + +// FIXME: this solution only works with healthy key-to-owner conversion. +func requestOwner(req serviceRequest) (OwnerID, *ecdsa.PublicKey, error) { + // if session token exists => return its owner + if token := req.GetSessionToken(); token != nil { + return token.GetOwnerID(), crypto.UnmarshalPublicKey(token.GetOwnerKey()), nil + } + + signKeys := req.GetSignKeyPairs() + if len(signKeys) == 0 { + return OwnerID{}, nil, errMissingSignatures + } + + firstKey := signKeys[0].GetPublicKey() + if firstKey == nil { + return OwnerID{}, nil, crypto.ErrEmptyPublicKey + } + + owner, err := refs.NewOwnerID(firstKey) + + return owner, firstKey, err +} + +// HeadersOfType returns request or object headers. +func (s serviceRequestInfo) HeadersOfType(typ acl.HeaderType) ([]acl.Header, bool) { + switch typ { + default: + return nil, true + case acl.HdrTypeRequest: + return libacl.TypedHeaderSourceFromExtendedHeaders(s.req).HeadersOfType(typ) + case acl.HdrTypeObjSys, acl.HdrTypeObjUsr: + obj, ok := s.objHdrSrc.getHeaders() + if !ok { + return nil, false + } + + return libacl.TypedHeaderSourceFromObject(obj).HeadersOfType(typ) + } +} + +// Key returns a binary representation of sender public key. +func (s serviceRequestInfo) Key() []byte { + _, key, err := requestOwner(s.req) + if err != nil { + return nil + } + + return crypto.MarshalPublicKey(key) +} + +// TypeOf returns true of object request type corresponds to passed OperationType. 
+func (s serviceRequestInfo) TypeOf(opType acl.OperationType) bool { + switch s.req.Type() { + case object.RequestGet: + return opType == acl.OpTypeGet + case object.RequestPut: + return opType == acl.OpTypePut + case object.RequestHead: + return opType == acl.OpTypeHead + case object.RequestSearch: + return opType == acl.OpTypeSearch + case object.RequestDelete: + return opType == acl.OpTypeDelete + case object.RequestRange: + return opType == acl.OpTypeRange + case object.RequestRangeHash: + return opType == acl.OpTypeRangeHash + default: + return false + } +} + +// TargetOf return true if target field is equal to passed ACL target. +func (s serviceRequestInfo) TargetOf(target acl.Target) bool { + return s.target == target +} + +func (s requestObjHdrSrc) getHeaders() (*Object, bool) { + switch s.req.Type() { + case object.RequestSearch: + // object header filters is not supported in Search request now + return nil, true + case object.RequestPut: + // for Put we get object headers from request + return s.req.(transport.PutInfo).GetHead(), true + default: + tReq := &transportRequest{ + serviceRequest: s.req, + } + + // for other requests we get object headers from local storage + m, err := s.ls.Meta(tReq.GetAddress()) + if err == nil { + return m.GetObject(), true + } + + return nil, false + } +} + +type requestActionParams struct { + eaclSrc libacl.ExtendedACLSource + + request serviceRequest + + objHdrSrc objectHeadersSource + + target acl.Target +} + +func (s reqActionCalc) calculateRequestAction(ctx context.Context, p requestActionParams) acl.ExtendedACLAction { + // get EACL table + table, err := p.eaclSrc.GetExtendedACLTable(ctx, p.request.CID()) + if err != nil { + s.log.Warn("could not get extended acl of the container", + zap.Stringer("cid", p.request.CID()), + zap.String("error", err.Error()), + ) + + return acl.ActionUndefined + } + + // create RequestInfo instance + reqInfo := &serviceRequestInfo{ + target: p.target, + req: p.request, + objHdrSrc: p.objHdrSrc, + } + + // calculate ACL action + return s.extACLChecker.Action(table, reqInfo) +} + +func (s aclInfoReceiver) getACLInfo(ctx context.Context, req serviceRequest) (*aclInfo, error) { + rule, err := s.basicACLGetter.GetBasicACL(ctx, req.CID()) + if err != nil { + return nil, err + } + + isBearer, err := s.basicChecker.Bearer(rule, req.Type()) + if err != nil { + return nil, err + } + + // fetch target from the request + target := s.targetFinder.Target(ctx, req) + + return &aclInfo{ + rule: rule, + + checkExtended: target != acl.Target_System && s.basicChecker.Extended(rule), + + target: target, + + checkBearer: target != acl.Target_System && isBearer && req.GetBearerToken() != nil, + }, nil +} + +func (s eaclFromBearer) GetExtendedACLTable(ctx context.Context, cid CID) (acl.ExtendedACLTable, error) { + table := acl.WrapEACLTable(nil) + + if err := table.UnmarshalBinary(s.bearer.GetACLRules()); err != nil { + return nil, err + } + + return table, nil +} diff --git a/services/public/object/acl_test.go b/services/public/object/acl_test.go new file mode 100644 index 000000000..052791376 --- /dev/null +++ b/services/public/object/acl_test.go @@ -0,0 +1,512 @@ +package object + +import ( + "context" + "crypto/ecdsa" + "errors" + "testing" + + "github.com/nspcc-dev/neofs-api-go/acl" + "github.com/nspcc-dev/neofs-api-go/bootstrap" + "github.com/nspcc-dev/neofs-api-go/container" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-api-go/service" + crypto 
"github.com/nspcc-dev/neofs-crypto" + libacl "github.com/nspcc-dev/neofs-node/lib/acl" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/nspcc-dev/neofs-node/lib/ir" + "github.com/nspcc-dev/neofs-node/lib/test" + "github.com/stretchr/testify/require" +) + +type ( + testACLEntity struct { + // Set of interfaces which testCommonEntity must implement, but some methods from those does not call. + serviceRequest + RequestTargeter + implementations.ACLHelper + implementations.ContainerNodesLister + implementations.ContainerOwnerChecker + acl.ExtendedACLTable + libacl.RequestInfo + + // Argument interceptor. Used for ascertain of correct parameter passage between components. + f func(...interface{}) + // Mocked result of any interface. + res interface{} + // Mocked error of any interface. + err error + } +) + +type testBasicChecker struct { + libacl.BasicChecker + + actionErr error + action bool + + sticky bool + + extended bool + + bearer bool +} + +func (t *testACLEntity) calculateRequestAction(context.Context, requestActionParams) acl.ExtendedACLAction { + return t.res.(acl.ExtendedACLAction) +} + +func (t *testACLEntity) buildRequestInfo(req serviceRequest, target acl.Target) (libacl.RequestInfo, error) { + if t.f != nil { + t.f(req, target) + } + + if t.err != nil { + return nil, t.err + } + + return t.res.(libacl.RequestInfo), nil +} + +func (t *testACLEntity) Action(table acl.ExtendedACLTable, req libacl.RequestInfo) acl.ExtendedACLAction { + if t.f != nil { + t.f(table, req) + } + + return t.res.(acl.ExtendedACLAction) +} + +func (t *testACLEntity) GetExtendedACLTable(_ context.Context, cid CID) (acl.ExtendedACLTable, error) { + if t.f != nil { + t.f(cid) + } + + if t.err != nil { + return nil, t.err + } + + return t.res.(acl.ExtendedACLTable), nil +} + +func (s *testBasicChecker) Extended(uint32) bool { + return s.extended +} + +func (s *testBasicChecker) Sticky(uint32) bool { + return s.sticky +} + +func (s *testBasicChecker) Bearer(uint32, object.RequestType) (bool, error) { + return s.bearer, nil +} + +func (s *testBasicChecker) Action(uint32, object.RequestType, acl.Target) (bool, error) { + return s.action, s.actionErr +} + +func (t *testACLEntity) GetBasicACL(context.Context, CID) (uint32, error) { + if t.err != nil { + return 0, t.err + } + + return t.res.(uint32), nil +} + +func (t *testACLEntity) Target(context.Context, serviceRequest) acl.Target { + return t.res.(acl.Target) +} + +func (t *testACLEntity) CID() CID { return CID{} } + +func (t *testACLEntity) Type() object.RequestType { return t.res.(object.RequestType) } + +func (t *testACLEntity) GetBearerToken() service.BearerToken { return nil } + +func (t *testACLEntity) GetOwner() (*ecdsa.PublicKey, error) { + if t.err != nil { + return nil, t.err + } + + return t.res.(*ecdsa.PublicKey), nil +} + +func (t testACLEntity) GetIRInfo(ir.GetInfoParams) (*ir.GetInfoResult, error) { + if t.err != nil { + return nil, t.err + } + + res := new(ir.GetInfoResult) + res.SetInfo(*t.res.(*ir.Info)) + + return res, nil +} + +func (t *testACLEntity) ContainerNodesInfo(ctx context.Context, cid CID, prev int) ([]bootstrap.NodeInfo, error) { + if t.err != nil { + return nil, t.err + } + + return t.res.([][]bootstrap.NodeInfo)[prev], nil +} + +func (t *testACLEntity) IsContainerOwner(_ context.Context, cid CID, owner OwnerID) (bool, error) { + if t.f != nil { + t.f(cid, owner) + } + if t.err != nil { + return false, t.err + } + + return t.res.(bool), nil +} + +func (t testACLEntity) GetSignKeyPairs() []service.SignKeyPair { 
+ if t.res == nil { + return nil + } + return t.res.([]service.SignKeyPair) +} + +func TestPreprocessor(t *testing.T) { + ctx := context.TODO() + + t.Run("empty request", func(t *testing.T) { + require.PanicsWithValue(t, pmEmptyServiceRequest, func() { + _ = new(aclPreProcessor).preProcess(ctx, nil) + }) + }) + + t.Run("everything is okay", func(t *testing.T) { + rule := uint32(0x00000003) + // set F-bit + rule |= 1 << 28 + + checker := new(libacl.BasicACLChecker) + + preprocessor := aclPreProcessor{ + log: test.NewTestLogger(false), + aclInfoReceiver: aclInfoReceiver{ + basicACLGetter: &testACLEntity{res: rule}, + basicChecker: checker, + targetFinder: &testACLEntity{res: acl.Target_Others}, + }, + basicChecker: checker, + } + require.NoError(t, preprocessor.preProcess(ctx, &testACLEntity{res: object.RequestGet})) + + preprocessor.aclInfoReceiver.targetFinder = &testACLEntity{res: acl.Target_System} + require.Error(t, preprocessor.preProcess(ctx, &testACLEntity{res: object.RequestGet})) + preprocessor.aclInfoReceiver.targetFinder = &testACLEntity{res: acl.Target_User} + require.Error(t, preprocessor.preProcess(ctx, &testACLEntity{res: object.RequestGet})) + }) + + t.Run("can't fetch container", func(t *testing.T) { + preprocessor := aclPreProcessor{ + log: test.NewTestLogger(false), + aclInfoReceiver: aclInfoReceiver{ + basicACLGetter: &testACLEntity{err: container.ErrNotFound}, + targetFinder: &testACLEntity{res: acl.Target_Others}, + }, + } + require.Error(t, preprocessor.preProcess(ctx, &testACLEntity{res: object.RequestGet})) + + }) + + t.Run("sticky bit", func(t *testing.T) { + checker := &testBasicChecker{ + actionErr: nil, + action: true, + sticky: true, + } + + s := &aclPreProcessor{ + log: test.NewTestLogger(false), + aclInfoReceiver: aclInfoReceiver{ + basicACLGetter: &testACLEntity{ + res: uint32(0), + }, + basicChecker: checker, + targetFinder: &testACLEntity{ + res: acl.Target_User, + }, + }, + basicChecker: checker, + } + + ownerKey := &test.DecodeKey(0).PublicKey + + ownerID, err := refs.NewOwnerID(ownerKey) + require.NoError(t, err) + + okItems := []func() []serviceRequest{ + // Read requests + func() []serviceRequest { + return []serviceRequest{ + new(object.GetRequest), + new(object.HeadRequest), + new(object.SearchRequest), + new(GetRangeRequest), + new(object.GetRangeHashRequest), + } + }, + // PutRequest / DeleteRequest (w/o token) + func() []serviceRequest { + req := object.MakePutRequestHeader(&Object{ + SystemHeader: SystemHeader{ + OwnerID: ownerID, + }, + }) + req.AddSignKey(nil, ownerKey) + putReq := &putRequest{ + PutRequest: req, + } + + delReq := new(object.DeleteRequest) + delReq.OwnerID = ownerID + delReq.AddSignKey(nil, ownerKey) + + return []serviceRequest{putReq, delReq} + }, + // PutRequest / DeleteRequest (w/ token) + func() []serviceRequest { + token := new(service.Token) + token.SetOwnerID(ownerID) + token.SetOwnerKey(crypto.MarshalPublicKey(ownerKey)) + + req := object.MakePutRequestHeader(&Object{ + SystemHeader: SystemHeader{ + OwnerID: ownerID, + }, + }) + req.SetToken(token) + putReq := &putRequest{ + PutRequest: req, + } + + delReq := new(object.DeleteRequest) + delReq.OwnerID = ownerID + delReq.SetToken(token) + + return []serviceRequest{putReq, delReq} + }, + } + + failItems := []func() []serviceRequest{ + // PutRequest / DeleteRequest (w/o token and wrong owner) + func() []serviceRequest { + otherOwner := ownerID + otherOwner[0]++ + + req := object.MakePutRequestHeader(&Object{ + SystemHeader: SystemHeader{ + OwnerID: otherOwner, + }, + }) + 
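+ // signed with ownerKey while the payload owner is otherOwner, so checkObjectRequestOwnerMatch is expected to fail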
req.AddSignKey(nil, ownerKey) + putReq := &putRequest{ + PutRequest: req, + } + + delReq := new(object.DeleteRequest) + delReq.OwnerID = otherOwner + delReq.AddSignKey(nil, ownerKey) + + return []serviceRequest{putReq, delReq} + }, + // PutRequest / DeleteRequest (w/ token w/ wrong owner) + func() []serviceRequest { + otherOwner := ownerID + otherOwner[0]++ + + token := new(service.Token) + token.SetOwnerID(ownerID) + token.SetOwnerKey(crypto.MarshalPublicKey(ownerKey)) + + req := object.MakePutRequestHeader(&Object{ + SystemHeader: SystemHeader{ + OwnerID: otherOwner, + }, + }) + req.SetToken(token) + putReq := &putRequest{ + PutRequest: req, + } + + delReq := new(object.DeleteRequest) + delReq.OwnerID = otherOwner + delReq.SetToken(token) + + return []serviceRequest{putReq, delReq} + }, + } + + for _, ok := range okItems { + for _, req := range ok() { + require.NoError(t, s.preProcess(ctx, req)) + } + } + + for _, fail := range failItems { + for _, req := range fail() { + require.Error(t, s.preProcess(ctx, req)) + } + } + }) + + t.Run("extended ACL", func(t *testing.T) { + target := acl.Target_Others + + req := &testACLEntity{ + res: object.RequestGet, + } + + actCalc := new(testACLEntity) + + checker := &testBasicChecker{ + action: true, + extended: true, + } + + s := &aclPreProcessor{ + log: test.NewTestLogger(false), + aclInfoReceiver: aclInfoReceiver{ + basicACLGetter: &testACLEntity{ + res: uint32(1), + }, + basicChecker: checker, + targetFinder: &testACLEntity{ + res: target, + }, + }, + basicChecker: checker, + + reqActionCalc: actCalc, + } + + // force to return non-ActionAllow + actCalc.res = acl.ActionAllow + 1 + require.EqualError(t, s.preProcess(ctx, req), errAccessDenied.Error()) + + // force to return ActionAllow + actCalc.res = acl.ActionAllow + require.NoError(t, s.preProcess(ctx, req)) + }) +} + +func TestTargetFinder(t *testing.T) { + ctx := context.TODO() + irKey := test.DecodeKey(2) + containerKey := test.DecodeKey(3) + prevContainerKey := test.DecodeKey(4) + + irInfo := new(ir.Info) + irNode := ir.Node{} + irNode.SetKey(crypto.MarshalPublicKey(&irKey.PublicKey)) + irInfo.SetNodes([]ir.Node{irNode}) + + finder := &targetFinder{ + log: test.NewTestLogger(false), + irStorage: &testACLEntity{ + res: irInfo, + }, + cnrLister: &testACLEntity{res: [][]bootstrap.NodeInfo{ + {{PubKey: crypto.MarshalPublicKey(&containerKey.PublicKey)}}, + {{PubKey: crypto.MarshalPublicKey(&prevContainerKey.PublicKey)}}, + }}, + } + + t.Run("trusted node", func(t *testing.T) { + + pk := &test.DecodeKey(0).PublicKey + + ownerKey := &test.DecodeKey(1).PublicKey + owner, err := refs.NewOwnerID(ownerKey) + require.NoError(t, err) + + token := new(service.Token) + token.SetSessionKey(crypto.MarshalPublicKey(pk)) + token.SetOwnerKey(crypto.MarshalPublicKey(ownerKey)) + token.SetOwnerID(owner) + + req := new(object.SearchRequest) + req.ContainerID = CID{1, 2, 3} + req.SetToken(token) + req.AddSignKey(nil, pk) + + finder.cnrOwnerChecker = &testACLEntity{ + f: func(items ...interface{}) { + require.Equal(t, req.CID(), items[0]) + require.Equal(t, owner, items[1]) + }, + res: true, + } + + require.Equal(t, acl.Target_User, finder.Target(ctx, req)) + }) + + t.Run("container owner", func(t *testing.T) { + finder.cnrOwnerChecker = &testACLEntity{res: true} + + req := new(object.SearchRequest) + req.AddSignKey(nil, &test.DecodeKey(0).PublicKey) + + require.Equal(t, acl.Target_User, finder.Target(ctx, req)) + }) + + t.Run("system owner", func(t *testing.T) { + finder.cnrOwnerChecker = &testACLEntity{res: 
false} + + req := new(object.SearchRequest) + req.AddSignKey(nil, &irKey.PublicKey) + require.Equal(t, acl.Target_System, finder.Target(ctx, req)) + + req = new(object.SearchRequest) + req.AddSignKey(nil, &containerKey.PublicKey) + require.Equal(t, acl.Target_System, finder.Target(ctx, req)) + + req = new(object.SearchRequest) + req.AddSignKey(nil, &prevContainerKey.PublicKey) + require.Equal(t, acl.Target_System, finder.Target(ctx, req)) + }) + + t.Run("other owner", func(t *testing.T) { + finder.cnrOwnerChecker = &testACLEntity{res: false} + + req := new(object.SearchRequest) + req.AddSignKey(nil, &test.DecodeKey(0).PublicKey) + require.Equal(t, acl.Target_Others, finder.Target(ctx, req)) + }) + + t.Run("can't fetch request owner", func(t *testing.T) { + req := new(object.SearchRequest) + + require.Equal(t, acl.Target_Unknown, finder.Target(ctx, req)) + }) + + t.Run("can't fetch container", func(t *testing.T) { + finder.cnrOwnerChecker = &testACLEntity{err: container.ErrNotFound} + + req := new(object.SearchRequest) + req.AddSignKey(nil, &test.DecodeKey(0).PublicKey) + require.Equal(t, acl.Target_Unknown, finder.Target(ctx, req)) + }) + + t.Run("can't fetch ir list", func(t *testing.T) { + finder.cnrOwnerChecker = &testACLEntity{res: false} + finder.irStorage = &testACLEntity{err: errors.New("blockchain is busy")} + + req := new(object.SearchRequest) + req.AddSignKey(nil, &test.DecodeKey(0).PublicKey) + require.Equal(t, acl.Target_Unknown, finder.Target(ctx, req)) + }) + + t.Run("can't fetch container list", func(t *testing.T) { + finder.cnrOwnerChecker = &testACLEntity{res: false} + finder.cnrLister = &testACLEntity{err: container.ErrNotFound} + + req := new(object.SearchRequest) + req.AddSignKey(nil, &test.DecodeKey(0).PublicKey) + require.Equal(t, acl.Target_Unknown, finder.Target(ctx, req)) + }) +} diff --git a/services/public/object/bearer.go b/services/public/object/bearer.go new file mode 100644 index 000000000..ec01dc584 --- /dev/null +++ b/services/public/object/bearer.go @@ -0,0 +1,72 @@ +package object + +import ( + "context" + + "github.com/nspcc-dev/neofs-api-go/service" + crypto "github.com/nspcc-dev/neofs-crypto" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/pkg/errors" +) + +type bearerTokenVerifier interface { + verifyBearerToken(context.Context, CID, service.BearerToken) error +} + +type complexBearerVerifier struct { + items []bearerTokenVerifier +} + +type bearerActualityVerifier struct { + epochRecv EpochReceiver +} + +type bearerOwnershipVerifier struct { + cnrOwnerChecker implementations.ContainerOwnerChecker +} + +type bearerSignatureVerifier struct{} + +var errWrongBearerOwner = errors.New("bearer author is not a container owner") + +func (s complexBearerVerifier) verifyBearerToken(ctx context.Context, cid CID, token service.BearerToken) error { + for i := range s.items { + if err := s.items[i].verifyBearerToken(ctx, cid, token); err != nil { + return err + } + } + + return nil +} + +func (s bearerActualityVerifier) verifyBearerToken(_ context.Context, _ CID, token service.BearerToken) error { + local := s.epochRecv.Epoch() + validUntil := token.ExpirationEpoch() + + if local > validUntil { + return errors.Errorf("bearer token is expired (local %d, valid until %d)", + local, + validUntil, + ) + } + + return nil +} + +func (s bearerOwnershipVerifier) verifyBearerToken(ctx context.Context, cid CID, token service.BearerToken) error { + isOwner, err := s.cnrOwnerChecker.IsContainerOwner(ctx, cid, token.GetOwnerID()) + if err != nil { + return 
err + } else if !isOwner { + return errWrongBearerOwner + } + + return nil +} + +func (s bearerSignatureVerifier) verifyBearerToken(_ context.Context, _ CID, token service.BearerToken) error { + return service.VerifySignatureWithKey( + crypto.UnmarshalPublicKey(token.GetOwnerKey()), + service.NewVerifiedBearerToken(token), + ) +} diff --git a/services/public/object/capacity.go b/services/public/object/capacity.go new file mode 100644 index 000000000..d0cc58c82 --- /dev/null +++ b/services/public/object/capacity.go @@ -0,0 +1,19 @@ +package object + +func (s *objectService) RelativeAvailableCap() float64 { + diff := float64(s.ls.Size()) / float64(s.storageCap) + if 1-diff < 0 { + return 0 + } + + return 1 - diff +} + +func (s *objectService) AbsoluteAvailableCap() uint64 { + localSize := uint64(s.ls.Size()) + if localSize > s.storageCap { + return 0 + } + + return s.storageCap - localSize +} diff --git a/services/public/object/capacity_test.go b/services/public/object/capacity_test.go new file mode 100644 index 000000000..deb34afb3 --- /dev/null +++ b/services/public/object/capacity_test.go @@ -0,0 +1,75 @@ +package object + +import ( + "testing" + + "github.com/nspcc-dev/neofs-node/lib/localstore" + "github.com/stretchr/testify/require" +) + +type ( + // Entity for mocking interfaces. + // Implementation of any interface intercepts arguments via f (if not nil). + // If err is not nil, it is returned as is. Otherwise, res is cast to the required type and returned without an error. + testCapacityEntity struct { + // Set of interfaces which the entity must implement; not all of their methods are called. + localstore.Localstore + + // Argument interceptor. Used to check that correct parameters are passed between components. + f func(...interface{}) + // Mocked result of any interface. + res interface{} + // Mocked error of any interface. 
+ err error + } +) + +var _ localstore.Localstore = (*testCapacityEntity)(nil) + +func (s *testCapacityEntity) Size() int64 { return s.res.(int64) } + +func TestObjectService_RelativeAvailableCap(t *testing.T) { + localStoreSize := int64(100) + + t.Run("oversize", func(t *testing.T) { + s := objectService{ + ls: &testCapacityEntity{res: localStoreSize}, + storageCap: uint64(localStoreSize - 1), + } + + require.Zero(t, s.RelativeAvailableCap()) + }) + + t.Run("correct calculation", func(t *testing.T) { + s := objectService{ + ls: &testCapacityEntity{res: localStoreSize}, + storageCap: 13 * uint64(localStoreSize), + } + + require.Equal(t, 1-float64(localStoreSize)/float64(s.storageCap), s.RelativeAvailableCap()) + }) +} + +func TestObjectService_AbsoluteAvailableCap(t *testing.T) { + localStoreSize := int64(100) + + t.Run("free space", func(t *testing.T) { + s := objectService{ + ls: &testCapacityEntity{res: localStoreSize}, + storageCap: uint64(localStoreSize), + } + + require.Zero(t, s.AbsoluteAvailableCap()) + s.storageCap-- + require.Zero(t, s.AbsoluteAvailableCap()) + }) + + t.Run("correct calculation", func(t *testing.T) { + s := objectService{ + ls: &testCapacityEntity{res: localStoreSize}, + storageCap: uint64(localStoreSize) + 12, + } + + require.Equal(t, s.storageCap-uint64(localStoreSize), s.AbsoluteAvailableCap()) + }) +} diff --git a/services/public/object/delete.go b/services/public/object/delete.go new file mode 100644 index 000000000..8e8c5e2a5 --- /dev/null +++ b/services/public/object/delete.go @@ -0,0 +1,285 @@ +package object + +import ( + "context" + "crypto/sha256" + "time" + + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-api-go/session" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/nspcc-dev/neofs-node/lib/transformer" + "github.com/nspcc-dev/neofs-node/lib/transport" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +type ( + objectRemover interface { + delete(context.Context, deleteInfo) error + } + + coreObjRemover struct { + delPrep deletePreparer + straightRem objectRemover + tokenStore session.PrivateTokenStore + + // Set of potential deletePreparer errors that won't be converted into errDeletePrepare + mErr map[error]struct{} + + log *zap.Logger + } + + straightObjRemover struct { + tombCreator tombstoneCreator + objStorer objectStorer + } + + tombstoneCreator interface { + createTombstone(context.Context, deleteInfo) *Object + } + + coreTombCreator struct{} + + deletePreparer interface { + prepare(context.Context, deleteInfo) ([]deleteInfo, error) + } + + coreDelPreparer struct { + timeout time.Duration + childLister objectChildrenLister + } + + deleteInfo interface { + transport.AddressInfo + GetOwnerID() OwnerID + } + + rawDeleteInfo struct { + rawAddrInfo + ownerID OwnerID + } +) + +const emRemovePart = "could not remove object part #%d of #%d" + +var ( + _ tombstoneCreator = (*coreTombCreator)(nil) + _ deleteInfo = (*rawDeleteInfo)(nil) + _ deletePreparer = (*coreDelPreparer)(nil) + _ objectRemover = (*straightObjRemover)(nil) + _ objectRemover = (*coreObjRemover)(nil) + _ deleteInfo = (*transportRequest)(nil) + + checksumOfEmptyPayload = sha256.Sum256([]byte{}) +) + +func (s *objectService) Delete(ctx context.Context, req *object.DeleteRequest) (res *object.DeleteResponse, err error) { + defer func() { + if r := recover(); r != nil { + s.log.Error(panicLogMsg, + zap.Stringer("request", object.RequestDelete), + zap.Any("reason", r), + ) + + err = 
errServerPanic + } + + err = s.statusCalculator.make(requestError{ + t: object.RequestDelete, + e: err, + }) + }() + + if _, err = s.requestHandler.handleRequest(ctx, handleRequestParams{ + request: req, + executor: s, + }); err != nil { + return + } + + res = makeDeleteResponse() + err = s.respPreparer.prepareResponse(ctx, req, res) + + return +} + +func (s *coreObjRemover) delete(ctx context.Context, dInfo deleteInfo) error { + token := dInfo.GetSessionToken() + if token == nil { + return errNilToken + } + + key := session.PrivateTokenKey{} + key.SetOwnerID(dInfo.GetOwnerID()) + key.SetTokenID(token.GetID()) + + pToken, err := s.tokenStore.Fetch(key) + if err != nil { + return &detailedError{ + error: errTokenRetrieval, + d: privateTokenRecvDetails(token.GetID(), token.GetOwnerID()), + } + } + + deleteList, err := s.delPrep.prepare(ctx, dInfo) + if err != nil { + if _, ok := s.mErr[errors.Cause(err)]; !ok { + s.log.Error("delete info preparation failure", + zap.String("error", err.Error()), + ) + + err = errDeletePrepare + } + + return err + } + + ctx = contextWithValues(ctx, + transformer.PrivateSessionToken, pToken, + transformer.PublicSessionToken, token, + implementations.BearerToken, dInfo.GetBearerToken(), + implementations.ExtendedHeaders, dInfo.ExtendedHeaders(), + ) + + for i := range deleteList { + if err := s.straightRem.delete(ctx, deleteList[i]); err != nil { + return errors.Wrapf(err, emRemovePart, i+1, len(deleteList)) + } + } + + return nil +} + +func (s *coreDelPreparer) prepare(ctx context.Context, src deleteInfo) ([]deleteInfo, error) { + var ( + ownerID = src.GetOwnerID() + token = src.GetSessionToken() + addr = src.GetAddress() + bearer = src.GetBearerToken() + extHdrs = src.ExtendedHeaders() + ) + + dInfo := newRawDeleteInfo() + dInfo.setOwnerID(ownerID) + dInfo.setAddress(addr) + dInfo.setTTL(service.NonForwardingTTL) + dInfo.setSessionToken(token) + dInfo.setBearerToken(bearer) + dInfo.setExtendedHeaders(extHdrs) + dInfo.setTimeout(s.timeout) + + ctx = contextWithValues(ctx, + transformer.PublicSessionToken, src.GetSessionToken(), + implementations.BearerToken, bearer, + implementations.ExtendedHeaders, extHdrs, + ) + + children := s.childLister.children(ctx, addr) + + res := make([]deleteInfo, 0, len(children)+1) + + res = append(res, dInfo) + + for i := range children { + dInfo = newRawDeleteInfo() + dInfo.setOwnerID(ownerID) + dInfo.setAddress(Address{ + ObjectID: children[i], + CID: addr.CID, + }) + dInfo.setTTL(service.NonForwardingTTL) + dInfo.setSessionToken(token) + dInfo.setBearerToken(bearer) + dInfo.setExtendedHeaders(extHdrs) + dInfo.setTimeout(s.timeout) + + res = append(res, dInfo) + } + + return res, nil +} + +func (s *straightObjRemover) delete(ctx context.Context, dInfo deleteInfo) error { + putInfo := newRawPutInfo() + putInfo.setHead( + s.tombCreator.createTombstone(ctx, dInfo), + ) + putInfo.setSessionToken(dInfo.GetSessionToken()) + putInfo.setBearerToken(dInfo.GetBearerToken()) + putInfo.setExtendedHeaders(dInfo.ExtendedHeaders()) + putInfo.setTTL(dInfo.GetTTL()) + putInfo.setTimeout(dInfo.GetTimeout()) + + _, err := s.objStorer.putObject(ctx, putInfo) + + return err +} + +func (s *coreTombCreator) createTombstone(ctx context.Context, dInfo deleteInfo) *Object { + addr := dInfo.GetAddress() + obj := &Object{ + SystemHeader: SystemHeader{ + ID: addr.ObjectID, + CID: addr.CID, + OwnerID: dInfo.GetOwnerID(), + }, + Headers: []Header{ + { + Value: &object.Header_Tombstone{ + Tombstone: new(object.Tombstone), + }, + }, + { + Value: 
&object.Header_PayloadChecksum{ + PayloadChecksum: checksumOfEmptyPayload[:], + }, + }, + }, + } + + return obj +} + +func (s *rawDeleteInfo) GetAddress() Address { + return s.addr +} + +func (s *rawDeleteInfo) setAddress(addr Address) { + s.addr = addr +} + +func (s *rawDeleteInfo) GetOwnerID() OwnerID { + return s.ownerID +} + +func (s *rawDeleteInfo) setOwnerID(id OwnerID) { + s.ownerID = id +} + +func (s *rawDeleteInfo) setAddrInfo(v *rawAddrInfo) { + s.rawAddrInfo = *v + s.setType(object.RequestDelete) +} + +func newRawDeleteInfo() *rawDeleteInfo { + res := new(rawDeleteInfo) + + res.setAddrInfo(newRawAddressInfo()) + + return res +} + +func (s *transportRequest) GetToken() *session.Token { + return s.serviceRequest.(*object.DeleteRequest).GetToken() +} +func (s *transportRequest) GetHead() *Object { + return &Object{SystemHeader: SystemHeader{ + ID: s.serviceRequest.(*object.DeleteRequest).Address.ObjectID, + }} +} + +func (s *transportRequest) GetOwnerID() OwnerID { + return s.serviceRequest.(*object.DeleteRequest).OwnerID +} diff --git a/services/public/object/delete_test.go b/services/public/object/delete_test.go new file mode 100644 index 000000000..c954a7c35 --- /dev/null +++ b/services/public/object/delete_test.go @@ -0,0 +1,449 @@ +package object + +import ( + "context" + "testing" + "time" + + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-api-go/session" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/rand" + "github.com/nspcc-dev/neofs-node/lib/transformer" + "github.com/nspcc-dev/neofs-node/lib/transport" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +type ( + // Entity for mocking interfaces. + // Implementation of any interface intercepts arguments via f (if not nil). + // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. + testDeleteEntity struct { + // Set of interfaces which testDeleteEntity must implement, but some methods from those does not call. + session.PrivateTokenStore + + // Argument interceptor. Used for ascertain of correct parameter passage between components. + f func(...interface{}) + // Mocked result of any interface. + res interface{} + // Mocked error of any interface. 
+ err error + } +) + +var ( + _ EpochReceiver = (*testDeleteEntity)(nil) + _ objectStorer = (*testDeleteEntity)(nil) + _ tombstoneCreator = (*testDeleteEntity)(nil) + _ objectChildrenLister = (*testDeleteEntity)(nil) + _ objectRemover = (*testDeleteEntity)(nil) + _ requestHandler = (*testDeleteEntity)(nil) + _ deletePreparer = (*testDeleteEntity)(nil) + _ responsePreparer = (*testDeleteEntity)(nil) +) + +func (s *testDeleteEntity) verify(context.Context, *session.Token, *Object) error { + return nil +} + +func (s *testDeleteEntity) Fetch(id session.PrivateTokenKey) (session.PrivateToken, error) { + if s.f != nil { + s.f(id) + } + if s.err != nil { + return nil, s.err + } + return s.res.(session.PrivateToken), nil +} + +func (s *testDeleteEntity) prepareResponse(_ context.Context, req serviceRequest, resp serviceResponse) error { + if s.f != nil { + s.f(req, resp) + } + return s.err +} + +func (s *testDeleteEntity) Epoch() uint64 { return s.res.(uint64) } + +func (s *testDeleteEntity) putObject(_ context.Context, p transport.PutInfo) (*Address, error) { + if s.f != nil { + s.f(p) + } + if s.err != nil { + return nil, s.err + } + return s.res.(*Address), nil +} + +func (s *testDeleteEntity) createTombstone(_ context.Context, p deleteInfo) *Object { + if s.f != nil { + s.f(p) + } + return s.res.(*Object) +} + +func (s *testDeleteEntity) children(ctx context.Context, addr Address) []ID { + if s.f != nil { + s.f(addr, ctx) + } + return s.res.([]ID) +} + +func (s *testDeleteEntity) delete(ctx context.Context, p deleteInfo) error { + if s.f != nil { + s.f(p, ctx) + } + return s.err +} + +func (s *testDeleteEntity) prepare(_ context.Context, p deleteInfo) ([]deleteInfo, error) { + if s.f != nil { + s.f(p) + } + if s.err != nil { + return nil, s.err + } + return s.res.([]deleteInfo), nil +} + +func (s *testDeleteEntity) handleRequest(_ context.Context, p handleRequestParams) (interface{}, error) { + if s.f != nil { + s.f(p) + } + return s.res, s.err +} + +func Test_objectService_Delete(t *testing.T) { + ctx := context.TODO() + req := &object.DeleteRequest{Address: testObjectAddress(t)} + + t.Run("handler error", func(t *testing.T) { + rhErr := internal.Error("test error for request handler") + + s := &objectService{ + statusCalculator: newStatusCalculator(), + } + + s.requestHandler = &testDeleteEntity{ + f: func(items ...interface{}) { + t.Run("correct request handler params", func(t *testing.T) { + p := items[0].(handleRequestParams) + require.Equal(t, req, p.request) + require.Equal(t, s, p.executor) + }) + }, + err: rhErr, // force requestHandler to return rhErr + } + + res, err := s.Delete(ctx, req) + // ascertain that error returns as expected + require.EqualError(t, err, rhErr.Error()) + require.Nil(t, res) + }) + + t.Run("correct result", func(t *testing.T) { + s := objectService{ + requestHandler: new(testDeleteEntity), + respPreparer: &testDeleteEntity{res: new(object.DeleteResponse)}, + + statusCalculator: newStatusCalculator(), + } + + res, err := s.Delete(ctx, req) + require.NoError(t, err) + require.Equal(t, new(object.DeleteResponse), res) + }) +} + +func Test_coreObjRemover_delete(t *testing.T) { + ctx := context.TODO() + pToken, err := session.NewPrivateToken(0) + require.NoError(t, err) + + addr := testObjectAddress(t) + + token := new(service.Token) + token.SetAddress(addr) + + req := newRawDeleteInfo() + req.setAddress(addr) + req.setSessionToken(token) + + t.Run("nil token", func(t *testing.T) { + s := new(coreObjRemover) + + req := newRawDeleteInfo() + require.Nil(t, 
req.GetSessionToken()) + + require.EqualError(t, s.delete(ctx, req), errNilToken.Error()) + }) + + t.Run("prepare error", func(t *testing.T) { + dpErr := internal.Error("test error for delete preparer") + + dp := &testDeleteEntity{ + f: func(items ...interface{}) { + t.Run("correct delete preparer params", func(t *testing.T) { + require.Equal(t, req, items[0]) + }) + }, + err: dpErr, // force deletePreparer to return dpErr + } + + s := &coreObjRemover{ + delPrep: dp, + tokenStore: &testDeleteEntity{res: pToken}, + mErr: map[error]struct{}{ + dpErr: {}, + }, + log: zap.L(), + } + + // ascertain that error returns as expected + require.EqualError(t, s.delete(ctx, req), dpErr.Error()) + + dp.err = internal.Error("some other error") + + // ascertain that error returns as expected + require.EqualError(t, s.delete(ctx, req), errDeletePrepare.Error()) + }) + + t.Run("straight remover error", func(t *testing.T) { + dInfo := newRawDeleteInfo() + dInfo.setAddress(addr) + dInfo.setSessionToken(token) + + list := []deleteInfo{ + dInfo, + } + + srErr := internal.Error("test error for straight remover") + + s := &coreObjRemover{ + delPrep: &testDeleteEntity{ + res: list, // force deletePreparer to return list + }, + straightRem: &testDeleteEntity{ + f: func(items ...interface{}) { + t.Run("correct straight remover params", func(t *testing.T) { + require.Equal(t, list[0], items[0]) + + ctx := items[1].(context.Context) + + require.Equal(t, + dInfo.GetSessionToken(), + ctx.Value(transformer.PublicSessionToken), + ) + + require.Equal(t, + pToken, + ctx.Value(transformer.PrivateSessionToken), + ) + }) + }, + err: srErr, // force objectRemover to return srErr + }, + tokenStore: &testDeleteEntity{res: pToken}, + } + + // ascertain that error returns as expected + require.EqualError(t, s.delete(ctx, req), errors.Wrapf(srErr, emRemovePart, 1, 1).Error()) + }) + + t.Run("success", func(t *testing.T) { + dInfo := newRawDeleteInfo() + dInfo.setAddress(addr) + dInfo.setSessionToken(token) + + list := []deleteInfo{ + dInfo, + } + + s := &coreObjRemover{ + delPrep: &testDeleteEntity{ + res: list, // force deletePreparer to return list + }, + straightRem: &testDeleteEntity{ + err: nil, // force objectRemover to return empty error + }, + tokenStore: &testDeleteEntity{res: pToken}, + } + + // ascertain that nil error returns + require.NoError(t, s.delete(ctx, req)) + }) +} + +func Test_coreDelPreparer_prepare(t *testing.T) { + var ( + ctx = context.TODO() + ownerID = OwnerID{1, 2, 3} + addr = testObjectAddress(t) + timeout = 5 * time.Second + token = new(service.Token) + childCount = 10 + children = make([]ID, 0, childCount) + ) + + req := newRawDeleteInfo() + req.setAddress(addr) + req.setSessionToken(token) + req.setOwnerID(ownerID) + + token.SetID(session.TokenID{1, 2, 3}) + + for i := 0; i < childCount; i++ { + children = append(children, testObjectAddress(t).ObjectID) + } + + s := &coreDelPreparer{ + timeout: timeout, + childLister: &testDeleteEntity{ + f: func(items ...interface{}) { + t.Run("correct children lister params", func(t *testing.T) { + require.Equal(t, addr, items[0]) + require.Equal(t, + token, + items[1].(context.Context).Value(transformer.PublicSessionToken), + ) + }) + }, + res: children, + }, + } + + res, err := s.prepare(ctx, req) + require.NoError(t, err) + + require.Len(t, res, childCount+1) + + for i := range res { + require.Equal(t, timeout, res[i].GetTimeout()) + require.Equal(t, token, res[i].GetSessionToken()) + require.Equal(t, uint32(service.NonForwardingTTL), res[i].GetTTL()) + + a := 
res[i].GetAddress() + require.Equal(t, addr.CID, a.CID) + if i > 0 { + require.Equal(t, children[i-1], a.ObjectID) + } else { + require.Equal(t, addr.ObjectID, a.ObjectID) + } + } +} + +func Test_straightObjRemover_delete(t *testing.T) { + var ( + ctx = context.TODO() + addr = testObjectAddress(t) + ttl = uint32(10) + timeout = 5 * time.Second + token = new(service.Token) + obj = &Object{SystemHeader: SystemHeader{ID: addr.ObjectID, CID: addr.CID}} + ) + + token.SetID(session.TokenID{1, 2, 3}) + + req := newRawDeleteInfo() + req.setTTL(ttl) + req.setTimeout(timeout) + req.setAddress(testObjectAddress(t)) + req.setSessionToken(token) + + t.Run("correct result", func(t *testing.T) { + osErr := internal.Error("test error for object storer") + + s := &straightObjRemover{ + tombCreator: &testDeleteEntity{ + f: func(items ...interface{}) { + t.Run("correct tombstone creator params", func(t *testing.T) { + require.Equal(t, req, items[0]) + }) + }, + res: obj, + }, + objStorer: &testDeleteEntity{ + f: func(items ...interface{}) { + t.Run("correct object storer params", func(t *testing.T) { + p := items[0].(transport.PutInfo) + require.Equal(t, timeout, p.GetTimeout()) + require.Equal(t, ttl, p.GetTTL()) + require.Equal(t, obj, p.GetHead()) + require.Equal(t, token, p.GetSessionToken()) + }) + }, + err: osErr, // force objectStorer to return osErr + }, + } + + // ascertain that error returns as expected + require.EqualError(t, s.delete(ctx, req), osErr.Error()) + }) +} + +func Test_coreTombCreator_createTombstone(t *testing.T) { + var ( + ctx = context.TODO() + addr = testObjectAddress(t) + ownerID = OwnerID{1, 2, 3} + ) + + req := newRawDeleteInfo() + req.setAddress(addr) + req.setOwnerID(ownerID) + + t.Run("correct result", func(t *testing.T) { + s := new(coreTombCreator) + + res := s.createTombstone(ctx, req) + require.Equal(t, addr.CID, res.SystemHeader.CID) + require.Equal(t, addr.ObjectID, res.SystemHeader.ID) + require.Equal(t, ownerID, res.SystemHeader.OwnerID) + + _, tsHdr := res.LastHeader(object.HeaderType(object.TombstoneHdr)) + require.NotNil(t, tsHdr) + require.Equal(t, new(object.Tombstone), tsHdr.Value.(*object.Header_Tombstone).Tombstone) + }) +} + +func Test_deleteInfo(t *testing.T) { + t.Run("address", func(t *testing.T) { + addr := testObjectAddress(t) + + req := newRawDeleteInfo() + req.setAddress(addr) + + require.Equal(t, addr, req.GetAddress()) + }) + + t.Run("owner ID", func(t *testing.T) { + ownerID := OwnerID{} + _, err := rand.Read(ownerID[:]) + require.NoError(t, err) + + req := newRawDeleteInfo() + req.setOwnerID(ownerID) + require.Equal(t, ownerID, req.GetOwnerID()) + + tReq := &transportRequest{serviceRequest: &object.DeleteRequest{OwnerID: ownerID}} + require.Equal(t, ownerID, tReq.GetOwnerID()) + }) + + t.Run("token", func(t *testing.T) { + token := new(session.Token) + _, err := rand.Read(token.ID[:]) + require.NoError(t, err) + + req := newRawDeleteInfo() + req.setSessionToken(token) + require.Equal(t, token, req.GetSessionToken()) + + dReq := new(object.DeleteRequest) + dReq.SetToken(token) + tReq := &transportRequest{serviceRequest: dReq} + require.Equal(t, token, tReq.GetSessionToken()) + }) +} diff --git a/services/public/object/execution.go b/services/public/object/execution.go new file mode 100644 index 000000000..a8880930a --- /dev/null +++ b/services/public/object/execution.go @@ -0,0 +1,471 @@ +package object + +import ( + "bytes" + "context" + + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-api-go/container" + 
"github.com/nspcc-dev/neofs-api-go/hash" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/nspcc-dev/neofs-node/lib/localstore" + "github.com/nspcc-dev/neofs-node/lib/placement" + "github.com/nspcc-dev/neofs-node/lib/transport" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +type ( + operationExecutor interface { + executeOperation(context.Context, transport.MetaInfo, responseItemHandler) error + } + + coreOperationExecutor struct { + pre executionParamsComputer + fin operationFinalizer + loc operationExecutor + } + + operationFinalizer interface { + completeExecution(context.Context, operationParams) error + } + + computableParams struct { + addr Address + stopCount int + allowPartialResult bool + tryPreviousNetMap bool + selfForward bool + maxRecycleCount int + reqType object.RequestType + } + + responseItemHandler interface { + handleItem(interface{}) + } + + operationParams struct { + computableParams + metaInfo transport.MetaInfo + itemHandler responseItemHandler + } + + coreOperationFinalizer struct { + curPlacementBuilder placementBuilder + prevPlacementBuilder placementBuilder + interceptorPreparer interceptorPreparer + workerPool WorkerPool + traverseExec implementations.ContainerTraverseExecutor + resLogger resultLogger + log *zap.Logger + } + + localFullObjectReceiver interface { + getObject(context.Context, Address) (*Object, error) + } + + localHeadReceiver interface { + headObject(context.Context, Address) (*Object, error) + } + + localObjectStorer interface { + putObject(context.Context, *Object) error + } + + localQueryImposer interface { + imposeQuery(context.Context, CID, []byte, int) ([]Address, error) + } + + localRangeReader interface { + getRange(context.Context, Address, Range) ([]byte, error) + } + + localRangeHasher interface { + getHashes(context.Context, Address, []Range, []byte) ([]Hash, error) + } + + localStoreExecutor struct { + salitor Salitor + epochRecv EpochReceiver + localStore localstore.Localstore + } + + localOperationExecutor struct { + objRecv localFullObjectReceiver + headRecv localHeadReceiver + objStore localObjectStorer + queryImp localQueryImposer + rngReader localRangeReader + rngHasher localRangeHasher + } + + coreHandler struct { + traverser containerTraverser + itemHandler responseItemHandler + resLogger resultLogger + reqType object.RequestType + } + + executionParamsComputer interface { + computeParams(*computableParams, transport.MetaInfo) + } + + coreExecParamsComp struct{} + + resultTracker interface { + trackResult(context.Context, resultItems) + } + + interceptorPreparer interface { + prepareInterceptor(interceptorItems) (func(context.Context, multiaddr.Multiaddr) bool, error) + } + + interceptorItems struct { + selfForward bool + handler transport.ResultHandler + metaInfo transport.MetaInfo + itemHandler responseItemHandler + } + + coreInterceptorPreparer struct { + localExec operationExecutor + addressStore implementations.AddressStore + } + + resultItems struct { + requestType object.RequestType + node multiaddr.Multiaddr + satisfactory bool + } + + idleResultTracker struct { + } + + resultLogger interface { + logErr(object.RequestType, multiaddr.Multiaddr, error) + } + + coreResultLogger struct { + mLog map[object.RequestType]struct{} + log *zap.Logger + } +) + +const ( + errIncompleteOperation = internal.Error("operation is not 
completed") + + emRangeReadFail = "could not read %d range data" +) + +var ( + _ resultTracker = (*idleResultTracker)(nil) + _ executionParamsComputer = (*coreExecParamsComp)(nil) + _ operationFinalizer = (*coreOperationFinalizer)(nil) + _ operationExecutor = (*localOperationExecutor)(nil) + _ operationExecutor = (*coreOperationExecutor)(nil) + _ transport.ResultHandler = (*coreHandler)(nil) + _ localFullObjectReceiver = (*localStoreExecutor)(nil) + _ localHeadReceiver = (*localStoreExecutor)(nil) + _ localObjectStorer = (*localStoreExecutor)(nil) + _ localRangeReader = (*localStoreExecutor)(nil) + _ localRangeHasher = (*localStoreExecutor)(nil) + _ resultLogger = (*coreResultLogger)(nil) +) + +func (s *coreExecParamsComp) computeParams(p *computableParams, req transport.MetaInfo) { + switch p.reqType = req.Type(); p.reqType { + case object.RequestPut: + if req.GetTTL() < service.NonForwardingTTL { + p.stopCount = 1 + } else { + p.stopCount = int(req.(transport.PutInfo).CopiesNumber()) + } + + p.allowPartialResult = false + p.tryPreviousNetMap = false + p.selfForward = false + p.addr = *req.(transport.PutInfo).GetHead().Address() + p.maxRecycleCount = 0 + case object.RequestGet: + p.stopCount = 1 + p.allowPartialResult = false + p.tryPreviousNetMap = true + p.selfForward = false + p.addr = req.(transport.AddressInfo).GetAddress() + p.maxRecycleCount = 0 + case object.RequestHead: + p.stopCount = 1 + p.allowPartialResult = false + p.tryPreviousNetMap = true + p.selfForward = false + p.addr = req.(transport.AddressInfo).GetAddress() + p.maxRecycleCount = 0 + case object.RequestSearch: + p.stopCount = -1 // to traverse all possible nodes in current and prev container + p.allowPartialResult = true + p.tryPreviousNetMap = true + p.selfForward = false + p.addr = Address{CID: req.(transport.SearchInfo).GetCID()} + p.maxRecycleCount = 0 + case object.RequestRange: + p.stopCount = 1 + p.allowPartialResult = false + p.tryPreviousNetMap = false + p.selfForward = false + p.addr = req.(transport.AddressInfo).GetAddress() + p.maxRecycleCount = 0 + case object.RequestRangeHash: + p.stopCount = 1 + p.allowPartialResult = false + p.tryPreviousNetMap = false + p.selfForward = false + p.addr = req.(transport.AddressInfo).GetAddress() + p.maxRecycleCount = 0 + } +} + +func (s idleResultTracker) trackResult(context.Context, resultItems) {} + +func (s *coreOperationExecutor) executeOperation(ctx context.Context, req transport.MetaInfo, h responseItemHandler) error { + // if TTL is zero then execute local operation + if req.GetTTL() < service.NonForwardingTTL { + return s.loc.executeOperation(ctx, req, h) + } + + p := new(computableParams) + s.pre.computeParams(p, req) + + return s.fin.completeExecution(ctx, operationParams{ + computableParams: *p, + metaInfo: req, + itemHandler: h, + }) +} + +func (s *coreOperationFinalizer) completeExecution(ctx context.Context, p operationParams) error { + traverser := newContainerTraverser(&traverseParams{ + tryPrevNM: p.tryPreviousNetMap, + addr: p.addr, + curPlacementBuilder: s.curPlacementBuilder, + prevPlacementBuilder: s.prevPlacementBuilder, + maxRecycleCount: p.maxRecycleCount, + stopCount: p.stopCount, + }) + + handler := &coreHandler{ + traverser: traverser, + itemHandler: p.itemHandler, + resLogger: s.resLogger, + reqType: p.reqType, + } + + interceptor, err := s.interceptorPreparer.prepareInterceptor(interceptorItems{ + selfForward: p.selfForward, + handler: handler, + metaInfo: p.metaInfo, + itemHandler: p.itemHandler, + }) + if err != nil { + return err + } + + 
ctx, cancel := context.WithCancel(ctx) + defer cancel() + + s.traverseExec.Execute(ctx, implementations.TraverseParams{ + TransportInfo: p.metaInfo, + Handler: handler, + Traverser: traverser, + WorkerPool: s.workerPool, + ExecutionInterceptor: interceptor, + }) + + switch err := errors.Cause(traverser.Err()); err { + case container.ErrNotFound: + return &detailedError{ + error: errContainerNotFound, + d: containerDetails(p.addr.CID, descContainerNotFound), + } + case placement.ErrEmptyNodes: + if !p.allowPartialResult { + return errIncompleteOperation + } + + return nil + default: + if err != nil { + s.log.Error("traverse failure", + zap.String("error", err.Error()), + ) + + err = errPlacementProblem + } else if !p.allowPartialResult && !traverser.finished() { + err = errIncompleteOperation + } + + return err + } +} + +func (s *coreInterceptorPreparer) prepareInterceptor(p interceptorItems) (func(context.Context, multiaddr.Multiaddr) bool, error) { + selfAddr, err := s.addressStore.SelfAddr() + if err != nil { + return nil, err + } + + return func(ctx context.Context, node multiaddr.Multiaddr) (res bool) { + if node.Equal(selfAddr) { + p.handler.HandleResult(ctx, selfAddr, nil, + s.localExec.executeOperation(ctx, p.metaInfo, p.itemHandler)) + return !p.selfForward + } + + return false + }, nil +} + +func (s *coreHandler) HandleResult(ctx context.Context, n multiaddr.Multiaddr, r interface{}, e error) { + ok := e == nil + + s.traverser.add(n, ok) + + if ok && r != nil { + s.itemHandler.handleItem(r) + } + + s.resLogger.logErr(s.reqType, n, e) +} + +func (s *coreResultLogger) logErr(t object.RequestType, n multiaddr.Multiaddr, e error) { + if e == nil { + return + } else if _, ok := s.mLog[t]; !ok { + return + } + + s.log.Error("object request failure", + zap.Stringer("type", t), + zap.Stringer("node", n), + zap.String("error", e.Error()), + ) +} + +func (s *localOperationExecutor) executeOperation(ctx context.Context, req transport.MetaInfo, h responseItemHandler) error { + switch req.Type() { + case object.RequestPut: + obj := req.(transport.PutInfo).GetHead() + if err := s.objStore.putObject(ctx, obj); err != nil { + return err + } + + h.handleItem(obj.Address()) + case object.RequestGet: + obj, err := s.objRecv.getObject(ctx, req.(transport.AddressInfo).GetAddress()) + if err != nil { + return err + } + + h.handleItem(obj) + case object.RequestHead: + head, err := s.headRecv.headObject(ctx, req.(transport.AddressInfo).GetAddress()) + if err != nil { + return err + } + + h.handleItem(head) + case object.RequestSearch: + r := req.(transport.SearchInfo) + + addrList, err := s.queryImp.imposeQuery(ctx, r.GetCID(), r.GetQuery(), 1) // TODO: add query version to SearchInfo + if err != nil { + return err + } + + h.handleItem(addrList) + case object.RequestRange: + r := req.(transport.RangeInfo) + + rangesData, err := s.rngReader.getRange(ctx, r.GetAddress(), r.GetRange()) + if err != nil { + return err + } + + h.handleItem(bytes.NewReader(rangesData)) + case object.RequestRangeHash: + r := req.(transport.RangeHashInfo) + + rangesHashes, err := s.rngHasher.getHashes(ctx, r.GetAddress(), r.GetRanges(), r.GetSalt()) + if err != nil { + return err + } + + h.handleItem(rangesHashes) + default: + return errors.Errorf(pmWrongRequestType, req) + } + + return nil +} + +func (s *localStoreExecutor) getHashes(ctx context.Context, addr Address, ranges []Range, salt []byte) ([]Hash, error) { + res := make([]Hash, 0, len(ranges)) + + for i := range ranges { + chunk, err := s.localStore.PRead(ctx, addr, 
ranges[i]) + if err != nil { + return nil, errors.Wrapf(err, emRangeReadFail, i+1) + } + + res = append(res, hash.Sum(s.salitor(chunk, salt))) + } + + return res, nil +} + +func (s *localStoreExecutor) getRange(ctx context.Context, addr Address, r Range) ([]byte, error) { + return s.localStore.PRead(ctx, addr, r) +} + +func (s *localStoreExecutor) putObject(ctx context.Context, obj *Object) error { + ctx = context.WithValue(ctx, localstore.StoreEpochValue, s.epochRecv.Epoch()) + + switch err := s.localStore.Put(ctx, obj); err { + // TODO: add all error cases + case nil: + return nil + default: + return errPutLocal + } +} + +func (s *localStoreExecutor) headObject(_ context.Context, addr Address) (*Object, error) { + m, err := s.localStore.Meta(addr) + if err != nil { + switch errors.Cause(err) { + case core.ErrNotFound: + return nil, errIncompleteOperation + default: + return nil, err + } + } + + return m.Object, nil +} + +func (s *localStoreExecutor) getObject(_ context.Context, addr Address) (*Object, error) { + obj, err := s.localStore.Get(addr) + if err != nil { + switch errors.Cause(err) { + case core.ErrNotFound: + return nil, errIncompleteOperation + default: + return nil, err + } + } + + return obj, nil +} diff --git a/services/public/object/execution_test.go b/services/public/object/execution_test.go new file mode 100644 index 000000000..81af16a62 --- /dev/null +++ b/services/public/object/execution_test.go @@ -0,0 +1,1207 @@ +package object + +import ( + "context" + "io" + "io/ioutil" + "testing" + "time" + + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-api-go/hash" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/nspcc-dev/neofs-node/lib/localstore" + "github.com/nspcc-dev/neofs-node/lib/transport" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +type ( + // Entity for mocking interfaces. + // Implementation of any interface intercepts arguments via f (if not nil). + // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. + testExecutionEntity struct { + // Set of interfaces which testExecutionEntity must implement, but some methods from those does not call. + transport.MetaInfo + localstore.Localstore + containerTraverser + + // Argument interceptor. Used for ascertain of correct parameter passage between components. + f func(...interface{}) + // Mocked result of any interface. + res interface{} + // Mocked error of any interface. 
+ err error + } +) + +func (s *testExecutionEntity) HandleResult(_ context.Context, n multiaddr.Multiaddr, r interface{}, e error) { + if s.f != nil { + s.f(n, r, e) + } +} + +var ( + _ transport.ResultHandler = (*testExecutionEntity)(nil) + _ interceptorPreparer = (*testExecutionEntity)(nil) + _ implementations.ContainerTraverseExecutor = (*testExecutionEntity)(nil) + _ WorkerPool = (*testExecutionEntity)(nil) + _ operationExecutor = (*testExecutionEntity)(nil) + _ placementBuilder = (*testExecutionEntity)(nil) + _ implementations.AddressStore = (*testExecutionEntity)(nil) + _ executionParamsComputer = (*testExecutionEntity)(nil) + _ operationFinalizer = (*testExecutionEntity)(nil) + _ EpochReceiver = (*testExecutionEntity)(nil) + _ localstore.Localstore = (*testExecutionEntity)(nil) + _ containerTraverser = (*testExecutionEntity)(nil) + _ responseItemHandler = (*testExecutionEntity)(nil) + _ resultTracker = (*testExecutionEntity)(nil) + _ localObjectStorer = (*testExecutionEntity)(nil) + _ localFullObjectReceiver = (*testExecutionEntity)(nil) + _ localHeadReceiver = (*testExecutionEntity)(nil) + _ localQueryImposer = (*testExecutionEntity)(nil) + _ localRangeReader = (*testExecutionEntity)(nil) + _ localRangeHasher = (*testExecutionEntity)(nil) +) + +func (s *testExecutionEntity) prepareInterceptor(p interceptorItems) (func(context.Context, multiaddr.Multiaddr) bool, error) { + if s.f != nil { + s.f(p) + } + if s.err != nil { + return nil, s.err + } + return s.res.(func(context.Context, multiaddr.Multiaddr) bool), nil +} + +func (s *testExecutionEntity) Execute(_ context.Context, p implementations.TraverseParams) { + if s.f != nil { + s.f(p) + } +} + +func (s *testExecutionEntity) Submit(func()) error { + return s.err +} + +func (s *testExecutionEntity) executeOperation(ctx context.Context, r transport.MetaInfo, h responseItemHandler) error { + if s.f != nil { + s.f(r, h) + } + return s.err +} + +func (s *testExecutionEntity) buildPlacement(_ context.Context, a Address, n ...multiaddr.Multiaddr) ([]multiaddr.Multiaddr, error) { + if s.f != nil { + s.f(a, n) + } + if s.err != nil { + return nil, s.err + } + return s.res.([]multiaddr.Multiaddr), nil +} + +func (s *testExecutionEntity) getHashes(_ context.Context, a Address, r []Range, sa []byte) ([]Hash, error) { + if s.f != nil { + s.f(a, r, sa) + } + if s.err != nil { + return nil, s.err + } + return s.res.([]Hash), nil +} + +func (s *testExecutionEntity) getRange(_ context.Context, addr Address, rngs Range) ([]byte, error) { + if s.f != nil { + s.f(addr, rngs) + } + if s.err != nil { + return nil, s.err + } + return s.res.([]byte), nil +} + +func (s *testExecutionEntity) imposeQuery(_ context.Context, c CID, d []byte, v int) ([]Address, error) { + if s.f != nil { + s.f(c, d, v) + } + if s.err != nil { + return nil, s.err + } + return s.res.([]Address), nil +} + +func (s *testExecutionEntity) headObject(_ context.Context, addr Address) (*Object, error) { + if s.f != nil { + s.f(addr) + } + if s.err != nil { + return nil, s.err + } + return s.res.(*Object), nil +} + +func (s *testExecutionEntity) getObject(_ context.Context, addr Address) (*Object, error) { + if s.f != nil { + s.f(addr) + } + if s.err != nil { + return nil, s.err + } + return s.res.(*Object), nil +} + +func (s *testExecutionEntity) putObject(_ context.Context, obj *Object) error { + if s.f != nil { + s.f(obj) + } + return s.err +} + +func (s *testExecutionEntity) trackResult(_ context.Context, p resultItems) { + if s.f != nil { + s.f(p) + } +} + +func (s 
*testExecutionEntity) handleItem(v interface{}) { + if s.f != nil { + s.f(v) + } +} + +func (s *testExecutionEntity) add(n multiaddr.Multiaddr, b bool) { + if s.f != nil { + s.f(n, b) + } +} + +func (s *testExecutionEntity) done(n multiaddr.Multiaddr) bool { + if s.f != nil { + s.f(n) + } + return s.res.(bool) +} + +func (s *testExecutionEntity) close() { + if s.f != nil { + s.f() + } +} + +func (s *testExecutionEntity) PRead(ctx context.Context, addr Address, rng Range) ([]byte, error) { + if s.f != nil { + s.f(addr, rng) + } + if s.err != nil { + return nil, s.err + } + return s.res.([]byte), nil +} + +func (s *testExecutionEntity) Put(ctx context.Context, obj *Object) error { + if s.f != nil { + s.f(ctx, obj) + } + return s.err +} + +func (s *testExecutionEntity) Get(addr Address) (*Object, error) { + if s.f != nil { + s.f(addr) + } + if s.err != nil { + return nil, s.err + } + return s.res.(*Object), nil +} + +func (s *testExecutionEntity) Meta(addr Address) (*Meta, error) { + if s.f != nil { + s.f(addr) + } + if s.err != nil { + return nil, s.err + } + return s.res.(*Meta), nil +} + +func (s *testExecutionEntity) Has(addr Address) (bool, error) { + if s.f != nil { + s.f(addr) + } + if s.err != nil { + return false, s.err + } + return s.res.(bool), nil +} + +func (s *testExecutionEntity) Epoch() uint64 { return s.res.(uint64) } + +func (s *testExecutionEntity) completeExecution(_ context.Context, p operationParams) error { + if s.f != nil { + s.f(p) + } + return s.err +} + +func (s *testExecutionEntity) computeParams(p *computableParams, r transport.MetaInfo) { + if s.f != nil { + s.f(p, r) + } +} + +func (s *testExecutionEntity) SelfAddr() (multiaddr.Multiaddr, error) { + if s.err != nil { + return nil, s.err + } + return s.res.(multiaddr.Multiaddr), nil +} + +func (s *testExecutionEntity) Type() object.RequestType { + return s.res.(object.RequestType) +} + +func Test_typeOfRequest(t *testing.T) { + t.Run("correct mapping", func(t *testing.T) { + items := []struct { + exp object.RequestType + v transport.MetaInfo + }{ + {exp: object.RequestSearch, v: &transportRequest{serviceRequest: new(object.SearchRequest)}}, + {exp: object.RequestSearch, v: newRawSearchInfo()}, + {exp: object.RequestPut, v: new(putRequest)}, + {exp: object.RequestPut, v: &transportRequest{serviceRequest: new(object.PutRequest)}}, + {exp: object.RequestGet, v: newRawGetInfo()}, + {exp: object.RequestGet, v: &transportRequest{serviceRequest: new(object.GetRequest)}}, + {exp: object.RequestHead, v: newRawHeadInfo()}, + {exp: object.RequestHead, v: &transportRequest{serviceRequest: new(object.HeadRequest)}}, + {exp: object.RequestRange, v: newRawRangeInfo()}, + {exp: object.RequestRange, v: &transportRequest{serviceRequest: new(GetRangeRequest)}}, + {exp: object.RequestRangeHash, v: newRawRangeHashInfo()}, + {exp: object.RequestRangeHash, v: &transportRequest{serviceRequest: new(object.GetRangeHashRequest)}}, + } + + for i := range items { + require.Equal(t, items[i].exp, items[i].v.Type()) + } + }) +} + +func Test_coreExecParamsComp_computeParams(t *testing.T) { + s := new(coreExecParamsComp) + addr := testObjectAddress(t) + + t.Run("put", func(t *testing.T) { + addr := testObjectAddress(t) + + p := new(computableParams) + r := &putRequest{PutRequest: &object.PutRequest{ + R: &object.PutRequest_Header{ + Header: &object.PutRequest_PutHeader{ + Object: &Object{ + SystemHeader: SystemHeader{ + ID: addr.ObjectID, + CID: addr.CID, + }, + }, + }, + }, + }} + + s.computeParams(p, r) + + t.Run("non-forwarding behavior", 
func(t *testing.T) { + require.Equal(t, 1, p.stopCount) + }) + + r.SetTTL(service.NonForwardingTTL) + + s.computeParams(p, r) + + require.False(t, p.allowPartialResult) + require.False(t, p.tryPreviousNetMap) + require.False(t, p.selfForward) + require.Equal(t, addr, p.addr) + require.Equal(t, 0, p.maxRecycleCount) + require.Equal(t, 0, int(r.CopiesNumber())) + }) + + t.Run("get", func(t *testing.T) { + p := new(computableParams) + + r := newRawGetInfo() + r.setAddress(addr) + + s.computeParams(p, r) + + require.Equal(t, 1, p.stopCount) + require.False(t, p.allowPartialResult) + require.True(t, p.tryPreviousNetMap) + require.False(t, p.selfForward) + require.Equal(t, addr, p.addr) + require.Equal(t, 0, p.maxRecycleCount) + }) + + t.Run("head", func(t *testing.T) { + p := new(computableParams) + r := &transportRequest{serviceRequest: &object.HeadRequest{Address: addr}} + + s.computeParams(p, r) + + require.Equal(t, 1, p.stopCount) + require.False(t, p.allowPartialResult) + require.True(t, p.tryPreviousNetMap) + require.False(t, p.selfForward) + require.Equal(t, addr, p.addr) + require.Equal(t, 0, p.maxRecycleCount) + }) + + t.Run("search", func(t *testing.T) { + p := new(computableParams) + r := &transportRequest{serviceRequest: &object.SearchRequest{ContainerID: addr.CID}} + + s.computeParams(p, r) + + require.Equal(t, -1, p.stopCount) + require.True(t, p.allowPartialResult) + require.True(t, p.tryPreviousNetMap) + require.False(t, p.selfForward) + require.Equal(t, addr.CID, p.addr.CID) + require.True(t, p.addr.ObjectID.Empty()) + require.Equal(t, 0, p.maxRecycleCount) + }) + + t.Run("range", func(t *testing.T) { + p := new(computableParams) + + r := newRawRangeInfo() + r.setAddress(addr) + + s.computeParams(p, r) + + require.Equal(t, 1, p.stopCount) + require.False(t, p.allowPartialResult) + require.False(t, p.tryPreviousNetMap) + require.False(t, p.selfForward) + require.Equal(t, addr, p.addr) + require.Equal(t, 0, p.maxRecycleCount) + }) + + t.Run("range hash", func(t *testing.T) { + p := new(computableParams) + + r := newRawRangeHashInfo() + r.setAddress(addr) + + s.computeParams(p, r) + + require.Equal(t, 1, p.stopCount) + require.False(t, p.allowPartialResult) + require.False(t, p.tryPreviousNetMap) + require.False(t, p.selfForward) + require.Equal(t, addr, p.addr) + require.Equal(t, 0, p.maxRecycleCount) + }) +} + +func Test_coreOperationExecutor_executeOperation(t *testing.T) { + ctx := context.TODO() + + t.Run("correct result", func(t *testing.T) { + t.Run("error", func(t *testing.T) { + p := new(testExecutionEntity) + req := newRawPutInfo() + req.setTTL(1) + finErr := internal.Error("test error for operation finalizer") + + s := &coreOperationExecutor{ + pre: &testExecutionEntity{ + f: func(items ...interface{}) { + t.Run("correct params computer arguments", func(t *testing.T) { + require.Equal(t, computableParams{}, *items[0].(*computableParams)) + require.Equal(t, req, items[1].(transport.MetaInfo)) + }) + }, + }, + fin: &testExecutionEntity{ + f: func(items ...interface{}) { + par := items[0].(operationParams) + require.Equal(t, req, par.metaInfo) + require.Equal(t, p, par.itemHandler) + }, + err: finErr, + }, + loc: new(testExecutionEntity), + } + + require.EqualError(t, + s.executeOperation(ctx, req, p), + finErr.Error(), + ) + }) + + t.Run("zero ttl", func(t *testing.T) { + p := new(testExecutionEntity) + req := newRawPutInfo() + finErr := internal.Error("test error for operation finalizer") + + s := &coreOperationExecutor{ + loc: &testExecutionEntity{ + f: func(items 
...interface{}) { + require.Equal(t, req, items[0]) + require.Equal(t, p, items[1]) + }, + err: finErr, + }, + } + + require.EqualError(t, + s.executeOperation(ctx, req, p), + finErr.Error(), + ) + }) + }) +} + +func Test_localStoreExecutor(t *testing.T) { + ctx := context.TODO() + addr := testObjectAddress(t) + + t.Run("put", func(t *testing.T) { + epoch := uint64(100) + obj := new(Object) + putErr := internal.Error("test error for put") + + ls := &testExecutionEntity{ + f: func(items ...interface{}) { + t.Run("correct local store put params", func(t *testing.T) { + v, ok := items[0].(context.Context).Value(localstore.StoreEpochValue).(uint64) + require.True(t, ok) + require.Equal(t, epoch, v) + + require.Equal(t, obj, items[1].(*Object)) + }) + }, + } + + s := &localStoreExecutor{ + epochRecv: &testExecutionEntity{ + res: epoch, + }, + localStore: ls, + } + + require.NoError(t, s.putObject(ctx, obj)) + + ls.err = putErr + + require.EqualError(t, + s.putObject(ctx, obj), + errPutLocal.Error(), + ) + }) + + t.Run("get", func(t *testing.T) { + t.Run("error", func(t *testing.T) { + getErr := internal.Error("test error for get") + + ls := &testExecutionEntity{ + f: func(items ...interface{}) { + t.Run("correct local store get params", func(t *testing.T) { + require.Equal(t, addr, items[0].(Address)) + }) + }, + err: getErr, + } + + s := &localStoreExecutor{ + localStore: ls, + } + + res, err := s.getObject(ctx, addr) + require.EqualError(t, err, getErr.Error()) + require.Nil(t, res) + + ls.err = errors.Wrap(core.ErrNotFound, "wrap message") + + res, err = s.getObject(ctx, addr) + require.EqualError(t, err, errIncompleteOperation.Error()) + require.Nil(t, res) + }) + + t.Run("success", func(t *testing.T) { + obj := new(Object) + + s := &localStoreExecutor{ + localStore: &testExecutionEntity{ + res: obj, + }, + } + + res, err := s.getObject(ctx, addr) + require.NoError(t, err) + require.Equal(t, obj, res) + }) + }) + + t.Run("head", func(t *testing.T) { + t.Run("error", func(t *testing.T) { + headErr := internal.Error("test error for head") + + ls := &testExecutionEntity{ + err: headErr, + } + + s := &localStoreExecutor{ + localStore: ls, + } + + res, err := s.headObject(ctx, addr) + require.EqualError(t, err, headErr.Error()) + require.Nil(t, res) + + ls.err = errors.Wrap(core.ErrNotFound, "wrap message") + + res, err = s.headObject(ctx, addr) + require.EqualError(t, err, errIncompleteOperation.Error()) + require.Nil(t, res) + }) + + t.Run("success", func(t *testing.T) { + obj := new(Object) + + s := &localStoreExecutor{ + localStore: &testExecutionEntity{ + res: &Meta{Object: obj}, + }, + } + + res, err := s.headObject(ctx, addr) + require.NoError(t, err) + require.Equal(t, obj, res) + }) + }) + + t.Run("get range", func(t *testing.T) { + t.Run("error", func(t *testing.T) { + rngErr := internal.Error("test error for range reader") + + s := &localStoreExecutor{ + localStore: &testExecutionEntity{ + err: rngErr, + }, + } + + res, err := s.getRange(ctx, addr, Range{}) + require.EqualError(t, err, rngErr.Error()) + require.Empty(t, res) + }) + + t.Run("success", func(t *testing.T) { + rng := Range{Offset: 1, Length: 1} + + d := testData(t, 10) + + s := &localStoreExecutor{ + localStore: &testExecutionEntity{ + f: func(items ...interface{}) { + t.Run("correct local store pread params", func(t *testing.T) { + require.Equal(t, addr, items[0].(Address)) + require.Equal(t, rng, items[1].(Range)) + }) + }, + res: d, + }, + } + + res, err := s.getRange(ctx, addr, rng) + require.NoError(t, err) + 
require.Equal(t, d, res) + }) + }) + + t.Run("get range hash", func(t *testing.T) { + t.Run("empty range list", func(t *testing.T) { + s := &localStoreExecutor{ + localStore: new(testExecutionEntity), + } + + res, err := s.getHashes(ctx, addr, nil, nil) + require.NoError(t, err) + require.Empty(t, res) + }) + + t.Run("error", func(t *testing.T) { + rhErr := internal.Error("test error for range hasher") + + s := &localStoreExecutor{ + localStore: &testExecutionEntity{ + err: rhErr, + }, + } + + res, err := s.getHashes(ctx, addr, make([]Range, 1), nil) + require.EqualError(t, err, errors.Wrapf(rhErr, emRangeReadFail, 1).Error()) + require.Empty(t, res) + }) + + t.Run("success", func(t *testing.T) { + rngs := []Range{ + {Offset: 0, Length: 0}, + {Offset: 1, Length: 1}, + } + + d := testData(t, 64) + salt := testData(t, 20) + + callNum := 0 + + s := &localStoreExecutor{ + salitor: hash.SaltXOR, + localStore: &testExecutionEntity{ + f: func(items ...interface{}) { + t.Run("correct local store pread params", func(t *testing.T) { + require.Equal(t, addr, items[0].(Address)) + require.Equal(t, rngs[callNum], items[1].(Range)) + callNum++ + }) + }, + res: d, + }, + } + + res, err := s.getHashes(ctx, addr, rngs, salt) + require.NoError(t, err) + require.Len(t, res, len(rngs)) + for i := range rngs { + require.Equal(t, hash.Sum(hash.SaltXOR(d, salt)), res[i]) + } + }) + }) +} + +func Test_coreHandler_HandleResult(t *testing.T) { + ctx := context.TODO() + node := testNode(t, 1) + + t.Run("error", func(t *testing.T) { + handled := false + err := internal.Error("") + + s := &coreHandler{ + traverser: &testExecutionEntity{ + f: func(items ...interface{}) { + t.Run("correct traverser params", func(t *testing.T) { + require.Equal(t, node, items[0].(multiaddr.Multiaddr)) + require.False(t, items[1].(bool)) + }) + }, + }, + itemHandler: &testExecutionEntity{ + f: func(items ...interface{}) { + handled = true + }, + }, + resLogger: new(coreResultLogger), + } + + s.HandleResult(ctx, node, nil, err) + + require.False(t, handled) + }) + + t.Run("success", func(t *testing.T) { + handled := false + res := testData(t, 10) + + s := &coreHandler{ + traverser: &testExecutionEntity{ + f: func(items ...interface{}) { + t.Run("correct traverser params", func(t *testing.T) { + require.Equal(t, node, items[0].(multiaddr.Multiaddr)) + require.True(t, items[1].(bool)) + }) + }, + }, + itemHandler: &testExecutionEntity{ + f: func(items ...interface{}) { + require.Equal(t, res, items[0]) + }, + }, + resLogger: new(coreResultLogger), + } + + s.HandleResult(ctx, node, res, nil) + + require.False(t, handled) + }) +} + +func Test_localOperationExecutor_executeOperation(t *testing.T) { + ctx := context.TODO() + + addr := testObjectAddress(t) + + obj := &Object{ + SystemHeader: SystemHeader{ + ID: addr.ObjectID, + CID: addr.CID, + }, + } + + t.Run("wrong type", func(t *testing.T) { + req := &testExecutionEntity{ + res: object.RequestType(-1), + } + + require.EqualError(t, + new(localOperationExecutor).executeOperation(ctx, req, nil), + errors.Errorf(pmWrongRequestType, req).Error(), + ) + }) + + t.Run("put", func(t *testing.T) { + req := &putRequest{PutRequest: &object.PutRequest{ + R: &object.PutRequest_Header{ + Header: &object.PutRequest_PutHeader{ + Object: obj, + }, + }, + }} + + t.Run("error", func(t *testing.T) { + putErr := internal.Error("test error for put") + + s := &localOperationExecutor{ + objStore: &testExecutionEntity{ + f: func(items ...interface{}) { + require.Equal(t, obj, items[0].(*Object)) + }, + err: putErr, + 
}, + } + + require.EqualError(t, + s.executeOperation(ctx, req, nil), + putErr.Error(), + ) + }) + + t.Run("success", func(t *testing.T) { + h := &testExecutionEntity{ + f: func(items ...interface{}) { + require.Equal(t, addr, *items[0].(*Address)) + }, + } + + s := &localOperationExecutor{ + objStore: new(testExecutionEntity), + } + + require.NoError(t, s.executeOperation(ctx, req, h)) + }) + }) + + t.Run("get", func(t *testing.T) { + req := newRawGetInfo() + req.setAddress(addr) + + t.Run("error", func(t *testing.T) { + getErr := internal.Error("test error for get") + + s := &localOperationExecutor{ + objRecv: &testExecutionEntity{ + f: func(items ...interface{}) { + require.Equal(t, addr, items[0].(Address)) + }, + err: getErr, + }, + } + + require.EqualError(t, + s.executeOperation(ctx, req, nil), + getErr.Error(), + ) + }) + + t.Run("success", func(t *testing.T) { + h := &testExecutionEntity{ + f: func(items ...interface{}) { + require.Equal(t, obj, items[0].(*Object)) + }, + } + + s := &localOperationExecutor{ + objRecv: &testExecutionEntity{ + res: obj, + }, + } + + require.NoError(t, s.executeOperation(ctx, req, h)) + }) + }) + + t.Run("head", func(t *testing.T) { + req := &transportRequest{serviceRequest: &object.HeadRequest{ + Address: addr, + }} + + t.Run("error", func(t *testing.T) { + headErr := internal.Error("test error for head") + + s := &localOperationExecutor{ + headRecv: &testExecutionEntity{ + f: func(items ...interface{}) { + require.Equal(t, addr, items[0].(Address)) + }, + err: headErr, + }, + } + + require.EqualError(t, + s.executeOperation(ctx, req, nil), + headErr.Error(), + ) + }) + + t.Run("success", func(t *testing.T) { + h := &testExecutionEntity{ + f: func(items ...interface{}) { + require.Equal(t, obj, items[0].(*Object)) + }, + } + + s := &localOperationExecutor{ + headRecv: &testExecutionEntity{ + res: obj, + }, + } + + require.NoError(t, s.executeOperation(ctx, req, h)) + }) + }) + + t.Run("search", func(t *testing.T) { + cid := testObjectAddress(t).CID + testQuery := testData(t, 10) + + req := &transportRequest{serviceRequest: &object.SearchRequest{ + ContainerID: cid, + Query: testQuery, + }} + + t.Run("error", func(t *testing.T) { + searchErr := internal.Error("test error for search") + + s := &localOperationExecutor{ + queryImp: &testExecutionEntity{ + f: func(items ...interface{}) { + require.Equal(t, cid, items[0].(CID)) + require.Equal(t, testQuery, items[1].([]byte)) + require.Equal(t, 1, items[2].(int)) + }, + err: searchErr, + }, + } + + require.EqualError(t, + s.executeOperation(ctx, req, nil), + searchErr.Error(), + ) + }) + + t.Run("success", func(t *testing.T) { + addrList := testAddrList(t, 5) + + h := &testExecutionEntity{ + f: func(items ...interface{}) { + require.Equal(t, addrList, items[0].([]Address)) + }, + } + + s := &localOperationExecutor{ + queryImp: &testExecutionEntity{ + res: addrList, + }, + } + + require.NoError(t, s.executeOperation(ctx, req, h)) + }) + }) + + t.Run("get range", func(t *testing.T) { + rng := Range{Offset: 1, Length: 1} + + req := newRawRangeInfo() + req.setAddress(addr) + req.setRange(rng) + + t.Run("error", func(t *testing.T) { + rrErr := internal.Error("test error for range reader") + + s := &localOperationExecutor{ + rngReader: &testExecutionEntity{ + f: func(items ...interface{}) { + require.Equal(t, addr, items[0].(Address)) + require.Equal(t, rng, items[1].(Range)) + }, + err: rrErr, + }, + } + + require.EqualError(t, + s.executeOperation(ctx, req, nil), + rrErr.Error(), + ) + }) + + t.Run("success", 
func(t *testing.T) { + data := testData(t, 10) + + h := &testExecutionEntity{ + f: func(items ...interface{}) { + d, err := ioutil.ReadAll(items[0].(io.Reader)) + require.NoError(t, err) + require.Equal(t, data, d) + }, + } + + s := &localOperationExecutor{ + rngReader: &testExecutionEntity{ + res: data, + }, + } + + require.NoError(t, s.executeOperation(ctx, req, h)) + }) + }) + + t.Run("get range hash", func(t *testing.T) { + rngs := []Range{ + {Offset: 0, Length: 0}, + {Offset: 1, Length: 1}, + } + + salt := testData(t, 10) + + req := newRawRangeHashInfo() + req.setAddress(addr) + req.setRanges(rngs) + req.setSalt(salt) + + t.Run("error", func(t *testing.T) { + rhErr := internal.Error("test error for range hasher") + + s := &localOperationExecutor{ + rngHasher: &testExecutionEntity{ + f: func(items ...interface{}) { + require.Equal(t, addr, items[0].(Address)) + require.Equal(t, rngs, items[1].([]Range)) + require.Equal(t, salt, items[2].([]byte)) + }, + err: rhErr, + }, + } + + require.EqualError(t, + s.executeOperation(ctx, req, nil), + rhErr.Error(), + ) + }) + + t.Run("success", func(t *testing.T) { + hashes := []Hash{ + hash.Sum(testData(t, 10)), + hash.Sum(testData(t, 10)), + } + + h := &testExecutionEntity{ + f: func(items ...interface{}) { + require.Equal(t, hashes, items[0].([]Hash)) + }, + } + + s := &localOperationExecutor{ + rngHasher: &testExecutionEntity{ + res: hashes, + }, + } + + require.NoError(t, s.executeOperation(ctx, req, h)) + }) + }) +} + +func Test_coreOperationFinalizer_completeExecution(t *testing.T) { + ctx := context.TODO() + + t.Run("address store failure", func(t *testing.T) { + asErr := internal.Error("test error for address store") + + s := &coreOperationFinalizer{ + interceptorPreparer: &testExecutionEntity{ + err: asErr, + }, + } + + require.EqualError(t, s.completeExecution(ctx, operationParams{ + metaInfo: &transportRequest{serviceRequest: new(object.SearchRequest)}, + }), asErr.Error()) + }) + + t.Run("correct execution construction", func(t *testing.T) { + req := &transportRequest{ + serviceRequest: &object.SearchRequest{ + ContainerID: testObjectAddress(t).CID, + Query: testData(t, 10), + QueryVersion: 1, + }, + timeout: 10 * time.Second, + } + + req.SetTTL(10) + + itemHandler := new(testExecutionEntity) + opParams := operationParams{ + computableParams: computableParams{ + addr: testObjectAddress(t), + stopCount: 2, + allowPartialResult: false, + tryPreviousNetMap: false, + selfForward: true, + maxRecycleCount: 7, + }, + metaInfo: req, + itemHandler: itemHandler, + } + + curPl := new(testExecutionEntity) + prevPl := new(testExecutionEntity) + wp := new(testExecutionEntity) + s := &coreOperationFinalizer{ + curPlacementBuilder: curPl, + prevPlacementBuilder: prevPl, + interceptorPreparer: &testExecutionEntity{ + res: func(context.Context, multiaddr.Multiaddr) bool { return true }, + }, + workerPool: wp, + traverseExec: &testExecutionEntity{ + f: func(items ...interface{}) { + t.Run("correct traverse executor params", func(t *testing.T) { + p := items[0].(implementations.TraverseParams) + + require.True(t, p.ExecutionInterceptor(ctx, nil)) + require.Equal(t, req, p.TransportInfo) + require.Equal(t, wp, p.WorkerPool) + + tr := p.Traverser.(*coreTraverser) + require.Equal(t, opParams.addr, tr.addr) + require.Equal(t, opParams.tryPreviousNetMap, tr.tryPrevNM) + require.Equal(t, curPl, tr.curPlacementBuilder) + require.Equal(t, prevPl, tr.prevPlacementBuilder) + require.Equal(t, opParams.maxRecycleCount, tr.maxRecycleCount) + require.Equal(t, 
opParams.stopCount, tr.stopCount) + + h := p.Handler.(*coreHandler) + require.Equal(t, tr, h.traverser) + require.Equal(t, itemHandler, h.itemHandler) + }) + }, + }, + log: zap.L(), + } + + require.EqualError(t, s.completeExecution(ctx, opParams), errIncompleteOperation.Error()) + }) +} + +func Test_coreInterceptorPreparer_prepareInterceptor(t *testing.T) { + t.Run("address store failure", func(t *testing.T) { + asErr := internal.Error("test error for address store") + + s := &coreInterceptorPreparer{ + addressStore: &testExecutionEntity{ + err: asErr, + }, + } + + res, err := s.prepareInterceptor(interceptorItems{}) + require.EqualError(t, err, asErr.Error()) + require.Nil(t, res) + }) + + t.Run("correct interceptor", func(t *testing.T) { + ctx := context.TODO() + selfAddr := testNode(t, 0) + + t.Run("local node", func(t *testing.T) { + req := new(transportRequest) + itemHandler := new(testExecutionEntity) + + localErr := internal.Error("test error for local executor") + + p := interceptorItems{ + selfForward: true, + handler: &testExecutionEntity{ + f: func(items ...interface{}) { + t.Run("correct local executor params", func(t *testing.T) { + require.Equal(t, selfAddr, items[0].(multiaddr.Multiaddr)) + require.Nil(t, items[1]) + require.EqualError(t, items[2].(error), localErr.Error()) + }) + }, + }, + metaInfo: req, + itemHandler: itemHandler, + } + + s := &coreInterceptorPreparer{ + localExec: &testExecutionEntity{ + f: func(items ...interface{}) { + require.Equal(t, req, items[0].(transport.MetaInfo)) + require.Equal(t, itemHandler, items[1].(responseItemHandler)) + }, + err: localErr, + }, + addressStore: &testExecutionEntity{ + res: selfAddr, + }, + } + + res, err := s.prepareInterceptor(p) + require.NoError(t, err) + require.False(t, res(ctx, selfAddr)) + }) + + t.Run("remote node", func(t *testing.T) { + node := testNode(t, 1) + remoteNode := testNode(t, 2) + + p := interceptorItems{} + + s := &coreInterceptorPreparer{ + addressStore: &testExecutionEntity{ + res: remoteNode, + }, + } + + res, err := s.prepareInterceptor(p) + require.NoError(t, err) + require.False(t, res(ctx, node)) + }) + }) +} + +// testAddrList returns count random object addresses. 
+func testAddrList(t *testing.T, count int) (res []Address) { + for i := 0; i < count; i++ { + res = append(res, testObjectAddress(t)) + } + return +} diff --git a/services/public/object/filter.go b/services/public/object/filter.go new file mode 100644 index 000000000..dc8ddc6c9 --- /dev/null +++ b/services/public/object/filter.go @@ -0,0 +1,251 @@ +package object + +import ( + "context" + + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-api-go/storagegroup" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/nspcc-dev/neofs-node/lib/localstore" + "github.com/nspcc-dev/neofs-node/lib/objutil" + "github.com/pkg/errors" +) + +type ( + filterParams struct { + sgInfoRecv storagegroup.InfoReceiver + tsPresChecker tombstonePresenceChecker + maxProcSize uint64 + storageCap uint64 + localStore localstore.Localstore + epochRecv EpochReceiver + verifier objutil.Verifier + + maxPayloadSize uint64 + } + + filterConstructor func(p *filterParams) localstore.FilterFunc + + tombstonePresenceChecker interface { + hasLocalTombstone(addr Address) (bool, error) + } + + coreTSPresChecker struct { + localStore localstore.Localstore + } +) + +const ( + ttlValue = "TTL" +) + +const ( + commonObjectFN = "OBJECTS_OVERALL" + storageGroupFN = "STORAGE_GROUP" + tombstoneOverwriteFN = "TOMBSTONE_OVERWRITE" + objSizeFN = "OBJECT_SIZE" + creationEpochFN = "CREATION_EPOCH" + objIntegrityFN = "OBJECT_INTEGRITY" + payloadSizeFN = "PAYLOAD_SIZE" +) + +const ( + errObjectFilter = internal.Error("incoming object has not passed filter") +) + +var ( + _ tombstonePresenceChecker = (*coreTSPresChecker)(nil) +) + +var mFilters = map[string]filterConstructor{ + tombstoneOverwriteFN: tombstoneOverwriteFC, + storageGroupFN: storageGroupFC, + creationEpochFN: creationEpochFC, + objIntegrityFN: objectIntegrityFC, + payloadSizeFN: payloadSizeFC, +} + +var mBasicFilters = map[string]filterConstructor{ + objSizeFN: objectSizeFC, +} + +func newIncomingObjectFilter(p *Params) (Filter, error) { + filter, err := newFilter(p, readyObjectsCheckpointFilterName, mFilters) + if err != nil { + return nil, err + } + + return filter, nil +} + +func newFilter(p *Params, name string, m map[string]filterConstructor) (Filter, error) { + filter := localstore.NewFilter(&localstore.FilterParams{ + Name: name, + FilterFunc: localstore.SkippingFilterFunc, + }) + + fp := &filterParams{ + sgInfoRecv: p.SGInfoReceiver, + tsPresChecker: &coreTSPresChecker{localStore: p.LocalStore}, + maxProcSize: p.MaxProcessingSize, + storageCap: p.StorageCapacity, + localStore: p.LocalStore, + epochRecv: p.EpochReceiver, + verifier: p.Verifier, + + maxPayloadSize: p.MaxPayloadSize, + } + + items := make([]*localstore.FilterParams, 0, len(m)) + for fName, fCons := range m { + items = append(items, &localstore.FilterParams{Name: fName, FilterFunc: fCons(fp)}) + } + + f, err := localstore.AllPassIncludingFilter(commonObjectFN, items...) 
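+ // f is the combined OBJECTS_OVERALL pipeline; judging by the AllPassIncludingFilter name, an object passes it only when every sub-filter above passes.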
+ if err != nil { + return nil, err + } + + if err := filter.PutSubFilter(localstore.SubFilterParams{ + PriorityFlag: localstore.PriorityValue, + FilterPipeline: f, + OnFail: localstore.CodeFail, + }); err != nil { + return nil, errors.Wrapf(err, "could not put filter %s in pipeline", f.GetName()) + } + + return filter, nil +} + +func (s *coreTSPresChecker) hasLocalTombstone(addr Address) (bool, error) { + m, err := s.localStore.Meta(addr) + if err != nil { + if errors.Is(errors.Cause(err), core.ErrNotFound) { + return false, nil + } + + return false, err + } + + return m.Object.IsTombstone(), nil +} + +func storageGroupFC(p *filterParams) localstore.FilterFunc { + return func(ctx context.Context, meta *Meta) *localstore.FilterResult { + if sgInfo, err := meta.Object.StorageGroup(); err != nil { + return localstore.ResultPass() + } else if group := meta.Object.Group(); len(group) == 0 { + return localstore.ResultFail() + } else if realSGInfo, err := p.sgInfoRecv.GetSGInfo(ctx, meta.Object.SystemHeader.CID, group); err != nil { + return localstore.ResultWithError(localstore.CodeFail, err) + } else if sgInfo.ValidationDataSize != realSGInfo.ValidationDataSize { + return localstore.ResultWithError( + localstore.CodeFail, + &detailedError{ + error: errWrongSGSize, + d: sgSizeDetails(sgInfo.ValidationDataSize, realSGInfo.ValidationDataSize), + }, + ) + } else if !sgInfo.ValidationHash.Equal(realSGInfo.ValidationHash) { + return localstore.ResultWithError( + localstore.CodeFail, + &detailedError{ + error: errWrongSGHash, + d: sgHashDetails(sgInfo.ValidationHash, realSGInfo.ValidationHash), + }, + ) + } + + return localstore.ResultPass() + } +} + +func tombstoneOverwriteFC(p *filterParams) localstore.FilterFunc { + return func(ctx context.Context, meta *Meta) *localstore.FilterResult { + if meta.Object.IsTombstone() { + return localstore.ResultPass() + } else if hasTombstone, err := p.tsPresChecker.hasLocalTombstone(*meta.Object.Address()); err != nil { + return localstore.ResultFail() + } else if hasTombstone { + return localstore.ResultFail() + } + + return localstore.ResultPass() + } +} + +func objectSizeFC(p *filterParams) localstore.FilterFunc { + return func(ctx context.Context, meta *Meta) *localstore.FilterResult { + if need := meta.Object.SystemHeader.PayloadLength; need > p.maxProcSize { + return localstore.ResultWithError( + localstore.CodeFail, + &detailedError{ // // TODO: NSPCC-1048 + error: errProcPayloadSize, + d: maxProcPayloadSizeDetails(p.maxProcSize), + }, + ) + } else if ctx.Value(ttlValue).(uint32) < service.NonForwardingTTL { + if left := p.storageCap - uint64(p.localStore.Size()); need > left { + return localstore.ResultWithError( + localstore.CodeFail, + errLocalStorageOverflow, + ) + } + } + + return localstore.ResultPass() + } +} + +func payloadSizeFC(p *filterParams) localstore.FilterFunc { + return func(ctx context.Context, meta *Meta) *localstore.FilterResult { + if meta.Object.SystemHeader.PayloadLength > p.maxPayloadSize { + return localstore.ResultWithError( + localstore.CodeFail, + &detailedError{ // TODO: NSPCC-1048 + error: errObjectPayloadSize, + d: maxObjectPayloadSizeDetails(p.maxPayloadSize), + }, + ) + } + + return localstore.ResultPass() + } +} + +func creationEpochFC(p *filterParams) localstore.FilterFunc { + return func(_ context.Context, meta *Meta) *localstore.FilterResult { + if current := p.epochRecv.Epoch(); meta.Object.SystemHeader.CreatedAt.Epoch > current { + return localstore.ResultWithError( + localstore.CodeFail, + &detailedError{ // TODO: 
NSPCC-1048 + error: errObjectFromTheFuture, + d: objectCreationEpochDetails(current), + }, + ) + } + + return localstore.ResultPass() + } +} + +func objectIntegrityFC(p *filterParams) localstore.FilterFunc { + return func(ctx context.Context, meta *Meta) *localstore.FilterResult { + if err := p.verifier.Verify(ctx, meta.Object); err != nil { + return localstore.ResultWithError( + localstore.CodeFail, + &detailedError{ + error: errObjectHeadersVerification, + d: objectHeadersVerificationDetails(err), + }, + ) + } + + return localstore.ResultPass() + } +} + +func basicFilter(p *Params) (Filter, error) { + return newFilter(p, allObjectsCheckpointFilterName, mBasicFilters) +} diff --git a/services/public/object/filter_test.go b/services/public/object/filter_test.go new file mode 100644 index 000000000..1b4084f05 --- /dev/null +++ b/services/public/object/filter_test.go @@ -0,0 +1,400 @@ +package object + +import ( + "context" + "testing" + + "github.com/nspcc-dev/neofs-api-go/hash" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-api-go/storagegroup" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/nspcc-dev/neofs-node/lib/localstore" + "github.com/nspcc-dev/neofs-node/lib/objutil" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" +) + +type ( + // Entity for mocking interfaces. + // Implementation of any interface intercepts arguments via f (if not nil). + // If err is not nil, it is returned as is. Otherwise, res is cast to the needed type and returned without error. + testFilterEntity struct { + // Set of interfaces which the entity must implement; some of their methods are never called. + localstore.Localstore + + // Argument interceptor. Used to check that correct parameters are passed between components. + f func(...interface{}) + // Mocked result of any interface. + res interface{} + // Mocked error of any interface.
+ err error + } + + testFilterUnit struct { + obj *Object + exp localstore.FilterCode + } +) + +var ( + _ storagegroup.InfoReceiver = (*testFilterEntity)(nil) + _ objutil.Verifier = (*testFilterEntity)(nil) + _ EpochReceiver = (*testFilterEntity)(nil) + _ localstore.Localstore = (*testFilterEntity)(nil) + _ tombstonePresenceChecker = (*testFilterEntity)(nil) +) + +func (s *testFilterEntity) Meta(addr Address) (*Meta, error) { + if s.f != nil { + s.f(addr) + } + if s.err != nil { + return nil, s.err + } + return s.res.(*Meta), nil +} + +func (s *testFilterEntity) GetSGInfo(ctx context.Context, cid CID, group []ID) (*storagegroup.StorageGroup, error) { + if s.f != nil { + s.f(cid, group) + } + if s.err != nil { + return nil, s.err + } + return s.res.(*storagegroup.StorageGroup), nil +} + +func (s *testFilterEntity) hasLocalTombstone(addr Address) (bool, error) { + if s.f != nil { + s.f(addr) + } + if s.err != nil { + return false, s.err + } + return s.res.(bool), nil +} + +func (s *testFilterEntity) Size() int64 { return s.res.(int64) } + +func (s *testFilterEntity) Epoch() uint64 { return s.res.(uint64) } + +func (s *testFilterEntity) Verify(_ context.Context, obj *Object) error { + if s.f != nil { + s.f(obj) + } + return s.err +} + +func Test_creationEpochFC(t *testing.T) { + ctx := context.TODO() + localEpoch := uint64(100) + + ff := creationEpochFC(&filterParams{epochRecv: &testFilterEntity{res: localEpoch}}) + + valid := []Object{ + {SystemHeader: SystemHeader{CreatedAt: CreationPoint{Epoch: localEpoch - 1}}}, + {SystemHeader: SystemHeader{CreatedAt: CreationPoint{Epoch: localEpoch}}}, + } + + invalid := []Object{ + {SystemHeader: SystemHeader{CreatedAt: CreationPoint{Epoch: localEpoch + 1}}}, + {SystemHeader: SystemHeader{CreatedAt: CreationPoint{Epoch: localEpoch + 2}}}, + } + + testFilteringObjects(t, ctx, ff, valid, invalid, nil) +} + +func Test_objectSizeFC(t *testing.T) { + maxProcSize := uint64(100) + + t.Run("forwarding TTL", func(t *testing.T) { + var ( + ctx = context.WithValue(context.TODO(), ttlValue, uint32(service.SingleForwardingTTL)) + ff = objectSizeFC(&filterParams{maxProcSize: maxProcSize}) + ) + + valid := []Object{ + {SystemHeader: SystemHeader{PayloadLength: maxProcSize - 1}}, + {SystemHeader: SystemHeader{PayloadLength: maxProcSize}}, + } + + invalid := []Object{ + {SystemHeader: SystemHeader{PayloadLength: maxProcSize + 1}}, + {SystemHeader: SystemHeader{PayloadLength: maxProcSize + 2}}, + } + + testFilteringObjects(t, ctx, ff, valid, invalid, nil) + }) + + t.Run("non-forwarding TTL", func(t *testing.T) { + var ( + ctx = context.WithValue(context.TODO(), ttlValue, uint32(service.NonForwardingTTL-1)) + objSize = maxProcSize / 2 + ls = &testFilterEntity{res: int64(maxProcSize - objSize)} + ) + + ff := objectSizeFC(&filterParams{ + maxProcSize: maxProcSize, + storageCap: maxProcSize, + localStore: ls, + }) + + valid := []Object{{SystemHeader: SystemHeader{PayloadLength: objSize}}} + invalid := []Object{{SystemHeader: SystemHeader{PayloadLength: objSize + 1}}} + + testFilteringObjects(t, ctx, ff, valid, invalid, nil) + }) +} + +func Test_objectIntegrityFC(t *testing.T) { + var ( + ctx = context.TODO() + valid = &Object{SystemHeader: SystemHeader{ID: testObjectAddress(t).ObjectID}} + invalid = &Object{SystemHeader: SystemHeader{ID: testObjectAddress(t).ObjectID}} + ) + valid.Headers = append(valid.Headers, Header{Value: new(object.Header_PayloadChecksum)}) + + ver := new(testFilterEntity) + ver.f = func(items ...interface{}) { + if 
items[0].(*Object).SystemHeader.ID.Equal(valid.SystemHeader.ID) { + ver.err = nil + } else { + ver.err = internal.Error("") + } + } + + ff := objectIntegrityFC(&filterParams{verifier: ver}) + + testFilterFunc(t, ctx, ff, testFilterUnit{obj: valid, exp: localstore.CodePass}) + testFilterFunc(t, ctx, ff, testFilterUnit{obj: invalid, exp: localstore.CodeFail}) +} + +func Test_tombstoneOverwriteFC(t *testing.T) { + var ( + obj1 = Object{ + SystemHeader: SystemHeader{ID: testObjectAddress(t).ObjectID}, + Headers: []Header{{Value: new(object.Header_Tombstone)}}, + } + obj2 = Object{ + SystemHeader: SystemHeader{ID: testObjectAddress(t).ObjectID}, + } + obj3 = Object{ + SystemHeader: SystemHeader{ID: testObjectAddress(t).ObjectID}, + } + obj4 = Object{ + SystemHeader: SystemHeader{ID: testObjectAddress(t).ObjectID}, + } + ) + + ts := new(testFilterEntity) + ts.f = func(items ...interface{}) { + addr := items[0].(Address) + if addr.ObjectID.Equal(obj2.SystemHeader.ID) { + ts.res, ts.err = nil, internal.Error("") + } else if addr.ObjectID.Equal(obj3.SystemHeader.ID) { + ts.res, ts.err = true, nil + } else { + ts.res, ts.err = false, nil + } + } + + valid := []Object{obj1, obj4} + invalid := []Object{obj2, obj3} + + ff := tombstoneOverwriteFC(&filterParams{tsPresChecker: ts}) + + testFilteringObjects(t, context.TODO(), ff, valid, invalid, nil) +} + +func Test_storageGroupFC(t *testing.T) { + var ( + valid, invalid []Object + cid = testObjectAddress(t).CID + sgSize, sgHash = uint64(10), hash.Sum(testData(t, 10)) + + sg = &storagegroup.StorageGroup{ + ValidationDataSize: sgSize, + ValidationHash: sgHash, + } + + sgHeaders = []Header{ + {Value: &object.Header_StorageGroup{StorageGroup: sg}}, + {Value: &object.Header_Link{Link: &object.Link{Type: object.Link_StorageGroup}}}, + } + ) + + valid = append(valid, Object{ + SystemHeader: SystemHeader{ + CID: cid, + }, + }) + + valid = append(valid, Object{ + SystemHeader: SystemHeader{ + CID: cid, + }, + Headers: sgHeaders, + }) + + invalid = append(invalid, Object{ + SystemHeader: SystemHeader{ + CID: cid, + }, + Headers: sgHeaders[:1], + }) + + invalid = append(invalid, Object{ + SystemHeader: SystemHeader{ + CID: cid, + }, + Headers: []Header{ + { + Value: &object.Header_StorageGroup{ + StorageGroup: &storagegroup.StorageGroup{ + ValidationDataSize: sg.ValidationDataSize + 1, + }, + }, + }, + { + Value: &object.Header_Link{ + Link: &object.Link{ + Type: object.Link_StorageGroup, + }, + }, + }, + }, + }) + + invalid = append(invalid, Object{ + SystemHeader: SystemHeader{ + CID: cid, + }, + Headers: []Header{ + { + Value: &object.Header_StorageGroup{ + StorageGroup: &storagegroup.StorageGroup{ + ValidationDataSize: sg.ValidationDataSize, + ValidationHash: Hash{1, 2, 3}, + }, + }, + }, + { + Value: &object.Header_Link{ + Link: &object.Link{ + Type: object.Link_StorageGroup, + }, + }, + }, + }, + }) + + sr := &testFilterEntity{ + f: func(items ...interface{}) { + require.Equal(t, cid, items[0]) + }, + res: sg, + } + + ff := storageGroupFC(&filterParams{sgInfoRecv: sr}) + + testFilteringObjects(t, context.TODO(), ff, valid, invalid, nil) +} + +func Test_coreTSPresChecker(t *testing.T) { + addr := testObjectAddress(t) + + t.Run("local storage failure", func(t *testing.T) { + ls := &testFilterEntity{ + f: func(items ...interface{}) { + require.Equal(t, addr, items[0]) + }, + err: errors.Wrap(core.ErrNotFound, "some message"), + } + + s := &coreTSPresChecker{localStore: ls} + + res, err := s.hasLocalTombstone(addr) + require.NoError(t, err) + require.False(t, res) 
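+ // a wrapped core.ErrNotFound is treated as the absence of a tombstone; any other local storage error is expected to be propagated as is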
+ + lsErr := internal.Error("test error for local storage") + ls.err = lsErr + + res, err = s.hasLocalTombstone(addr) + require.EqualError(t, err, lsErr.Error()) + }) + + t.Run("correct result", func(t *testing.T) { + m := &Meta{Object: new(Object)} + + ls := &testFilterEntity{res: m} + + s := &coreTSPresChecker{localStore: ls} + + res, err := s.hasLocalTombstone(addr) + require.NoError(t, err) + require.False(t, res) + + m.Object.AddHeader(&object.Header{Value: new(object.Header_Tombstone)}) + + res, err = s.hasLocalTombstone(addr) + require.NoError(t, err) + require.True(t, res) + }) +} + +func testFilteringObjects(t *testing.T, ctx context.Context, f localstore.FilterFunc, valid, invalid, ignored []Object) { + units := make([]testFilterUnit, 0, len(valid)+len(invalid)+len(ignored)) + + for i := range valid { + units = append(units, testFilterUnit{ + obj: &valid[i], + exp: localstore.CodePass, + }) + } + + for i := range invalid { + units = append(units, testFilterUnit{ + obj: &invalid[i], + exp: localstore.CodeFail, + }) + } + + for i := range ignored { + units = append(units, testFilterUnit{ + obj: &ignored[i], + exp: localstore.CodeIgnore, + }) + } + + testFilterFunc(t, ctx, f, units...) +} + +func testFilterFunc(t *testing.T, ctx context.Context, f localstore.FilterFunc, units ...testFilterUnit) { + for i := range units { + res := f(ctx, &Meta{Object: units[i].obj}) + require.Equal(t, units[i].exp, res.Code()) + } +} + +func Test_payloadSizeFC(t *testing.T) { + maxPayloadSize := uint64(100) + + valid := []Object{ + {SystemHeader: SystemHeader{PayloadLength: maxPayloadSize - 1}}, + {SystemHeader: SystemHeader{PayloadLength: maxPayloadSize}}, + } + + invalid := []Object{ + {SystemHeader: SystemHeader{PayloadLength: maxPayloadSize + 1}}, + {SystemHeader: SystemHeader{PayloadLength: maxPayloadSize + 2}}, + } + + ff := payloadSizeFC(&filterParams{ + maxPayloadSize: maxPayloadSize, + }) + + testFilteringObjects(t, context.TODO(), ff, valid, invalid, nil) +} diff --git a/services/public/object/get.go b/services/public/object/get.go new file mode 100644 index 000000000..666721283 --- /dev/null +++ b/services/public/object/get.go @@ -0,0 +1,111 @@ +package object + +import ( + "bytes" + "io" + + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +type ( + getServerWriter struct { + req *object.GetRequest + + srv object.Service_GetServer + + respPreparer responsePreparer + } +) + +const ( + maxGetPayloadSize = 3584 * 1024 // 3.5 MiB + + emSendObjectHead = "could not send object head" +) + +var _ io.Writer = (*getServerWriter)(nil) + +func (s *objectService) Get(req *object.GetRequest, server object.Service_GetServer) (err error) { + defer func() { + if r := recover(); r != nil { + s.log.Error(panicLogMsg, + zap.Stringer("request", object.RequestGet), + zap.Any("reason", r), + ) + + err = errServerPanic + } + + err = s.statusCalculator.make(requestError{ + t: object.RequestGet, + e: err, + }) + }() + + var r interface{} + + if r, err = s.requestHandler.handleRequest(server.Context(), handleRequestParams{ + request: req, + executor: s, + }); err != nil { + return err + } + + obj := r.(*objectData) + + var payload []byte + payload, obj.Payload = obj.Payload, nil + + resp := makeGetHeaderResponse(obj.Object) + if err = s.respPreparer.prepareResponse(server.Context(), req, resp); err != nil { + return + } + + if err = server.Send(resp); err != nil { + return errors.Wrap(err, emSendObjectHead) + } + + _, err = io.CopyBuffer( + &getServerWriter{ + req: req, 
+ srv: server, + respPreparer: s.getChunkPreparer, + }, + io.MultiReader(bytes.NewReader(payload), obj.payload), + make([]byte, maxGetPayloadSize)) + + return err +} + +// splitBytes splits data into consecutive chunks of at most maxSize bytes. +func splitBytes(data []byte, maxSize int) (result [][]byte) { + l := len(data) + if l == 0 { + return nil + } + + for i := 0; i < l; i += maxSize { + last := i + maxSize + if last > l { + last = l + } + + result = append(result, data[i:last]) + } + + return +} + +func (s *getServerWriter) Write(p []byte) (int, error) { + resp := makeGetChunkResponse(p) + if err := s.respPreparer.prepareResponse(s.srv.Context(), s.req, resp); err != nil { + return 0, err + } + + if err := s.srv.Send(resp); err != nil { + return 0, err + } + + return len(p), nil +} diff --git a/services/public/object/get_test.go b/services/public/object/get_test.go new file mode 100644 index 000000000..a78fde76c --- /dev/null +++ b/services/public/object/get_test.go @@ -0,0 +1,225 @@ +package object + +import ( + "context" + "testing" + + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/localstore" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" +) + +type ( + // Entity for mocking interfaces. + // Implementation of any interface intercepts arguments via f (if not nil). + // If err is not nil, it is returned as is. Otherwise, res is cast to the needed type and returned without error. + testGetEntity struct { + // Set of interfaces which the entity must implement; some of their methods are never called. + localstore.Localstore + object.Service_GetServer + + // Argument interceptor. Used to check that correct parameters are passed between components. + f func(...interface{}) + // Mocked result of any interface. + res interface{} + // Mocked error of any interface.
+ err error + } +) + +var ( + _ object.Service_GetServer = (*testGetEntity)(nil) + _ requestHandler = (*testGetEntity)(nil) + _ responsePreparer = (*testGetEntity)(nil) +) + +func (s *testGetEntity) prepareResponse(_ context.Context, req serviceRequest, resp serviceResponse) error { + if s.f != nil { + s.f(req, resp) + } + return s.err +} + +func (s *testGetEntity) Context() context.Context { return context.TODO() } + +func (s *testGetEntity) Send(r *object.GetResponse) error { + if s.f != nil { + s.f(r) + } + return s.err +} + +func (s *testGetEntity) handleRequest(_ context.Context, p handleRequestParams) (interface{}, error) { + if s.f != nil { + s.f(p) + } + return s.res, s.err +} + +func Test_makeGetHeaderResponse(t *testing.T) { + obj := &Object{Payload: testData(t, 10)} + + require.Equal(t, &object.GetResponse{R: &object.GetResponse_Object{Object: obj}}, makeGetHeaderResponse(obj)) +} + +func Test_makeGetChunkResponse(t *testing.T) { + chunk := testData(t, 10) + + require.Equal(t, &object.GetResponse{R: &object.GetResponse_Chunk{Chunk: chunk}}, makeGetChunkResponse(chunk)) +} + +func Test_splitBytes(t *testing.T) { + t.Run("empty data", func(t *testing.T) { + testSplit(t, make([]byte, 0), 0) + testSplit(t, nil, 0) + }) + + t.Run("less size", func(t *testing.T) { + testSplit(t, make([]byte, 10), 20) + }) + + t.Run("equal size", func(t *testing.T) { + testSplit(t, make([]byte, 20), 20) + }) + + t.Run("oversize", func(t *testing.T) { + testSplit(t, make([]byte, 3), 17) + }) +} + +func testSplit(t *testing.T, initData []byte, maxSize int) { + res := splitBytes(initData, maxSize) + restored := make([]byte, 0, len(initData)) + for i := range res { + require.LessOrEqual(t, len(res[i]), maxSize) + restored = append(restored, res[i]...) + } + require.Len(t, restored, len(initData)) + if len(initData) > 0 { + require.Equal(t, initData, restored) + } +} + +func TestObjectService_Get(t *testing.T) { + req := &object.GetRequest{Address: testObjectAddress(t)} + + t.Run("request handler failure", func(t *testing.T) { + hErr := internal.Error("test error for request handler") + + s := &objectService{ + statusCalculator: newStatusCalculator(), + } + + s.requestHandler = &testGetEntity{ + f: func(items ...interface{}) { + t.Run("correct request handler params", func(t *testing.T) { + p := items[0].(handleRequestParams) + require.Equal(t, req, p.request) + require.Equal(t, s, p.executor) + }) + }, + err: hErr, + } + + require.EqualError(t, s.Get(req, new(testGetEntity)), hErr.Error()) + }) + + t.Run("send object head failure", func(t *testing.T) { + srvErr := internal.Error("test error for get server") + + obj := &Object{ + SystemHeader: SystemHeader{ + ID: testObjectAddress(t).ObjectID, + CID: testObjectAddress(t).CID, + }, + } + + s := objectService{ + requestHandler: &testGetEntity{res: &objectData{Object: obj}}, + respPreparer: &testGetEntity{ + f: func(items ...interface{}) { + require.Equal(t, req, items[0]) + require.Equal(t, makeGetHeaderResponse(obj), items[1]) + }, + res: new(object.GetResponse), + }, + + statusCalculator: newStatusCalculator(), + } + + require.EqualError(t, s.Get(req, &testGetEntity{err: srvErr}), errors.Wrap(srvErr, emSendObjectHead).Error()) + }) + + t.Run("send chunk failure", func(t *testing.T) { + srvErr := internal.Error("test error for get server") + payload := testData(t, 10) + + obj := &Object{ + SystemHeader: SystemHeader{ID: testObjectAddress(t).ObjectID}, + Headers: []Header{{ + Value: &object.Header_UserHeader{UserHeader: &UserHeader{Key: "key", Value: 
"value"}}, + }}, + Payload: payload, + } + + headResp := makeGetHeaderResponse(&Object{ + SystemHeader: obj.SystemHeader, + Headers: obj.Headers, + }) + + chunkResp := makeGetChunkResponse(payload) + + callNum := 0 + + respPrep := new(testGetEntity) + respPrep.f = func(items ...interface{}) { + if callNum == 0 { + respPrep.res = headResp + } else { + respPrep.res = chunkResp + } + } + + s := objectService{ + requestHandler: &testGetEntity{res: &objectData{Object: obj}}, + respPreparer: respPrep, + + getChunkPreparer: respPrep, + + statusCalculator: newStatusCalculator(), + } + + srv := new(testGetEntity) + srv.f = func(items ...interface{}) { + t.Run("correct get server params", func(t *testing.T) { + if callNum == 0 { + require.Equal(t, headResp, items[0]) + } else { + require.Equal(t, chunkResp, items[0]) + srv.err = srvErr + } + callNum++ + }) + } + + require.EqualError(t, s.Get(req, srv), srvErr.Error()) + }) + + t.Run("send success", func(t *testing.T) { + s := objectService{ + requestHandler: &testGetEntity{res: &objectData{ + Object: new(Object), + payload: new(emptyReader), + }}, + respPreparer: &testGetEntity{ + res: new(object.GetResponse), + }, + + statusCalculator: newStatusCalculator(), + } + + require.NoError(t, s.Get(req, new(testGetEntity))) + }) +} diff --git a/services/public/object/handler.go b/services/public/object/handler.go new file mode 100644 index 000000000..9d704239f --- /dev/null +++ b/services/public/object/handler.go @@ -0,0 +1,109 @@ +package object + +import ( + "context" + "fmt" + + "github.com/nspcc-dev/neofs-api-go/object" +) + +type ( + // requestHandler is an interface of Object service cross-request handler. + requestHandler interface { + // Handles request by parameter-bound logic. + handleRequest(context.Context, handleRequestParams) (interface{}, error) + } + + handleRequestParams struct { + // Processing request. + request serviceRequest + + // Processing request executor. + executor requestHandleExecutor + } + + // coreRequestHandler is an implementation of requestHandler interface used in Object service production. + coreRequestHandler struct { + // Request preprocessor. + preProc requestPreProcessor + + // Request postprocessor. + postProc requestPostProcessor + } + + // requestHandleExecutor is an interface of universal Object operation executor. + requestHandleExecutor interface { + // Executes actions parameter-bound logic and returns execution result. + executeRequest(context.Context, serviceRequest) (interface{}, error) + } +) + +var _ requestHandler = (*coreRequestHandler)(nil) + +// requestHandler method implementation. +// +// If internal requestPreProcessor returns non-nil error for request argument, it returns. +// Otherwise, requestHandleExecutor argument performs actions. Received error is passed to requestPoistProcessor routine. +// Returned results of requestHandleExecutor are return. +func (s *coreRequestHandler) handleRequest(ctx context.Context, p handleRequestParams) (interface{}, error) { + if err := s.preProc.preProcess(ctx, p.request); err != nil { + return nil, err + } + + res, err := p.executor.executeRequest(ctx, p.request) + + go s.postProc.postProcess(ctx, p.request, err) + + return res, err +} + +// TODO: separate executors for each operation +// requestHandleExecutor method implementation. 
+func (s *objectService) executeRequest(ctx context.Context, req serviceRequest) (interface{}, error) { + switch r := req.(type) { + case *object.SearchRequest: + return s.objSearcher.searchObjects(ctx, &transportRequest{ + serviceRequest: r, + timeout: s.pSrch.Timeout, + }) + case *putRequest: + addr, err := s.objStorer.putObject(ctx, r) + if err != nil { + return nil, err + } + + resp := makePutResponse(*addr) + if err := s.respPreparer.prepareResponse(ctx, r.PutRequest, resp); err != nil { + return nil, err + } + + return nil, r.srv.SendAndClose(resp) + case *object.DeleteRequest: + return nil, s.objRemover.delete(ctx, &transportRequest{ + serviceRequest: r, + timeout: s.pDel.Timeout, + }) + case *object.GetRequest: + return s.objRecv.getObject(ctx, &transportRequest{ + serviceRequest: r, + timeout: s.pGet.Timeout, + }) + case *object.HeadRequest: + return s.objRecv.getObject(ctx, &transportRequest{ + serviceRequest: r, + timeout: s.pHead.Timeout, + }) + case *GetRangeRequest: + return s.payloadRngRecv.getRangeData(ctx, &transportRequest{ + serviceRequest: r, + timeout: s.pRng.Timeout, + }) + case *object.GetRangeHashRequest: + return s.rngRecv.getRange(ctx, &transportRequest{ + serviceRequest: r, + timeout: s.pRng.Timeout, + }) + default: + panic(fmt.Sprintf(pmWrongRequestType, r)) + } +} diff --git a/services/public/object/handler_test.go b/services/public/object/handler_test.go new file mode 100644 index 000000000..abc0c7ce0 --- /dev/null +++ b/services/public/object/handler_test.go @@ -0,0 +1,442 @@ +package object + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "testing" + "time" + + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/transport" + "github.com/stretchr/testify/require" +) + +type ( + // Entity for mocking interfaces. + // Implementation of any interface intercepts arguments via f (if not nil). + // If err is not nil, it is returned as is. Otherwise, res is cast to the needed type and returned without error. + testHandlerEntity struct { + // Set of interfaces which the entity must implement; some of their methods are never called. + serviceRequest + object.Service_PutServer + + // Argument interceptor. Used to check that correct parameters are passed between components. + f func(...interface{}) + // Mocked result of any interface. + res interface{} + // Mocked error of any interface.
+ err error + } +) + +var ( + _ requestPreProcessor = (*testHandlerEntity)(nil) + _ requestPostProcessor = (*testHandlerEntity)(nil) + _ requestHandleExecutor = (*testHandlerEntity)(nil) + _ objectSearcher = (*testHandlerEntity)(nil) + _ objectStorer = (*testHandlerEntity)(nil) + _ object.Service_PutServer = (*testHandlerEntity)(nil) + _ objectRemover = (*testHandlerEntity)(nil) + _ objectReceiver = (*testHandlerEntity)(nil) + _ objectRangeReceiver = (*testHandlerEntity)(nil) + _ payloadRangeReceiver = (*testHandlerEntity)(nil) + _ responsePreparer = (*testHandlerEntity)(nil) +) + +func (s *testHandlerEntity) prepareResponse(_ context.Context, req serviceRequest, resp serviceResponse) error { + if s.f != nil { + s.f(req, resp) + } + return s.err +} + +func (s *testHandlerEntity) getRangeData(_ context.Context, info transport.RangeInfo, l ...Object) (io.Reader, error) { + if s.f != nil { + s.f(info, l) + } + if s.err != nil { + return nil, s.err + } + return s.res.(io.Reader), nil +} + +func (s *testHandlerEntity) getRange(_ context.Context, r rangeTool) (interface{}, error) { + if s.f != nil { + s.f(r) + } + return s.res, s.err +} + +func (s *testHandlerEntity) getObject(_ context.Context, r ...transport.GetInfo) (*objectData, error) { + if s.f != nil { + s.f(r) + } + if s.err != nil { + return nil, s.err + } + return s.res.(*objectData), nil +} + +func (s *testHandlerEntity) delete(_ context.Context, r deleteInfo) error { + if s.f != nil { + s.f(r) + } + return s.err +} + +func (s *testHandlerEntity) SendAndClose(r *object.PutResponse) error { + if s.f != nil { + s.f(r) + } + return s.err +} + +func (s *testHandlerEntity) putObject(_ context.Context, r transport.PutInfo) (*Address, error) { + if s.f != nil { + s.f(r) + } + if s.err != nil { + return nil, s.err + } + return s.res.(*Address), nil +} + +func (s *testHandlerEntity) searchObjects(_ context.Context, r transport.SearchInfo) ([]Address, error) { + if s.f != nil { + s.f(r) + } + if s.err != nil { + return nil, s.err + } + return s.res.([]Address), nil +} + +func (s *testHandlerEntity) preProcess(_ context.Context, req serviceRequest) error { + if s.f != nil { + s.f(req) + } + return s.err +} + +func (s *testHandlerEntity) postProcess(_ context.Context, req serviceRequest, e error) { + if s.f != nil { + s.f(req, e) + } +} + +func (s *testHandlerEntity) executeRequest(_ context.Context, req serviceRequest) (interface{}, error) { + if s.f != nil { + s.f(req) + } + return s.res, s.err +} + +func TestCoreRequestHandler_HandleRequest(t *testing.T) { + ctx := context.TODO() + + // create custom serviceRequest + req := new(testHandlerEntity) + + t.Run("pre processor error", func(t *testing.T) { + // create custom error + pErr := internal.Error("test error for pre-processor") + + s := &coreRequestHandler{ + preProc: &testHandlerEntity{ + f: func(items ...interface{}) { + t.Run("correct pre processor params", func(t *testing.T) { + require.Equal(t, req, items[0].(serviceRequest)) + }) + }, + err: pErr, // force requestPreProcessor to return pErr + }, + } + + res, err := s.handleRequest(ctx, handleRequestParams{request: req}) + + // ascertain that error returns as expected + require.EqualError(t, err, pErr.Error()) + + // ascertain that nil result returns as expected + require.Nil(t, res) + }) + + t.Run("correct behavior", func(t *testing.T) { + // create custom error + eErr := internal.Error("test error for request executor") + + // create custom result + eRes := testData(t, 10) + + // create channel for requestPostProcessor + ch := 
make(chan struct{}) + + executor := &testHandlerEntity{ + f: func(items ...interface{}) { + t.Run("correct executor params", func(t *testing.T) { + require.Equal(t, req, items[0].(serviceRequest)) + }) + }, + res: eRes, // force requestHandleExecutor to return created result + err: eErr, // force requestHandleExecutor to return created error + } + + s := &coreRequestHandler{ + preProc: &testHandlerEntity{ + err: nil, // force requestPreProcessor to return nil error + }, + postProc: &testHandlerEntity{ + f: func(items ...interface{}) { + t.Run("correct pre processor params", func(t *testing.T) { + require.Equal(t, req, items[0].(serviceRequest)) + require.Equal(t, eErr, items[1].(error)) + }) + ch <- struct{}{} // write to channel + }, + }, + } + + res, err := s.handleRequest(ctx, handleRequestParams{ + request: req, + executor: executor, + }) + + // ascertain that results return as expected + require.EqualError(t, err, eErr.Error()) + require.Equal(t, eRes, res) + + <-ch // read from channel + }) +} + +func Test_objectService_executeRequest(t *testing.T) { + ctx := context.TODO() + + t.Run("invalid request", func(t *testing.T) { + req := new(testHandlerEntity) + require.PanicsWithValue(t, fmt.Sprintf(pmWrongRequestType, req), func() { + _, _ = new(objectService).executeRequest(ctx, req) + }) + }) + + t.Run("search request", func(t *testing.T) { + var ( + timeout = 3 * time.Second + req = &object.SearchRequest{ContainerID: testObjectAddress(t).CID} + addrList = testAddrList(t, 3) + ) + + s := &objectService{ + pSrch: OperationParams{Timeout: timeout}, + objSearcher: &testHandlerEntity{ + f: func(items ...interface{}) { + require.Equal(t, &transportRequest{ + serviceRequest: req, + timeout: timeout, + }, items[0]) + }, + res: addrList, + }, + } + + res, err := s.executeRequest(ctx, req) + require.NoError(t, err) + require.Equal(t, addrList, res) + }) + + t.Run("put request", func(t *testing.T) { + t.Run("storer error", func(t *testing.T) { + sErr := internal.Error("test error for object storer") + + req := &putRequest{ + PutRequest: new(object.PutRequest), + srv: new(testHandlerEntity), + timeout: 3 * time.Second, + } + + s := &objectService{ + objStorer: &testHandlerEntity{ + f: func(items ...interface{}) { + require.Equal(t, req, items[0]) + }, + err: sErr, + }, + respPreparer: &testHandlerEntity{ + res: serviceResponse(nil), + }, + } + + _, err := s.executeRequest(ctx, req) + require.EqualError(t, err, sErr.Error()) + }) + + t.Run("correct result", func(t *testing.T) { + addr := testObjectAddress(t) + + srvErr := internal.Error("test error for stream server") + + resp := &object.PutResponse{Address: addr} + + pReq := new(object.PutRequest) + + s := &objectService{ + objStorer: &testHandlerEntity{ + res: &addr, + }, + respPreparer: &testHandlerEntity{ + f: func(items ...interface{}) { + require.Equal(t, pReq, items[0]) + require.Equal(t, makePutResponse(addr), items[1]) + }, + res: resp, + }, + } + + req := &putRequest{ + PutRequest: pReq, + srv: &testHandlerEntity{ + f: func(items ...interface{}) { + require.Equal(t, resp, items[0]) + }, + err: srvErr, + }, + } + + res, err := s.executeRequest(ctx, req) + require.EqualError(t, err, srvErr.Error()) + require.Nil(t, res) + }) + }) + + t.Run("delete request", func(t *testing.T) { + var ( + timeout = 3 * time.Second + dErr = internal.Error("test error for object remover") + req = &object.DeleteRequest{Address: testObjectAddress(t)} + ) + + s := &objectService{ + objRemover: &testHandlerEntity{ + f: func(items ...interface{}) { + 
require.Equal(t, &transportRequest{ + serviceRequest: req, + timeout: timeout, + }, items[0]) + }, + err: dErr, + }, + pDel: OperationParams{Timeout: timeout}, + } + + res, err := s.executeRequest(ctx, req) + require.EqualError(t, err, dErr.Error()) + require.Nil(t, res) + }) + + t.Run("get request", func(t *testing.T) { + var ( + timeout = 3 * time.Second + obj = &objectData{Object: &Object{Payload: testData(t, 10)}} + req = &object.GetRequest{Address: testObjectAddress(t)} + ) + + s := &objectService{ + objRecv: &testHandlerEntity{ + f: func(items ...interface{}) { + require.Equal(t, []transport.GetInfo{&transportRequest{ + serviceRequest: req, + timeout: timeout, + }}, items[0]) + }, + res: obj, + }, + pGet: OperationParams{Timeout: timeout}, + } + + res, err := s.executeRequest(ctx, req) + require.NoError(t, err) + require.Equal(t, obj, res) + }) + + t.Run("head request", func(t *testing.T) { + var ( + timeout = 3 * time.Second + hErr = internal.Error("test error for head receiver") + req = &object.HeadRequest{Address: testObjectAddress(t)} + ) + + s := &objectService{ + objRecv: &testHandlerEntity{ + f: func(items ...interface{}) { + require.Equal(t, []transport.GetInfo{&transportRequest{ + serviceRequest: req, + timeout: timeout, + }}, items[0]) + }, + err: hErr, + }, + pHead: OperationParams{Timeout: timeout}, + } + + _, err := s.executeRequest(ctx, req) + require.EqualError(t, err, hErr.Error()) + }) + + t.Run("range requests", func(t *testing.T) { + t.Run("data", func(t *testing.T) { + var ( + timeout = 3 * time.Second + rData = testData(t, 10) + req = &GetRangeRequest{Address: testObjectAddress(t)} + ) + + s := &objectService{ + payloadRngRecv: &testHandlerEntity{ + f: func(items ...interface{}) { + require.Equal(t, &transportRequest{ + serviceRequest: req, + timeout: timeout, + }, items[0]) + require.Empty(t, items[1]) + }, + res: bytes.NewReader(rData), + }, + pRng: OperationParams{Timeout: timeout}, + } + + res, err := s.executeRequest(ctx, req) + require.NoError(t, err) + d, err := ioutil.ReadAll(res.(io.Reader)) + require.NoError(t, err) + require.Equal(t, rData, d) + }) + + t.Run("hashes", func(t *testing.T) { + var ( + timeout = 3 * time.Second + rErr = internal.Error("test error for range receiver") + req = &object.GetRangeHashRequest{Address: testObjectAddress(t)} + ) + + s := &objectService{ + rngRecv: &testHandlerEntity{ + f: func(items ...interface{}) { + require.Equal(t, &transportRequest{ + serviceRequest: req, + timeout: timeout, + }, items[0]) + }, + err: rErr, + }, + pRng: OperationParams{Timeout: timeout}, + } + + _, err := s.executeRequest(ctx, req) + require.EqualError(t, err, rErr.Error()) + }) + }) +} diff --git a/services/public/object/head.go b/services/public/object/head.go new file mode 100644 index 000000000..2eb89ce43 --- /dev/null +++ b/services/public/object/head.go @@ -0,0 +1,640 @@ +package object + +import ( + "context" + "fmt" + "io" + "sync" + "time" + + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/nspcc-dev/neofs-node/lib/objio" + "github.com/nspcc-dev/neofs-node/lib/transformer" + "github.com/nspcc-dev/neofs-node/lib/transport" + "github.com/panjf2000/ants/v2" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +type ( + objectData struct { + *Object + payload io.Reader + } + + objectReceiver interface { + getObject(context.Context, ...transport.GetInfo) (*objectData, error) + } + + 
rangeDataReceiver interface { + recvData(context.Context, transport.RangeInfo, io.Writer) error + } + + rangeReaderAccumulator interface { + responseItemHandler + rangeData() io.Reader + } + + rangeRdrAccum struct { + *sync.Once + r io.Reader + } + + straightRangeDataReceiver struct { + executor operationExecutor + } + + coreObjectReceiver struct { + straightObjRecv objectReceiver + childLister objectChildrenLister + ancestralRecv ancestralObjectsReceiver + + log *zap.Logger + } + + straightObjectReceiver struct { + executor operationExecutor + } + + objectRewinder interface { + rewind(context.Context, ...Object) (*Object, error) + } + + payloadPartReceiver interface { + recvPayload(context.Context, []transport.RangeInfo) (io.Reader, error) + } + + corePayloadPartReceiver struct { + rDataRecv rangeDataReceiver + windowController slidingWindowController + } + + slidingWindowController interface { + newWindow() (WorkerPool, error) + } + + simpleWindowController struct { + windowSize int + } + + coreObjectRewinder struct { + transformer transformer.ObjectRestorer + } + + objectAccumulator interface { + responseItemHandler + object() *Object + } + + coreObjAccum struct { + *sync.Once + obj *Object + } + + rawGetInfo struct { + *rawAddrInfo + } + + rawHeadInfo struct { + rawGetInfo + fullHeaders bool + } + + childrenReceiver interface { + getChildren(context.Context, Address, []ID) ([]Object, error) + } + + coreChildrenReceiver struct { + coreObjRecv objectReceiver + timeout time.Duration + } + + payloadRangeReceiver interface { + getRangeData(context.Context, transport.RangeInfo, ...Object) (io.Reader, error) + } + + corePayloadRangeReceiver struct { + chopTable objio.ChopperTable + relRecv objio.RelativeReceiver + payloadRecv payloadPartReceiver + + // Set of errors that won't be converted to errPayloadRangeNotFound + mErr map[error]struct{} + + log *zap.Logger + } + + ancestralObjectsReceiver interface { + getFromChildren(context.Context, Address, []ID, bool) (*objectData, error) + } + + coreAncestralReceiver struct { + childrenRecv childrenReceiver + objRewinder objectRewinder + pRangeRecv payloadRangeReceiver + timeout time.Duration + } + + emptyReader struct{} +) + +const ( + emHeadRecvFail = "could not receive %d of %d object head" + + childrenNotFound = internal.Error("could not find child objects") + errNonAssembly = internal.Error("node is not capable to assemble the object") +) + +var ( + _ objectReceiver = (*straightObjectReceiver)(nil) + _ objectReceiver = (*coreObjectReceiver)(nil) + _ objectRewinder = (*coreObjectRewinder)(nil) + _ objectAccumulator = (*coreObjAccum)(nil) + _ transport.HeadInfo = (*transportRequest)(nil) + _ transport.HeadInfo = (*rawHeadInfo)(nil) + _ transport.GetInfo = (*transportRequest)(nil) + _ transport.GetInfo = (*rawGetInfo)(nil) + + _ payloadPartReceiver = (*corePayloadPartReceiver)(nil) + + _ ancestralObjectsReceiver = (*coreAncestralReceiver)(nil) + + _ childrenReceiver = (*coreChildrenReceiver)(nil) + + _ payloadRangeReceiver = (*corePayloadRangeReceiver)(nil) + + _ rangeDataReceiver = (*straightRangeDataReceiver)(nil) + + _ slidingWindowController = (*simpleWindowController)(nil) + + _ io.Reader = (*emptyReader)(nil) + + _ rangeReaderAccumulator = (*rangeRdrAccum)(nil) +) + +func (s *objectService) Head(ctx context.Context, req *object.HeadRequest) (res *object.HeadResponse, err error) { + defer func() { + if r := recover(); r != nil { + s.log.Error(panicLogMsg, + zap.Stringer("request", object.RequestHead), + zap.Any("reason", r), + ) + + err = 
errServerPanic + } + + err = s.statusCalculator.make(requestError{ + t: object.RequestHead, + e: err, + }) + }() + + var r interface{} + + if r, err = s.requestHandler.handleRequest(ctx, handleRequestParams{ + request: req, + executor: s, + }); err != nil { + return + } + + obj := r.(*objectData).Object + if !req.FullHeaders { + obj.Headers = nil + } + + res = makeHeadResponse(obj) + err = s.respPreparer.prepareResponse(ctx, req, res) + + return res, err +} + +func (s *coreObjectReceiver) getObject(ctx context.Context, info ...transport.GetInfo) (*objectData, error) { + var ( + childCount int + children []ID + ) + + obj, err := s.straightObjRecv.getObject(ctx, s.sendingRequest(info[0])) + + if info[0].GetRaw() { + return obj, err + } else if err == nil { + children = obj.Links(object.Link_Child) + if childCount = len(children); childCount <= 0 { + return obj, nil + } + } + + if s.ancestralRecv == nil { + return nil, errNonAssembly + } + + ctx = contextWithValues(ctx, + transformer.PublicSessionToken, info[0].GetSessionToken(), + implementations.BearerToken, info[0].GetBearerToken(), + implementations.ExtendedHeaders, info[0].ExtendedHeaders(), + ) + + if childCount <= 0 { + if children = s.childLister.children(ctx, info[0].GetAddress()); len(children) == 0 { + return nil, childrenNotFound + } + } + + res, err := s.ancestralRecv.getFromChildren(ctx, info[0].GetAddress(), children, info[0].Type() == object.RequestHead) + if err != nil { + s.log.Error("could not get object from children", + zap.String("error", err.Error()), + ) + + return nil, errIncompleteOperation + } + + return res, nil +} + +func (s *coreObjectReceiver) sendingRequest(src transport.GetInfo) transport.GetInfo { + if s.ancestralRecv == nil || src.GetRaw() { + return src + } + + getInfo := *newRawGetInfo() + getInfo.setTimeout(src.GetTimeout()) + getInfo.setAddress(src.GetAddress()) + getInfo.setRaw(true) + getInfo.setSessionToken(src.GetSessionToken()) + getInfo.setBearerToken(src.GetBearerToken()) + getInfo.setExtendedHeaders(src.ExtendedHeaders()) + getInfo.setTTL( + maxu32( + src.GetTTL(), + service.NonForwardingTTL, + ), + ) + + if src.Type() == object.RequestHead { + headInfo := newRawHeadInfo() + headInfo.setGetInfo(getInfo) + headInfo.setFullHeaders(true) + + return headInfo + } + + return getInfo +} + +func (s *coreAncestralReceiver) getFromChildren(ctx context.Context, addr Address, children []ID, head bool) (*objectData, error) { + var ( + err error + childObjs []Object + res = new(objectData) + ) + + if childObjs, err = s.childrenRecv.getChildren(ctx, addr, children); err != nil { + return nil, err + } else if res.Object, err = s.objRewinder.rewind(ctx, childObjs...); err != nil { + return nil, err + } + + if head { + return res, nil + } + + rngInfo := newRawRangeInfo() + rngInfo.setTTL(service.NonForwardingTTL) + rngInfo.setTimeout(s.timeout) + rngInfo.setAddress(addr) + rngInfo.setSessionToken(tokenFromContext(ctx)) + rngInfo.setBearerToken(bearerFromContext(ctx)) + rngInfo.setExtendedHeaders(extendedHeadersFromContext(ctx)) + rngInfo.setRange(Range{ + Length: res.SystemHeader.PayloadLength, + }) + + res.payload, err = s.pRangeRecv.getRangeData(ctx, rngInfo, childObjs...) 
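+	// res.payload now lazily streams the whole parent payload (range [0, PayloadLength))
+	// reassembled from the child objects.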
+ + return res, err +} + +func (s *corePayloadRangeReceiver) getRangeData(ctx context.Context, info transport.RangeInfo, selection ...Object) (res io.Reader, err error) { + defer func() { + if err != nil { + if _, ok := s.mErr[errors.Cause(err)]; !ok { + s.log.Error("get payload range data failure", + zap.String("error", err.Error()), + ) + + err = errPayloadRangeNotFound + } + } + }() + + var ( + chopper RangeChopper + addr = info.GetAddress() + ) + + chopper, err = s.chopTable.GetChopper(addr, objio.RCCharybdis) + if err != nil || !chopper.Closed() { + if len(selection) == 0 { + if chopper, err = s.chopTable.GetChopper(addr, objio.RCScylla); err != nil { + if chopper, err = objio.NewScylla(&objio.ChopperParams{ + RelativeReceiver: s.relRecv, + Addr: addr, + }); err != nil { + return + } + } + } else { + rs := make([]RangeDescriptor, 0, len(selection)) + for i := range selection { + rs = append(rs, RangeDescriptor{ + Size: int64(selection[i].SystemHeader.PayloadLength), + Addr: *selection[i].Address(), + + LeftBound: i == 0, + RightBound: i == len(selection)-1, + }) + } + + if chopper, err = objio.NewCharybdis(&objio.CharybdisParams{ + Addr: addr, + ReadySelection: rs, + }); err != nil { + return + } + } + } + + _ = s.chopTable.PutChopper(addr, chopper) + + r := info.GetRange() + + ctx = contextWithValues(ctx, + transformer.PublicSessionToken, info.GetSessionToken(), + implementations.BearerToken, info.GetBearerToken(), + implementations.ExtendedHeaders, info.ExtendedHeaders(), + ) + + var rList []RangeDescriptor + + if rList, err = chopper.Chop(ctx, int64(r.Length), int64(r.Offset), true); err != nil { + return + } + + return s.payloadRecv.recvPayload(ctx, newRangeInfoList(info, rList)) +} + +func newRangeInfoList(src transport.RangeInfo, rList []RangeDescriptor) []transport.RangeInfo { + var infoList []transport.RangeInfo + if l := len(rList); l == 1 && src.GetAddress().Equal(&rList[0].Addr) { + infoList = []transport.RangeInfo{src} + } else { + infoList = make([]transport.RangeInfo, 0, l) + for i := range rList { + rngInfo := newRawRangeInfo() + + rngInfo.setTTL(src.GetTTL()) + rngInfo.setTimeout(src.GetTimeout()) + rngInfo.setAddress(rList[i].Addr) + rngInfo.setSessionToken(src.GetSessionToken()) + rngInfo.setBearerToken(src.GetBearerToken()) + rngInfo.setExtendedHeaders(src.ExtendedHeaders()) + rngInfo.setRange(Range{ + Offset: uint64(rList[i].Offset), + Length: uint64(rList[i].Size), + }) + + infoList = append(infoList, rngInfo) + } + } + + return infoList +} + +func (s *corePayloadPartReceiver) recvPayload(ctx context.Context, rList []transport.RangeInfo) (io.Reader, error) { + pool, err := s.windowController.newWindow() + if err != nil { + return nil, err + } + + var ( + readers = make([]io.Reader, 0, len(rList)) + writers = make([]*io.PipeWriter, 0, len(rList)) + ) + + for range rList { + r, w := io.Pipe() + readers = append(readers, r) + writers = append(writers, w) + } + + ctx, cancel := context.WithCancel(ctx) + + go func() { + for i := range rList { + select { + case <-ctx.Done(): + return + default: + } + + rd, w := rList[i], writers[i] + + if err := pool.Submit(func() { + err := s.rDataRecv.recvData(ctx, rd, w) + if err != nil { + cancel() + } + _ = w.CloseWithError(err) + }); err != nil { + _ = w.CloseWithError(err) + + cancel() + + break + } + } + }() + + return io.MultiReader(readers...), nil +} + +func (s *simpleWindowController) newWindow() (WorkerPool, error) { return ants.NewPool(s.windowSize) } + +func (s *straightRangeDataReceiver) recvData(ctx context.Context, 
info transport.RangeInfo, w io.Writer) error { + rAccum := newRangeReaderAccumulator() + err := s.executor.executeOperation(ctx, info, rAccum) + + if err == nil { + _, err = io.Copy(w, rAccum.rangeData()) + } + + return err +} + +func maxu32(a, b uint32) uint32 { + if a > b { + return a + } + + return b +} + +func (s *straightObjectReceiver) getObject(ctx context.Context, info ...transport.GetInfo) (*objectData, error) { + accum := newObjectAccumulator() + if err := s.executor.executeOperation(ctx, info[0], accum); err != nil { + return nil, err + } + + return &objectData{ + Object: accum.object(), + payload: new(emptyReader), + }, nil +} + +func (s *coreChildrenReceiver) getChildren(ctx context.Context, parent Address, children []ID) ([]Object, error) { + objList := make([]Object, 0, len(children)) + + headInfo := newRawHeadInfo() + headInfo.setTTL(service.NonForwardingTTL) + headInfo.setTimeout(s.timeout) + headInfo.setFullHeaders(true) + headInfo.setSessionToken(tokenFromContext(ctx)) + headInfo.setBearerToken(bearerFromContext(ctx)) + headInfo.setExtendedHeaders(extendedHeadersFromContext(ctx)) + + for i := range children { + headInfo.setAddress(Address{ + ObjectID: children[i], + CID: parent.CID, + }) + + obj, err := s.coreObjRecv.getObject(ctx, headInfo) + if err != nil { + return nil, errors.Errorf(emHeadRecvFail, i+1, len(children)) + } + + objList = append(objList, *obj.Object) + } + + return transformer.GetChain(objList...) +} + +func tokenFromContext(ctx context.Context) service.SessionToken { + if v, ok := ctx.Value(transformer.PublicSessionToken).(service.SessionToken); ok { + return v + } + + return nil +} + +func bearerFromContext(ctx context.Context) service.BearerToken { + if v, ok := ctx.Value(implementations.BearerToken).(service.BearerToken); ok { + return v + } + + return nil +} + +func extendedHeadersFromContext(ctx context.Context) []service.ExtendedHeader { + if v, ok := ctx.Value(implementations.ExtendedHeaders).([]service.ExtendedHeader); ok { + return v + } + + return nil +} + +func (s *coreObjectRewinder) rewind(ctx context.Context, objs ...Object) (*Object, error) { + objList, err := s.transformer.Restore(ctx, objs...) 
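+	// Restore reverses the applied transformations; the restored object is
+	// expected to be the first element of the returned list.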
+ if err != nil { + return nil, err + } + + return &objList[0], nil +} + +func (s *coreObjAccum) handleItem(v interface{}) { s.Do(func() { s.obj = v.(*Object) }) } + +func (s *coreObjAccum) object() *Object { return s.obj } + +func newObjectAccumulator() objectAccumulator { return &coreObjAccum{Once: new(sync.Once)} } + +func (s *rawGetInfo) getAddrInfo() *rawAddrInfo { + return s.rawAddrInfo +} + +func (s *rawGetInfo) setAddrInfo(v *rawAddrInfo) { + s.rawAddrInfo = v + s.setType(object.RequestGet) +} + +func newRawGetInfo() *rawGetInfo { + res := new(rawGetInfo) + + res.setAddrInfo(newRawAddressInfo()) + + return res +} + +func (s rawHeadInfo) GetFullHeaders() bool { + return s.fullHeaders +} + +func (s *rawHeadInfo) setFullHeaders(v bool) { + s.fullHeaders = v +} + +func (s rawHeadInfo) getGetInfo() rawGetInfo { + return s.rawGetInfo +} + +func (s *rawHeadInfo) setGetInfo(v rawGetInfo) { + s.rawGetInfo = v + s.setType(object.RequestHead) +} + +func newRawHeadInfo() *rawHeadInfo { + res := new(rawHeadInfo) + + res.setGetInfo(*newRawGetInfo()) + + return res +} + +func (s *transportRequest) GetAddress() Address { + switch t := s.serviceRequest.(type) { + case *object.HeadRequest: + return t.Address + case *GetRangeRequest: + return t.Address + case *object.GetRangeHashRequest: + return t.Address + case *object.DeleteRequest: + return t.Address + case *object.GetRequest: + return t.Address + default: + panic(fmt.Sprintf(pmWrongRequestType, t)) + } +} + +func (s *transportRequest) GetFullHeaders() bool { + return s.serviceRequest.(*object.HeadRequest).GetFullHeaders() +} + +func (s *transportRequest) Raw() bool { + return s.serviceRequest.GetRaw() +} + +func (s *emptyReader) Read([]byte) (int, error) { return 0, io.EOF } + +func newRangeReaderAccumulator() rangeReaderAccumulator { return &rangeRdrAccum{Once: new(sync.Once)} } + +func (s *rangeRdrAccum) rangeData() io.Reader { return s.r } + +func (s *rangeRdrAccum) handleItem(r interface{}) { s.Do(func() { s.r = r.(io.Reader) }) } diff --git a/services/public/object/head_test.go b/services/public/object/head_test.go new file mode 100644 index 000000000..fcf2be7bb --- /dev/null +++ b/services/public/object/head_test.go @@ -0,0 +1,595 @@ +package object + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/transformer" + "github.com/nspcc-dev/neofs-node/lib/transport" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +type ( + // Entity for mocking interfaces. + // Implementation of any interface intercepts arguments via f (if not nil). + // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. + testHeadEntity struct { + // Set of interfaces which entity must implement, but some methods from those does not call. + transformer.ObjectRestorer + + // Argument interceptor. Used for ascertain of correct parameter passage between components. + f func(...interface{}) + // Mocked result of any interface. + res interface{} + // Mocked error of any interface. 
+ err error + } +) + +var ( + _ ancestralObjectsReceiver = (*testHeadEntity)(nil) + _ objectChildrenLister = (*testHeadEntity)(nil) + _ objectReceiver = (*testHeadEntity)(nil) + _ requestHandler = (*testHeadEntity)(nil) + _ operationExecutor = (*testHeadEntity)(nil) + _ objectRewinder = (*testHeadEntity)(nil) + _ transformer.ObjectRestorer = (*testHeadEntity)(nil) + _ responsePreparer = (*testHeadEntity)(nil) +) + +func (s *testHeadEntity) prepareResponse(_ context.Context, req serviceRequest, resp serviceResponse) error { + if s.f != nil { + s.f(req, resp) + } + return s.err +} + +func (s *testHeadEntity) getFromChildren(ctx context.Context, addr Address, ids []ID, h bool) (*objectData, error) { + if s.f != nil { + s.f(addr, ids, h, ctx) + } + if s.err != nil { + return nil, s.err + } + return s.res.(*objectData), nil +} + +func (s *testHeadEntity) Restore(_ context.Context, objs ...Object) ([]Object, error) { + if s.f != nil { + s.f(objs) + } + if s.err != nil { + return nil, s.err + } + return s.res.([]Object), nil +} + +func (s *testHeadEntity) rewind(ctx context.Context, objs ...Object) (*Object, error) { + if s.f != nil { + s.f(objs) + } + return s.res.(*Object), s.err +} + +func (s *testHeadEntity) executeOperation(_ context.Context, i transport.MetaInfo, h responseItemHandler) error { + if s.f != nil { + s.f(i, h) + } + return s.err +} + +func (s *testHeadEntity) children(ctx context.Context, addr Address) []ID { + if s.f != nil { + s.f(addr, ctx) + } + return s.res.([]ID) +} + +func (s *testHeadEntity) getObject(_ context.Context, p ...transport.GetInfo) (*objectData, error) { + if s.f != nil { + s.f(p) + } + if s.err != nil { + return nil, s.err + } + return s.res.(*objectData), nil +} + +func (s *testHeadEntity) handleRequest(_ context.Context, p handleRequestParams) (interface{}, error) { + if s.f != nil { + s.f(p) + } + return s.res, s.err +} + +func Test_transportRequest_HeadInfo(t *testing.T) { + t.Run("address", func(t *testing.T) { + t.Run("valid request", func(t *testing.T) { + addr := testObjectAddress(t) + + reqs := []transportRequest{ + {serviceRequest: &object.HeadRequest{Address: addr}}, + {serviceRequest: &object.GetRequest{Address: addr}}, + {serviceRequest: &GetRangeRequest{Address: addr}}, + {serviceRequest: &object.GetRangeHashRequest{Address: addr}}, + {serviceRequest: &object.DeleteRequest{Address: addr}}, + } + + for i := range reqs { + require.Equal(t, addr, reqs[i].GetAddress()) + } + }) + + t.Run("unknown request", func(t *testing.T) { + req := new(object.SearchRequest) + + r := &transportRequest{ + serviceRequest: req, + } + + require.PanicsWithValue(t, fmt.Sprintf(pmWrongRequestType, req), func() { + _ = r.GetAddress() + }) + }) + }) + + t.Run("full headers", func(t *testing.T) { + r := &transportRequest{ + serviceRequest: &object.HeadRequest{ + FullHeaders: true, + }, + } + + require.True(t, r.GetFullHeaders()) + }) + + t.Run("raw", func(t *testing.T) { + hReq := new(object.HeadRequest) + hReq.SetRaw(true) + + r := &transportRequest{ + serviceRequest: hReq, + } + require.True(t, r.Raw()) + + hReq.SetRaw(false) + require.False(t, r.Raw()) + }) +} + +func Test_rawHeadInfo(t *testing.T) { + t.Run("address", func(t *testing.T) { + addr := testObjectAddress(t) + + r := newRawHeadInfo() + r.setAddress(addr) + + require.Equal(t, addr, r.GetAddress()) + }) + + t.Run("full headers", func(t *testing.T) { + r := newRawHeadInfo() + r.setFullHeaders(true) + + require.True(t, r.GetFullHeaders()) + }) +} + +func Test_coreObjAccum(t *testing.T) { + t.Run("new", func(t 
*testing.T) { + s := newObjectAccumulator() + v := s.(*coreObjAccum) + require.Nil(t, v.obj) + require.NotNil(t, v.Once) + }) + + t.Run("handle/object", func(t *testing.T) { + obj1 := new(Object) + + s := newObjectAccumulator() + + // add first object + s.handleItem(obj1) + + // ascertain tha object was added + require.Equal(t, obj1, s.object()) + + obj2 := new(Object) + + // add second object + s.handleItem(obj2) + + // ascertain that second object was ignored + require.Equal(t, obj1, s.object()) + }) +} + +func Test_objectService_Head(t *testing.T) { + ctx := context.TODO() + + t.Run("request handler error", func(t *testing.T) { + // create custom error for test + rhErr := internal.Error("test error for request handler") + + // create custom request for test + req := new(object.HeadRequest) + + s := &objectService{ + statusCalculator: newStatusCalculator(), + } + + s.requestHandler = &testHeadEntity{ + f: func(items ...interface{}) { + t.Run("correct request handler params", func(t *testing.T) { + p := items[0].(handleRequestParams) + require.Equal(t, s, p.executor) + require.Equal(t, req, p.request) + }) + }, + err: rhErr, // force requestHandler to return rhErr + } + + res, err := s.Head(ctx, req) + require.EqualError(t, err, rhErr.Error()) + require.Nil(t, res) + }) + + t.Run("correct resulst", func(t *testing.T) { + obj := &objectData{Object: new(Object)} + + resp := &object.HeadResponse{Object: obj.Object} + + req := new(object.HeadRequest) + + s := &objectService{ + requestHandler: &testHeadEntity{ + res: obj, // force request handler to return obj + }, + respPreparer: &testHeadEntity{ + f: func(items ...interface{}) { + require.Equal(t, req, items[0]) + require.Equal(t, makeHeadResponse(obj.Object), items[1]) + }, + res: resp, + }, + + statusCalculator: newStatusCalculator(), + } + + res, err := s.Head(ctx, new(object.HeadRequest)) + require.NoError(t, err) + require.Equal(t, resp, res) + }) +} + +func Test_coreHeadReceiver_head(t *testing.T) { + ctx := context.TODO() + + t.Run("raw handling", func(t *testing.T) { + // create custom head info for test + hInfo := newRawHeadInfo() + hInfo.setRaw(true) + + // create custom error for test + srErr := internal.Error("test error for straight object receiver") + + s := &coreObjectReceiver{ + straightObjRecv: &testHeadEntity{ + err: srErr, // force straightObjectReceiver to return srErr + }, + } + + _, err := s.getObject(ctx, hInfo) + // ascertain that straightObjectReceiver result returns in raw case as expected + require.EqualError(t, err, srErr.Error()) + }) + + t.Run("straight receive of non-linking object", func(t *testing.T) { + // create custom head info for test + hInfo := newRawHeadInfo() + + // create object w/o children for test + obj := &objectData{Object: new(Object)} + + s := &coreObjectReceiver{ + straightObjRecv: &testHeadEntity{ + f: func(items ...interface{}) { + t.Run("correct straight receiver params", func(t *testing.T) { + require.Equal(t, []transport.GetInfo{hInfo}, items[0]) + }) + }, + res: obj, + }, + } + + res, err := s.getObject(ctx, hInfo) + require.NoError(t, err) + require.Equal(t, obj, res) + }) + + t.Run("linking object/non-assembly", func(t *testing.T) { + // create custom head info for test + hInfo := newRawHeadInfo() + + // create object w/ children for test + obj := &objectData{ + Object: &Object{Headers: []Header{{Value: &object.Header_Link{Link: &object.Link{Type: object.Link_Child}}}}}, + } + + s := &coreObjectReceiver{ + straightObjRecv: &testHeadEntity{ + res: obj, // force straightObjectReceiver 
to return obj + }, + ancestralRecv: nil, // make component to be non-assembly + } + + res, err := s.getObject(ctx, hInfo) + require.EqualError(t, err, errNonAssembly.Error()) + require.Nil(t, res) + }) + + t.Run("children search failure", func(t *testing.T) { + addr := testObjectAddress(t) + + hInfo := newRawHeadInfo() + hInfo.setAddress(addr) + hInfo.setSessionToken(new(service.Token)) + + s := &coreObjectReceiver{ + straightObjRecv: &testHeadEntity{ + err: internal.Error(""), // force straightObjectReceiver to return non-empty error + }, + childLister: &testHeadEntity{ + f: func(items ...interface{}) { + t.Run("correct child lister params", func(t *testing.T) { + require.Equal(t, addr, items[0]) + require.Equal(t, + hInfo.GetSessionToken(), + items[1].(context.Context).Value(transformer.PublicSessionToken), + ) + }) + }, + res: make([]ID, 0), // force objectChildren lister to return empty list + }, + ancestralRecv: new(testHeadEntity), + } + + res, err := s.getObject(ctx, hInfo) + require.EqualError(t, err, childrenNotFound.Error()) + require.Nil(t, res) + }) + + t.Run("correct result", func(t *testing.T) { + var ( + childCount = 5 + rErr = internal.Error("test error for rewinding receiver") + children = make([]ID, 0, childCount) + ) + + for i := 0; i < childCount; i++ { + id := testObjectAddress(t).ObjectID + children = append(children, id) + } + + // create custom head info + hInfo := newRawHeadInfo() + hInfo.setTTL(5) + hInfo.setTimeout(3 * time.Second) + hInfo.setAddress(testObjectAddress(t)) + hInfo.setSessionToken(new(service.Token)) + + t.Run("error/children from straight receiver", func(t *testing.T) { + obj := &objectData{Object: new(Object)} + + for i := range children { + // add child reference to object + obj.Headers = append(obj.Headers, Header{ + Value: &object.Header_Link{Link: &object.Link{Type: object.Link_Child, ID: children[i]}}, + }) + } + + s := &coreObjectReceiver{ + straightObjRecv: &testHeadEntity{ + res: obj, // force straight receiver to return obj + }, + ancestralRecv: &testHeadEntity{ + f: func(items ...interface{}) { + t.Run("correct rewinding receiver", func(t *testing.T) { + require.Equal(t, hInfo.GetAddress(), items[0]) + require.Equal(t, children, items[1]) + require.True(t, items[2].(bool)) + require.Equal(t, + hInfo.GetSessionToken(), + items[3].(context.Context).Value(transformer.PublicSessionToken), + ) + }) + }, + err: rErr, // force rewinding receiver to return rErr + }, + log: zap.L(), + } + + res, err := s.getObject(ctx, hInfo) + require.EqualError(t, err, errIncompleteOperation.Error()) + require.Nil(t, res) + }) + + t.Run("success/children from child lister", func(t *testing.T) { + obj := &objectData{Object: new(Object)} + + s := &coreObjectReceiver{ + straightObjRecv: &testHeadEntity{ + err: internal.Error(""), // force straight receiver to return non-nil error + }, + ancestralRecv: &testHeadEntity{ + f: func(items ...interface{}) { + t.Run("correct rewinding receiver", func(t *testing.T) { + require.Equal(t, hInfo.GetAddress(), items[0]) + require.Equal(t, children, items[1]) + require.True(t, items[2].(bool)) + }) + }, + res: obj, // force rewinding receiver to return obj + }, + childLister: &testHeadEntity{ + res: children, // force objectChildrenLister to return particular list + }, + } + + res, err := s.getObject(ctx, hInfo) + require.NoError(t, err, rErr.Error()) + require.Equal(t, obj, res) + }) + }) +} + +func Test_straightHeadReceiver_head(t *testing.T) { + ctx := context.TODO() + + hInfo := newRawHeadInfo() + 
hInfo.setFullHeaders(true) + + t.Run("executor error", func(t *testing.T) { + exErr := internal.Error("test error for operation executor") + + s := &straightObjectReceiver{ + executor: &testHeadEntity{ + f: func(items ...interface{}) { + t.Run("correct operation executor params", func(t *testing.T) { + require.Equal(t, hInfo, items[0]) + _ = items[1].(objectAccumulator) + }) + }, + err: exErr, // force operationExecutor to return exErr + }, + } + + _, err := s.getObject(ctx, hInfo) + require.EqualError(t, err, exErr.Error()) + + hInfo = newRawHeadInfo() + hInfo.setFullHeaders(true) + + _, err = s.getObject(ctx, hInfo) + require.EqualError(t, err, exErr.Error()) + }) + + t.Run("correct result", func(t *testing.T) { + obj := &objectData{Object: new(Object), payload: new(emptyReader)} + + s := &straightObjectReceiver{ + executor: &testHeadEntity{ + f: func(items ...interface{}) { + items[1].(objectAccumulator).handleItem(obj.Object) + }, + }, + } + + res, err := s.getObject(ctx, hInfo) + require.NoError(t, err) + require.Equal(t, obj, res) + }) +} + +func Test_coreObjectRewinder_rewind(t *testing.T) { + ctx := context.TODO() + + t.Run("transformer failure", func(t *testing.T) { + tErr := internal.Error("test error for object transformer") + objs := []Object{*new(Object), *new(Object)} + + s := &coreObjectRewinder{ + transformer: &testHeadEntity{ + f: func(items ...interface{}) { + t.Run("correct transformer params", func(t *testing.T) { + require.Equal(t, objs, items[0]) + }) + }, + err: tErr, // force transformer to return tErr + }, + } + + res, err := s.rewind(ctx, objs...) + require.EqualError(t, err, tErr.Error()) + require.Empty(t, res) + }) + + t.Run("correct result", func(t *testing.T) { + objs := []Object{ + {SystemHeader: SystemHeader{ID: testObjectAddress(t).ObjectID}}, + {SystemHeader: SystemHeader{ID: testObjectAddress(t).ObjectID}}, + } + + s := &coreObjectRewinder{ + transformer: &testHeadEntity{ + res: objs, // force transformer to return objs + }, + } + + res, err := s.rewind(ctx, objs...) 
+ require.NoError(t, err) + require.Equal(t, &objs[0], res) + }) +} + +func Test_coreObjectReceiver_sendingRequest(t *testing.T) { + t.Run("non-assembly", func(t *testing.T) { + src := &transportRequest{serviceRequest: new(object.GetRequest)} + // ascertain that request not changed if node is non-assembled + require.Equal(t, src, new(coreObjectReceiver).sendingRequest(src)) + }) + + t.Run("assembly", func(t *testing.T) { + s := &coreObjectReceiver{ancestralRecv: new(testHeadEntity)} + + t.Run("raw request", func(t *testing.T) { + src := newRawGetInfo() + src.setRaw(true) + // ascertain that request not changed if request is raw + require.Equal(t, src, s.sendingRequest(src)) + }) + + t.Run("non-raw request", func(t *testing.T) { + getInfo := *newRawGetInfo() + getInfo.setTTL(uint32(5)) + getInfo.setTimeout(3 * time.Second) + getInfo.setAddress(testObjectAddress(t)) + getInfo.setRaw(false) + getInfo.setSessionToken(new(service.Token)) + + t.Run("get", func(t *testing.T) { + res := s.sendingRequest(getInfo) + require.Equal(t, getInfo.GetTimeout(), res.GetTimeout()) + require.Equal(t, getInfo.GetAddress(), res.GetAddress()) + require.Equal(t, getInfo.GetTTL(), res.GetTTL()) + require.Equal(t, getInfo.GetSessionToken(), res.GetSessionToken()) + require.True(t, res.GetRaw()) + + t.Run("zero ttl", func(t *testing.T) { + res := s.sendingRequest(newRawGetInfo()) + require.Equal(t, uint32(service.NonForwardingTTL), res.GetTTL()) + }) + }) + + t.Run("head", func(t *testing.T) { + hInfo := newRawHeadInfo() + hInfo.setGetInfo(getInfo) + hInfo.setFullHeaders(false) + + res := s.sendingRequest(hInfo) + require.Equal(t, getInfo.GetTimeout(), res.GetTimeout()) + require.Equal(t, getInfo.GetAddress(), res.GetAddress()) + require.Equal(t, getInfo.GetTTL(), res.GetTTL()) + require.Equal(t, getInfo.GetSessionToken(), res.GetSessionToken()) + require.True(t, res.GetRaw()) + require.True(t, res.(transport.HeadInfo).GetFullHeaders()) + }) + }) + }) +} diff --git a/services/public/object/implementations.go b/services/public/object/implementations.go new file mode 100644 index 000000000..2f4b3715a --- /dev/null +++ b/services/public/object/implementations.go @@ -0,0 +1,32 @@ +package object + +import ( + "context" + + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-node/lib/peers" + "github.com/pkg/errors" +) + +type ( + remoteService struct { + ps peers.Interface + } +) + +// NewRemoteService is a remote service controller's constructor. 
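+//
+// A minimal usage sketch (illustrative only; ps is an assumed peers.Interface
+// value and maddr an assumed multiaddr.Multiaddr of the remote node):
+//
+//	rs := NewRemoteService(ps)
+//	cli, err := rs.Remote(ctx, maddr)
+//	if err != nil {
+//		// the gRPC connection to the peer could not be established
+//	}
+//	// cli is an object.ServiceClient bound to the remote node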
+func NewRemoteService(ps peers.Interface) RemoteService { + return &remoteService{ + ps: ps, + } +} + +func (rs remoteService) Remote(ctx context.Context, addr multiaddr.Multiaddr) (object.ServiceClient, error) { + con, err := rs.ps.GRPCConnection(ctx, addr, false) + if err != nil { + return nil, errors.Wrapf(err, "remoteService.Remote failed on GRPCConnection to %s", addr) + } + + return object.NewServiceClient(con), nil +} diff --git a/services/public/object/listing.go b/services/public/object/listing.go new file mode 100644 index 000000000..9967fc543 --- /dev/null +++ b/services/public/object/listing.go @@ -0,0 +1,286 @@ +package object + +import ( + "context" + "time" + + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-api-go/object" + v1 "github.com/nspcc-dev/neofs-api-go/query" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/nspcc-dev/neofs-node/lib/objio" + "github.com/nspcc-dev/neofs-node/lib/transport" + "go.uber.org/zap" +) + +type ( + objectChildrenLister interface { + children(context.Context, Address) []ID + } + + coreChildrenLister struct { + queryFn relationQueryFunc + objSearcher objectSearcher + log *zap.Logger + timeout time.Duration + } + + relationQueryFunc func(Address) ([]byte, error) + + rawSearchInfo struct { + *rawMetaInfo + cid CID + query []byte + } + + neighborReceiver struct { + firstChildQueryFn relationQueryFunc + leftNeighborQueryFn relationQueryFunc + rightNeighborQueryFn relationQueryFunc + rangeDescRecv selectiveRangeReceiver + } + + selectiveRangeReceiver interface { + rangeDescriptor(context.Context, Address, relationQueryFunc) (RangeDescriptor, error) + } + + selectiveRangeRecv struct { + executor implementations.SelectiveContainerExecutor + } +) + +const ( + lmQueryMarshalFail = "marshal search query failure" + lmListFail = "searching inside children listing failure" + + errRelationNotFound = internal.Error("relation not found") +) + +var ( + _ relationQueryFunc = coreChildrenQueryFunc + _ transport.SearchInfo = (*rawSearchInfo)(nil) + _ objectChildrenLister = (*coreChildrenLister)(nil) + _ objio.RelativeReceiver = (*neighborReceiver)(nil) + _ selectiveRangeReceiver = (*selectiveRangeRecv)(nil) +) + +func (s *neighborReceiver) Base(ctx context.Context, addr Address) (RangeDescriptor, error) { + if res, err := s.rangeDescRecv.rangeDescriptor(ctx, addr, s.firstChildQueryFn); err == nil { + return res, nil + } + + return s.rangeDescRecv.rangeDescriptor(ctx, addr, nil) +} + +func (s *neighborReceiver) Neighbor(ctx context.Context, addr Address, left bool) (res RangeDescriptor, err error) { + if left { + res, err = s.rangeDescRecv.rangeDescriptor(ctx, addr, s.leftNeighborQueryFn) + } else { + res, err = s.rangeDescRecv.rangeDescriptor(ctx, addr, s.rightNeighborQueryFn) + } + + return +} + +func (s *selectiveRangeRecv) rangeDescriptor(ctx context.Context, addr Address, fn relationQueryFunc) (res RangeDescriptor, err error) { + b := false + + p := &implementations.HeadParams{ + GetParams: implementations.GetParams{ + SelectiveParams: implementations.SelectiveParams{ + CID: addr.CID, + ServeLocal: true, + TTL: service.SingleForwardingTTL, + Token: tokenFromContext(ctx), + Bearer: bearerFromContext(ctx), + + ExtendedHeaders: extendedHeadersFromContext(ctx), + }, + Handler: func(_ multiaddr.Multiaddr, obj *Object) { + res.Addr = *obj.Address() + res.Offset = 0 + res.Size = int64(obj.SystemHeader.PayloadLength) + + sameID := 
res.Addr.ObjectID.Equal(addr.ObjectID) + bound := boundaryChild(obj) + res.LeftBound = sameID || bound == boundBoth || bound == boundLeft + res.RightBound = sameID || bound == boundBoth || bound == boundRight + + b = true + }, + }, + FullHeaders: true, + } + + if fn != nil { + if p.Query, err = fn(addr); err != nil { + return + } + } else { + p.IDList = []ID{addr.ObjectID} + } + + if err = s.executor.Head(ctx, p); err != nil { + return + } else if !b { + err = errRelationNotFound + } + + return res, err +} + +const ( + boundBoth = iota + boundLeft + boundRight + boundMid +) + +func boundaryChild(obj *Object) (res int) { + splitInd, _ := obj.LastHeader(object.HeaderType(object.TransformHdr)) + if splitInd < 0 { + return + } + + for i := len(obj.Headers) - 1; i > splitInd; i-- { + hVal := obj.Headers[i].GetValue() + if hVal == nil { + continue + } + + hLink, ok := hVal.(*object.Header_Link) + if !ok || hLink == nil || hLink.Link == nil { + continue + } + + linkType := hLink.Link.GetType() + if linkType != object.Link_Previous && linkType != object.Link_Next { + continue + } + + res = boundMid + + if hLink.Link.ID.Empty() { + if linkType == object.Link_Next { + res = boundRight + } else if linkType == object.Link_Previous { + res = boundLeft + } + + return + } + } + + return res +} + +func firstChildQueryFunc(addr Address) ([]byte, error) { + return (&v1.Query{ + Filters: append(parentFilters(addr), QueryFilter{ + Type: v1.Filter_Exact, + Name: KeyPrev, + Value: ID{}.String(), + }), + }).Marshal() +} + +func leftNeighborQueryFunc(addr Address) ([]byte, error) { + return idQueryFunc(KeyNext, addr.ObjectID) +} + +func rightNeighborQueryFunc(addr Address) ([]byte, error) { + return idQueryFunc(KeyPrev, addr.ObjectID) +} + +func idQueryFunc(key string, id ID) ([]byte, error) { + return (&v1.Query{Filters: []QueryFilter{ + { + Type: v1.Filter_Exact, + Name: key, + Value: id.String(), + }, + }}).Marshal() +} + +func coreChildrenQueryFunc(addr Address) ([]byte, error) { + return (&v1.Query{Filters: parentFilters(addr)}).Marshal() +} + +func (s *coreChildrenLister) children(ctx context.Context, parent Address) []ID { + query, err := s.queryFn(parent) + if err != nil { + s.log.Error(lmQueryMarshalFail, zap.Error(err)) + return nil + } + + sInfo := newRawSearchInfo() + sInfo.setTTL(service.NonForwardingTTL) + sInfo.setTimeout(s.timeout) + sInfo.setCID(parent.CID) + sInfo.setQuery(query) + sInfo.setSessionToken(tokenFromContext(ctx)) + sInfo.setBearerToken(bearerFromContext(ctx)) + sInfo.setExtendedHeaders(extendedHeadersFromContext(ctx)) + + children, err := s.objSearcher.searchObjects(ctx, sInfo) + if err != nil { + s.log.Error(lmListFail, zap.Error(err)) + return nil + } + + res := make([]ID, 0, len(children)) + for i := range children { + res = append(res, children[i].ObjectID) + } + + return res +} + +func (s *rawSearchInfo) GetCID() CID { + return s.cid +} + +func (s *rawSearchInfo) setCID(v CID) { + s.cid = v +} + +func (s *rawSearchInfo) GetQuery() []byte { + return s.query +} + +func (s *rawSearchInfo) setQuery(v []byte) { + s.query = v +} + +func (s *rawSearchInfo) getMetaInfo() *rawMetaInfo { + return s.rawMetaInfo +} + +func (s *rawSearchInfo) setMetaInfo(v *rawMetaInfo) { + s.rawMetaInfo = v + s.setType(object.RequestSearch) +} + +func newRawSearchInfo() *rawSearchInfo { + res := new(rawSearchInfo) + + res.setMetaInfo(newRawMetaInfo()) + + return res +} + +func parentFilters(addr Address) []QueryFilter { + return []QueryFilter{ + { + Type: v1.Filter_Exact, + Name: transport.KeyHasParent, + 
}, + { + Type: v1.Filter_Exact, + Name: transport.KeyParent, + Value: addr.ObjectID.String(), + }, + } +} diff --git a/services/public/object/listing_test.go b/services/public/object/listing_test.go new file mode 100644 index 000000000..7c27ca794 --- /dev/null +++ b/services/public/object/listing_test.go @@ -0,0 +1,513 @@ +package object + +import ( + "context" + "testing" + "time" + + "github.com/nspcc-dev/neofs-api-go/query" + v1 "github.com/nspcc-dev/neofs-api-go/query" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/nspcc-dev/neofs-node/lib/test" + "github.com/nspcc-dev/neofs-node/lib/transport" + "github.com/stretchr/testify/require" +) + +type ( + // Entity for mocking interfaces. + // Implementation of any interface intercepts arguments via f (if not nil). + // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. + testListingEntity struct { + // Set of interfaces which entity must implement, but some methods from those does not call. + implementations.SelectiveContainerExecutor + + // Argument interceptor. Used for ascertain of correct parameter passage between components. + f func(...interface{}) + // Mocked result of any interface. + res interface{} + // Mocked error of any interface. + err error + } +) + +var ( + _ objectSearcher = (*testListingEntity)(nil) + _ selectiveRangeReceiver = (*testListingEntity)(nil) + + _ implementations.SelectiveContainerExecutor = (*testListingEntity)(nil) +) + +func (s *testListingEntity) rangeDescriptor(_ context.Context, a Address, f relationQueryFunc) (RangeDescriptor, error) { + if s.f != nil { + s.f(a, f) + } + if s.err != nil { + return RangeDescriptor{}, s.err + } + return s.res.(RangeDescriptor), nil +} + +func (s *testListingEntity) Head(_ context.Context, p *implementations.HeadParams) error { + if s.f != nil { + s.f(p) + } + return s.err +} + +func (s *testListingEntity) searchObjects(ctx context.Context, i transport.SearchInfo) ([]Address, error) { + if s.f != nil { + s.f(i) + } + if s.err != nil { + return nil, s.err + } + return s.res.([]Address), nil +} + +func Test_rawSeachInfo(t *testing.T) { + t.Run("TTL", func(t *testing.T) { + ttl := uint32(3) + + r := newRawSearchInfo() + r.setTTL(ttl) + + require.Equal(t, ttl, r.GetTTL()) + }) + + t.Run("timeout", func(t *testing.T) { + timeout := 3 * time.Second + + r := newRawSearchInfo() + r.setTimeout(timeout) + + require.Equal(t, timeout, r.GetTimeout()) + }) + + t.Run("CID", func(t *testing.T) { + cid := testObjectAddress(t).CID + + r := newRawSearchInfo() + r.setCID(cid) + + require.Equal(t, cid, r.GetCID()) + }) + + t.Run("query", func(t *testing.T) { + query := testData(t, 10) + + r := newRawSearchInfo() + r.setQuery(query) + + require.Equal(t, query, r.GetQuery()) + }) +} + +func Test_coreChildrenQueryFunc(t *testing.T) { + t.Run("correct query composition", func(t *testing.T) { + // create custom address for test + addr := testObjectAddress(t) + + res, err := coreChildrenQueryFunc(addr) + require.NoError(t, err) + + // unmarshal query + q := v1.Query{} + require.NoError(t, q.Unmarshal(res)) + + // ascertain that filter list composed correctly + require.Len(t, q.Filters, 2) + + require.Contains(t, q.Filters, QueryFilter{ + Type: v1.Filter_Exact, + Name: transport.KeyHasParent, + }) + + require.Contains(t, q.Filters, QueryFilter{ + Type: v1.Filter_Exact, + Name: transport.KeyParent, + Value: addr.ObjectID.String(), + }) + }) +} + 
+func Test_coreChildrenLister_children(t *testing.T) { + ctx := context.TODO() + addr := testObjectAddress(t) + + t.Run("query function failure", func(t *testing.T) { + s := &coreChildrenLister{ + queryFn: func(v Address) ([]byte, error) { + t.Run("correct query function params", func(t *testing.T) { + require.Equal(t, addr, v) + }) + return nil, internal.Error("") // force relationQueryFunc to return some non-nil error + }, + log: test.NewTestLogger(false), + } + + require.Empty(t, s.children(ctx, addr)) + }) + + t.Run("object searcher failure", func(t *testing.T) { + // create custom timeout for test + sErr := internal.Error("test error for object searcher") + // create custom timeout for test + timeout := 3 * time.Second + // create custom query for test + query := testData(t, 10) + + s := &coreChildrenLister{ + queryFn: func(v Address) ([]byte, error) { + return query, nil // force relationQueryFunc to return created query + }, + objSearcher: &testListingEntity{ + f: func(items ...interface{}) { + t.Run("correct object searcher params", func(t *testing.T) { + p := items[0].(transport.SearchInfo) + require.Equal(t, timeout, p.GetTimeout()) + require.Equal(t, query, p.GetQuery()) + require.Equal(t, addr.CID, p.GetCID()) + require.Equal(t, uint32(service.NonForwardingTTL), p.GetTTL()) + }) + }, + err: sErr, // force objectSearcher to return sErr + }, + log: test.NewTestLogger(false), + timeout: timeout, + } + + require.Empty(t, s.children(ctx, addr)) + }) + + t.Run("correct result", func(t *testing.T) { + // create custom child list + addrList := testAddrList(t, 5) + idList := make([]ID, 0, len(addrList)) + for i := range addrList { + idList = append(idList, addrList[i].ObjectID) + } + + s := &coreChildrenLister{ + queryFn: func(address Address) ([]byte, error) { + return nil, nil // force relationQueryFunc to return nil error + }, + objSearcher: &testListingEntity{ + res: addrList, + }, + } + + require.Equal(t, idList, s.children(ctx, addr)) + }) +} + +func Test_queryGenerators(t *testing.T) { + t.Run("object ID", func(t *testing.T) { + var ( + q = new(query.Query) + key = "key for test" + id = testObjectAddress(t).ObjectID + ) + + res, err := idQueryFunc(key, id) + require.NoError(t, err) + + require.NoError(t, q.Unmarshal(res)) + require.Len(t, q.Filters, 1) + + require.Equal(t, query.Filter{ + Type: v1.Filter_Exact, + Name: key, + Value: id.String(), + }, q.Filters[0]) + }) + + t.Run("left neighbor", func(t *testing.T) { + var ( + q = new(query.Query) + addr = testObjectAddress(t) + ) + + res, err := leftNeighborQueryFunc(addr) + require.NoError(t, err) + + require.NoError(t, q.Unmarshal(res)) + require.Len(t, q.Filters, 1) + + require.Equal(t, query.Filter{ + Type: v1.Filter_Exact, + Name: KeyNext, + Value: addr.ObjectID.String(), + }, q.Filters[0]) + }) + + t.Run("right neighbor", func(t *testing.T) { + var ( + q = new(query.Query) + addr = testObjectAddress(t) + ) + + res, err := rightNeighborQueryFunc(addr) + require.NoError(t, err) + + require.NoError(t, q.Unmarshal(res)) + require.Len(t, q.Filters, 1) + + require.Equal(t, query.Filter{ + Type: v1.Filter_Exact, + Name: KeyPrev, + Value: addr.ObjectID.String(), + }, q.Filters[0]) + }) + + t.Run("first child", func(t *testing.T) { + var ( + q = new(query.Query) + addr = testObjectAddress(t) + ) + + res, err := firstChildQueryFunc(addr) + require.NoError(t, err) + + require.NoError(t, q.Unmarshal(res)) + require.Len(t, q.Filters, 3) + + require.Contains(t, q.Filters, query.Filter{ + Type: v1.Filter_Exact, + Name: 
transport.KeyHasParent, + }) + require.Contains(t, q.Filters, query.Filter{ + Type: v1.Filter_Exact, + Name: transport.KeyParent, + Value: addr.ObjectID.String(), + }) + require.Contains(t, q.Filters, query.Filter{ + Type: v1.Filter_Exact, + Name: KeyPrev, + Value: ID{}.String(), + }) + }) +} + +func Test_selectiveRangeRecv(t *testing.T) { + ctx := context.TODO() + addr := testObjectAddress(t) + + t.Run("query function failure", func(t *testing.T) { + qfErr := internal.Error("test error for query function") + _, err := new(selectiveRangeRecv).rangeDescriptor(ctx, testObjectAddress(t), func(Address) ([]byte, error) { + return nil, qfErr + }) + require.EqualError(t, err, qfErr.Error()) + }) + + t.Run("correct executor params", func(t *testing.T) { + t.Run("w/ query function", func(t *testing.T) { + qBytes := testData(t, 10) + + s := &selectiveRangeRecv{ + executor: &testListingEntity{ + f: func(items ...interface{}) { + p := items[0].(*implementations.HeadParams) + require.Equal(t, addr.CID, p.CID) + require.True(t, p.ServeLocal) + require.Equal(t, uint32(service.SingleForwardingTTL), p.TTL) + require.True(t, p.FullHeaders) + require.Equal(t, qBytes, p.Query) + require.Empty(t, p.IDList) + }, + }, + } + + _, _ = s.rangeDescriptor(ctx, addr, func(Address) ([]byte, error) { return qBytes, nil }) + }) + + t.Run("w/o query function", func(t *testing.T) { + s := &selectiveRangeRecv{ + executor: &testListingEntity{ + f: func(items ...interface{}) { + p := items[0].(*implementations.HeadParams) + require.Equal(t, addr.CID, p.CID) + require.True(t, p.ServeLocal) + require.Equal(t, uint32(service.SingleForwardingTTL), p.TTL) + require.True(t, p.FullHeaders) + require.Empty(t, p.Query) + require.Equal(t, []ID{addr.ObjectID}, p.IDList) + }, + }, + } + + _, _ = s.rangeDescriptor(ctx, addr, nil) + }) + }) + + t.Run("correct result", func(t *testing.T) { + t.Run("failure", func(t *testing.T) { + t.Run("executor failure", func(t *testing.T) { + exErr := internal.Error("test error for executor") + + s := &selectiveRangeRecv{ + executor: &testListingEntity{ + err: exErr, + }, + } + + _, err := s.rangeDescriptor(ctx, addr, nil) + require.EqualError(t, err, exErr.Error()) + }) + + t.Run("not found", func(t *testing.T) { + s := &selectiveRangeRecv{ + executor: new(testListingEntity), + } + + _, err := s.rangeDescriptor(ctx, addr, nil) + require.EqualError(t, err, errRelationNotFound.Error()) + }) + }) + + t.Run("success", func(t *testing.T) { + foundAddr := testObjectAddress(t) + + obj := &Object{ + SystemHeader: SystemHeader{ + PayloadLength: 100, + ID: foundAddr.ObjectID, + CID: foundAddr.CID, + }, + } + + s := &selectiveRangeRecv{ + executor: &testListingEntity{ + SelectiveContainerExecutor: nil, + f: func(items ...interface{}) { + p := items[0].(*implementations.HeadParams) + p.Handler(nil, obj) + }, + }, + } + + res, err := s.rangeDescriptor(ctx, addr, nil) + require.NoError(t, err) + require.Equal(t, RangeDescriptor{ + Size: int64(obj.SystemHeader.PayloadLength), + Offset: 0, + Addr: foundAddr, + + LeftBound: true, + RightBound: true, + }, res) + }) + }) +} + +func Test_neighborReceiver(t *testing.T) { + ctx := context.TODO() + addr := testObjectAddress(t) + + t.Run("neighbor", func(t *testing.T) { + t.Run("correct internal logic", func(t *testing.T) { + rightCalled, leftCalled := false, false + + s := &neighborReceiver{ + leftNeighborQueryFn: func(a Address) ([]byte, error) { + require.Equal(t, addr, a) + leftCalled = true + return nil, nil + }, + rightNeighborQueryFn: func(a Address) ([]byte, error) { + 
require.Equal(t, addr, a) + rightCalled = true + return nil, nil + }, + rangeDescRecv: &testListingEntity{ + f: func(items ...interface{}) { + require.Equal(t, addr, items[0]) + _, _ = items[1].(relationQueryFunc)(addr) + }, + err: internal.Error(""), + }, + } + + _, _ = s.Neighbor(ctx, addr, true) + require.False(t, rightCalled) + require.True(t, leftCalled) + + leftCalled = false + + _, _ = s.Neighbor(ctx, addr, false) + require.False(t, leftCalled) + require.True(t, rightCalled) + }) + + t.Run("correct result", func(t *testing.T) { + rErr := internal.Error("test error for range receiver") + + rngRecv := &testListingEntity{err: rErr} + s := &neighborReceiver{rangeDescRecv: rngRecv} + + _, err := s.Neighbor(ctx, addr, false) + require.EqualError(t, err, rErr.Error()) + + rngRecv.err = errRelationNotFound + + _, err = s.Neighbor(ctx, addr, false) + require.EqualError(t, err, errRelationNotFound.Error()) + + rd := RangeDescriptor{Size: 1, Offset: 2, Addr: addr} + rngRecv.res, rngRecv.err = rd, nil + + res, err := s.Neighbor(ctx, addr, false) + require.NoError(t, err) + require.Equal(t, rd, res) + }) + }) + + t.Run("base", func(t *testing.T) { + rd := RangeDescriptor{Size: 1, Offset: 2, Addr: addr} + + t.Run("first child exists", func(t *testing.T) { + called := false + + s := &neighborReceiver{ + firstChildQueryFn: func(a Address) ([]byte, error) { + require.Equal(t, addr, a) + called = true + return nil, nil + }, + rangeDescRecv: &testListingEntity{ + f: func(items ...interface{}) { + require.Equal(t, addr, items[0]) + _, _ = items[1].(relationQueryFunc)(addr) + }, + res: rd, + }, + } + + res, err := s.Base(ctx, addr) + require.NoError(t, err) + require.Equal(t, rd, res) + require.True(t, called) + }) + + t.Run("first child doesn't exist", func(t *testing.T) { + called := false + + recv := &testListingEntity{err: internal.Error("")} + + recv.f = func(...interface{}) { + if called { + recv.res, recv.err = rd, nil + } + called = true + } + + s := &neighborReceiver{rangeDescRecv: recv} + + res, err := s.Base(ctx, addr) + require.NoError(t, err) + require.Equal(t, rd, res) + }) + }) +} diff --git a/services/public/object/postprocessor.go b/services/public/object/postprocessor.go new file mode 100644 index 000000000..52d474d4c --- /dev/null +++ b/services/public/object/postprocessor.go @@ -0,0 +1,47 @@ +package object + +import ( + "context" +) + +type ( + // requestPostProcessor is an interface of RPC call outcome handler. + requestPostProcessor interface { + // Performs actions based on the outcome of request processing. + postProcess(context.Context, serviceRequest, error) + } + + // complexPostProcessor is an implementation of requestPostProcessor interface. + complexPostProcessor struct { + // Sequence of requestPostProcessor instances. + list []requestPostProcessor + } +) + +var _ requestPostProcessor = (*complexPostProcessor)(nil) + +// requestPostProcessor method implementation. +// +// Panics with pmEmptyServiceRequest on nil request argument. +// +// Passes request through the sequence of requestPostProcessor instances. +// +// Warn: adding instance to list itself provoke endless recursion. +func (s *complexPostProcessor) postProcess(ctx context.Context, req serviceRequest, e error) { + if req == nil { + panic(pmEmptyServiceRequest) + } + + for i := range s.list { + s.list[i].postProcess(ctx, req, e) + } +} + +// Creates requestPostProcessor based on Params. +// +// Uses complexPostProcessor instance as a result implementation. 
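+//
+// Illustrative sketch (not part of the original code): a hypothetical
+// post-processor and how it plugs into the chain; countingPostProcessor is an
+// assumed name, not an existing type.
+//
+//	type countingPostProcessor struct{ failed int }
+//
+//	func (p *countingPostProcessor) postProcess(_ context.Context, _ serviceRequest, e error) {
+//		if e != nil {
+//			p.failed++
+//		}
+//	}
+//
+//	pp := &complexPostProcessor{list: []requestPostProcessor{new(countingPostProcessor)}}
+//	pp.postProcess(ctx, req, err) // every post-processor in the list observes the outcome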
+func newPostProcessor() requestPostProcessor { + return &complexPostProcessor{ + list: []requestPostProcessor{}, + } +} diff --git a/services/public/object/postprocessor_test.go b/services/public/object/postprocessor_test.go new file mode 100644 index 000000000..f114fc982 --- /dev/null +++ b/services/public/object/postprocessor_test.go @@ -0,0 +1,83 @@ +package object + +import ( + "context" + "testing" + + "github.com/nspcc-dev/neofs-node/internal" + "github.com/stretchr/testify/require" +) + +type ( + // Entity for mocking interfaces. + // Implementation of any interface intercepts arguments via f (if not nil). + // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. + testPostProcessorEntity struct { + // Set of interfaces which testCommonEntity must implement, but some methods from those does not call. + serviceRequest + + // Argument interceptor. Used for ascertain of correct parameter passage between components. + f func(...interface{}) + // Mocked result of any interface. + res interface{} + // Mocked error of any interface. + err error + } +) + +var _ requestPostProcessor = (*testPostProcessorEntity)(nil) + +func (s *testPostProcessorEntity) postProcess(_ context.Context, req serviceRequest, e error) { + if s.f != nil { + s.f(req, e) + } +} + +func TestComplexPostProcessor_PostProcess(t *testing.T) { + ctx := context.TODO() + + t.Run("empty request argument", func(t *testing.T) { + require.PanicsWithValue(t, pmEmptyServiceRequest, func() { + // ascertain that nil request causes panic + new(complexPostProcessor).postProcess(ctx, nil, nil) + }) + }) + + t.Run("correct behavior", func(t *testing.T) { + // create serviceRequest instance. + req := new(testPostProcessorEntity) + + // create custom error + pErr := internal.Error("test error for post processor") + + // create list of post processors + postProcCount := 10 + postProcessors := make([]requestPostProcessor, 0, postProcCount) + + postProcessorCalls := make([]struct{}, 0, postProcCount) + + for i := 0; i < postProcCount; i++ { + postProcessors = append(postProcessors, &testPostProcessorEntity{ + f: func(items ...interface{}) { + t.Run("correct arguments", func(t *testing.T) { + postProcessorCalls = append(postProcessorCalls, struct{}{}) + }) + }, + }) + } + + s := &complexPostProcessor{list: postProcessors} + + s.postProcess(ctx, req, pErr) + + // ascertain all internal requestPostProcessor instances were called + require.Len(t, postProcessorCalls, postProcCount) + }) +} + +func Test_newPostProcessor(t *testing.T) { + res := newPostProcessor() + + pp := res.(*complexPostProcessor) + require.Len(t, pp.list, 0) +} diff --git a/services/public/object/preprocessor.go b/services/public/object/preprocessor.go new file mode 100644 index 000000000..620cae5a2 --- /dev/null +++ b/services/public/object/preprocessor.go @@ -0,0 +1,163 @@ +package object + +import ( + "context" + "crypto/ecdsa" + + "github.com/nspcc-dev/neofs-api-go/service" + "go.uber.org/zap" +) + +type ( + // requestPreProcessor is an interface of Object service request installer. + requestPreProcessor interface { + // Performs preliminary request validation and preparation. + preProcess(context.Context, serviceRequest) error + } + + // complexPreProcessor is an implementation of requestPreProcessor interface. + complexPreProcessor struct { + // Sequence of requestPreProcessor instances. 
+ list []requestPreProcessor + } + + signingPreProcessor struct { + preProc requestPreProcessor + key *ecdsa.PrivateKey + + log *zap.Logger + } +) + +const pmEmptyServiceRequest = "empty service request" + +var ( + _ requestPreProcessor = (*signingPreProcessor)(nil) + _ requestPreProcessor = (*complexPreProcessor)(nil) +) + +// requestPreProcessor method implementation. +// +// Passes request through internal requestPreProcessor. +// If internal requestPreProcessor returns non-nil error, this error returns. +// Returns result of signRequest function. +func (s *signingPreProcessor) preProcess(ctx context.Context, req serviceRequest) (err error) { + if err = s.preProc.preProcess(ctx, req); err != nil { + return + } else if err = signRequest(s.key, req); err != nil { + s.log.Error("could not re-sign request", + zap.Error(err), + ) + err = errReSigning + } + + return +} + +// requestPreProcessor method implementation. +// +// Panics with pmEmptyServiceRequest on nil request argument. +// +// Passes request through the sequence of requestPreProcessor instances. +// Any non-nil error returned by some instance returns. +// +// Warn: adding instance to list itself provoke endless recursion. +func (s *complexPreProcessor) preProcess(ctx context.Context, req serviceRequest) error { + if req == nil { + panic(pmEmptyServiceRequest) + } + + for i := range s.list { + if err := s.list[i].preProcess(ctx, req); err != nil { + return err + } + } + + return nil +} + +// Creates requestPreProcessor based on Params. +// +// Uses complexPreProcessor instance as a result implementation. +// +// Adds to next preprocessors to list: +// * verifyPreProcessor; +// * ttlPreProcessor; +// * epochPreProcessor, if CheckEpochSync flag is set in params. +// * aclPreProcessor, if CheckAcl flag is set in params. 
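+//
+// Behavioral sketch (illustrative only; ctx and req stand for the caller's
+// context and a non-nil serviceRequest):
+//
+//	pre := newPreProcessor(p)
+//
+//	// Nested pre-processors run sequentially; the first non-nil error
+//	// is returned. On success the wrapping signingPreProcessor re-signs
+//	// the request with p.Key (errReSigning on failure).
+//	if err := pre.preProcess(ctx, req); err != nil {
+//		return err
+//	}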
+func newPreProcessor(p *Params) requestPreProcessor { + preProcList := make([]requestPreProcessor, 0) + + if p.CheckACL { + preProcList = append(preProcList, &aclPreProcessor{ + log: p.Logger, + + aclInfoReceiver: p.aclInfoReceiver, + + basicChecker: p.BasicACLChecker, + + reqActionCalc: p.requestActionCalculator, + + localStore: p.LocalStore, + + extACLSource: p.ExtendedACLSource, + + bearerVerifier: &complexBearerVerifier{ + items: []bearerTokenVerifier{ + &bearerActualityVerifier{ + epochRecv: p.EpochReceiver, + }, + new(bearerSignatureVerifier), + &bearerOwnershipVerifier{ + cnrOwnerChecker: p.ACLHelper, + }, + }, + }, + }) + } + + preProcList = append(preProcList, + &verifyPreProcessor{ + fVerify: requestVerifyFunc, + }, + + &ttlPreProcessor{ + staticCond: []service.TTLCondition{ + validTTLCondition, + }, + condPreps: []ttlConditionPreparer{ + &coreTTLCondPreparer{ + curAffChecker: &corePlacementUtil{ + prevNetMap: false, + localAddrStore: p.AddressStore, + placementBuilder: p.Placer, + log: p.Logger, + }, + prevAffChecker: &corePlacementUtil{ + prevNetMap: true, + localAddrStore: p.AddressStore, + placementBuilder: p.Placer, + log: p.Logger, + }, + }, + }, + fProc: processTTLConditions, + }, + + &tokenPreProcessor{ + keyVerifier: p.OwnerKeyVerifier, + staticVerifier: newComplexTokenVerifier( + &tokenEpochsVerifier{ + epochRecv: p.EpochReceiver, + }, + ), + }, + + new(decTTLPreProcessor), + ) + + return &signingPreProcessor{ + preProc: &complexPreProcessor{list: preProcList}, + key: p.Key, + } +} diff --git a/services/public/object/preprocessor_test.go b/services/public/object/preprocessor_test.go new file mode 100644 index 000000000..7a1509285 --- /dev/null +++ b/services/public/object/preprocessor_test.go @@ -0,0 +1,142 @@ +package object + +import ( + "context" + "testing" + + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/nspcc-dev/neofs-node/lib/test" + "github.com/stretchr/testify/require" +) + +type ( + // Entity for mocking interfaces. + // Implementation of any interface intercepts arguments via f (if not nil). + // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. + testPreProcessorEntity struct { + // Set of interfaces which testCommonEntity must implement, but some methods from those does not call. + serviceRequest + Placer + implementations.AddressStoreComponent + EpochReceiver + core.OwnerKeyVerifier + + // Argument interceptor. Used for ascertain of correct parameter passage between components. + f func(...interface{}) + // Mocked result of any interface. + res interface{} + // Mocked error of any interface. 
+ err error + } +) + +var _ requestPreProcessor = (*testPreProcessorEntity)(nil) + +func (s *testPreProcessorEntity) preProcess(_ context.Context, req serviceRequest) error { + if s.f != nil { + s.f(req) + } + return s.err +} + +func TestSigningPreProcessor_preProcess(t *testing.T) { + ctx := context.TODO() + + req := new(object.SearchRequest) + + t.Run("internal pre-processor error", func(t *testing.T) { + ppErr := internal.Error("test error for pre-processor") + + s := &signingPreProcessor{ + preProc: &testPreProcessorEntity{ + f: func(items ...interface{}) { + t.Run("correct internal pre-processor params", func(t *testing.T) { + require.Equal(t, req, items[0].(serviceRequest)) + }) + }, + err: ppErr, + }, + } + + require.EqualError(t, s.preProcess(ctx, req), ppErr.Error()) + }) + + t.Run("correct result", func(t *testing.T) { + key := test.DecodeKey(0) + + exp := signRequest(key, req) + + s := &signingPreProcessor{ + preProc: new(testPreProcessorEntity), + key: key, + } + + require.Equal(t, exp, s.preProcess(ctx, req)) + }) +} + +func TestComplexPreProcessor_PreProcess(t *testing.T) { + ctx := context.TODO() + + t.Run("empty request argument", func(t *testing.T) { + require.PanicsWithValue(t, pmEmptyServiceRequest, func() { + // ascertain that nil request causes panic + _ = new(complexPreProcessor).preProcess(ctx, nil) + }) + }) + + // create serviceRequest instance. + req := new(testPreProcessorEntity) + + t.Run("empty list", func(t *testing.T) { + require.NoError(t, new(complexPreProcessor).preProcess(ctx, req)) + }) + + t.Run("non-empty list", func(t *testing.T) { + firstCalled := false + p1 := &testPreProcessorEntity{ + f: func(items ...interface{}) { + t.Run("correct nested pre processor params", func(t *testing.T) { + require.Equal(t, req, items[0].(serviceRequest)) + }) + + firstCalled = true // mark first requestPreProcessor call + }, + err: nil, // force requestPreProcessor to return nil error + } + + // create custom error + pErr := internal.Error("pre processor error for test") + p2 := &testPreProcessorEntity{ + err: pErr, // force second requestPreProcessor to return created error + } + + thirdCalled := false + p3 := &testPreProcessorEntity{ + f: func(_ ...interface{}) { + thirdCalled = true // mark third requestPreProcessor call + }, + err: nil, // force requestPreProcessor to return nil error + } + + // create complex requestPreProcessor + p := &complexPreProcessor{ + list: []requestPreProcessor{p1, p2, p3}, // order is important + } + + // ascertain error returns as expected + require.EqualError(t, + p.preProcess(ctx, req), + pErr.Error(), + ) + + // ascertain first requestPreProcessor was called + require.True(t, firstCalled) + + // ascertain first requestPreProcessor was not called + require.False(t, thirdCalled) + }) +} diff --git a/services/public/object/put.go b/services/public/object/put.go new file mode 100644 index 000000000..b1cc519ca --- /dev/null +++ b/services/public/object/put.go @@ -0,0 +1,437 @@ +package object + +import ( + "context" + "io" + "sync" + "time" + + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-api-go/session" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/nspcc-dev/neofs-node/lib/localstore" + "github.com/nspcc-dev/neofs-node/lib/objutil" + "github.com/nspcc-dev/neofs-node/lib/transformer" + "github.com/nspcc-dev/neofs-node/lib/transport" + 
"github.com/pkg/errors" + "go.uber.org/zap" +) + +type ( + objectStorer interface { + putObject(context.Context, transport.PutInfo) (*Address, error) + } + + bifurcatingObjectStorer struct { + straightStorer objectStorer + tokenStorer objectStorer + } + + receivingObjectStorer struct { + straightStorer objectStorer + vPayload objutil.Verifier + } + + filteringObjectStorer struct { + filter Filter + objStorer objectStorer + } + + tokenObjectStorer struct { + tokenStore session.PrivateTokenStore + objStorer objectStorer + } + + transformingObjectStorer struct { + transformer transformer.Transformer + objStorer objectStorer + + // Set of errors that won't be converted to errTransformer + mErr map[error]struct{} + } + + straightObjectStorer struct { + executor operationExecutor + } + + putRequest struct { + *object.PutRequest + srv object.Service_PutServer + timeout time.Duration + } + + addressAccumulator interface { + responseItemHandler + address() *Address + } + + coreAddrAccum struct { + *sync.Once + addr *Address + } + + rawPutInfo struct { + *rawMetaInfo + obj *Object + r io.Reader + copyNum uint32 + } + + putStreamReader struct { + tail []byte + srv object.Service_PutServer + } +) + +type transformerHandlerErr struct { + error +} + +const ( + errObjectExpected = internal.Error("missing object") + errChunkExpected = internal.Error("empty chunk received") +) + +const ( + errMissingOwnerKeys = internal.Error("missing owner keys") + errBrokenToken = internal.Error("broken token structure") + errNilToken = internal.Error("missing session token") + errWrongTokenAddress = internal.Error("wrong object address in token") +) + +const errTransformer = internal.Error("could not transform the object") + +var ( + _ transport.PutInfo = (*rawPutInfo)(nil) + _ addressAccumulator = (*coreAddrAccum)(nil) + _ objectStorer = (*straightObjectStorer)(nil) + _ transport.PutInfo = (*putRequest)(nil) + _ io.Reader = (*putStreamReader)(nil) + _ objectStorer = (*filteringObjectStorer)(nil) + _ objectStorer = (*transformingObjectStorer)(nil) + _ objectStorer = (*tokenObjectStorer)(nil) + _ objectStorer = (*receivingObjectStorer)(nil) +) + +func (s *objectService) Put(srv object.Service_PutServer) (err error) { + defer func() { + if r := recover(); r != nil { + s.log.Error(panicLogMsg, + zap.Stringer("request", object.RequestPut), + zap.Any("reason", r), + ) + + err = errServerPanic + } + + err = s.statusCalculator.make(requestError{ + t: object.RequestPut, + e: err, + }) + }() + + var req *object.PutRequest + + if req, err = recvPutHeaderMsg(srv); err != nil { + return + } + + _, err = s.requestHandler.handleRequest(srv.Context(), handleRequestParams{ + request: &putRequest{ + PutRequest: req, + srv: srv, + }, + executor: s, + }) + + return err +} + +func (s *bifurcatingObjectStorer) putObject(ctx context.Context, info transport.PutInfo) (*Address, error) { + if withTokenFromOwner(info) { + return s.tokenStorer.putObject(ctx, info) + } + + return s.straightStorer.putObject(ctx, info) +} + +func withTokenFromOwner(src service.SessionTokenSource) bool { + if src == nil { + return false + } + + token := src.GetSessionToken() + if token == nil { + return false + } + + signedReq, ok := src.(service.SignKeyPairSource) + if !ok { + return false + } + + signKeyPairs := signedReq.GetSignKeyPairs() + if len(signKeyPairs) == 0 { + return false + } + + firstKey := signKeyPairs[0].GetPublicKey() + if firstKey == nil { + return false + } + + reqOwner, err := refs.NewOwnerID(firstKey) + if err != nil { + return false + } + + 
return reqOwner.Equal(token.GetOwnerID()) +} + +func (s *tokenObjectStorer) putObject(ctx context.Context, info transport.PutInfo) (*Address, error) { + token := info.GetSessionToken() + + key := session.PrivateTokenKey{} + key.SetOwnerID(token.GetOwnerID()) + key.SetTokenID(token.GetID()) + + pToken, err := s.tokenStore.Fetch(key) + if err != nil { + return nil, &detailedError{ + error: errTokenRetrieval, + d: privateTokenRecvDetails(token.GetID(), token.GetOwnerID()), + } + } + + return s.objStorer.putObject( + contextWithValues(ctx, + transformer.PrivateSessionToken, pToken, + transformer.PublicSessionToken, token, + implementations.BearerToken, info.GetBearerToken(), + implementations.ExtendedHeaders, info.ExtendedHeaders(), + ), + info, + ) +} + +func (s *filteringObjectStorer) putObject(ctx context.Context, info transport.PutInfo) (*Address, error) { + if res := s.filter.Pass( + contextWithValues(ctx, ttlValue, info.GetTTL()), + &Meta{Object: info.GetHead()}, + ); res.Code() != localstore.CodePass { + if err := res.Err(); err != nil { + return nil, err + } + + return nil, errObjectFilter + } + + return s.objStorer.putObject(ctx, info) +} + +func (s *receivingObjectStorer) putObject(ctx context.Context, src transport.PutInfo) (*Address, error) { + obj := src.GetHead() + obj.Payload = make([]byte, obj.SystemHeader.PayloadLength) + + if _, err := io.ReadFull(src.Payload(), obj.Payload); err != nil && err != io.EOF { + if errors.Is(err, io.ErrUnexpectedEOF) { + err = transformer.ErrPayloadEOF + } + + return nil, err + } else if err = s.vPayload.Verify(ctx, obj); err != nil { + return nil, errPayloadChecksum + } + + putInfo := newRawPutInfo() + putInfo.setTimeout(src.GetTimeout()) + putInfo.setTTL(src.GetTTL()) + putInfo.setCopiesNumber(src.CopiesNumber()) + putInfo.setHead(obj) + putInfo.setSessionToken(src.GetSessionToken()) + putInfo.setBearerToken(src.GetBearerToken()) + putInfo.setExtendedHeaders(src.ExtendedHeaders()) + + return s.straightStorer.putObject(ctx, putInfo) +} + +func (s *transformingObjectStorer) putObject(ctx context.Context, src transport.PutInfo) (res *Address, err error) { + var ( + ttl = src.GetTTL() + timeout = src.GetTimeout() + copyNum = src.CopiesNumber() + token = src.GetSessionToken() + bearer = src.GetBearerToken() + extHdrs = src.ExtendedHeaders() + ) + + err = s.transformer.Transform(ctx, + transformer.ProcUnit{ + Head: src.GetHead(), + Payload: src.Payload(), + }, func(ctx context.Context, unit transformer.ProcUnit) error { + res = unit.Head.Address() + + putInfo := newRawPutInfo() + putInfo.setHead(unit.Head) + putInfo.setPayload(unit.Payload) + putInfo.setTimeout(timeout) + putInfo.setTTL(ttl) + putInfo.setCopiesNumber(copyNum) + putInfo.setSessionToken(token) + putInfo.setBearerToken(bearer) + putInfo.setExtendedHeaders(extHdrs) + + _, err := s.objStorer.putObject(ctx, putInfo) + if err != nil { + err = &transformerHandlerErr{ + error: err, + } + } + return err + }, + ) + + if e := errors.Cause(err); e != nil { + if v, ok := e.(*transformerHandlerErr); ok { + err = v.error + } else if _, ok := s.mErr[e]; !ok { + err = errTransformer + } + } + + return res, err +} + +func (s *putStreamReader) Read(p []byte) (n int, err error) { + if s.srv == nil { + return 0, io.EOF + } + + n += copy(p, s.tail) + if n > 0 { + s.tail = s.tail[n:] + return + } + + var msg *object.PutRequest + + if msg, err = s.srv.Recv(); err != nil { + return + } + + chunk := msg.GetChunk() + if len(chunk) == 0 { + return 0, errChunkExpected + } + + r := copy(p, chunk) + + s.tail = 
chunk[r:] + + n += r + + return +} + +func (s *straightObjectStorer) putObject(ctx context.Context, pInfo transport.PutInfo) (*Address, error) { + addrAccum := newAddressAccumulator() + if err := s.executor.executeOperation(ctx, pInfo, addrAccum); err != nil { + return nil, err + } + + return addrAccum.address(), nil +} + +func recvPutHeaderMsg(srv object.Service_PutServer) (*object.PutRequest, error) { + req, err := srv.Recv() + if err != nil { + return nil, err + } else if req == nil { + return nil, errHeaderExpected + } else if h := req.GetHeader(); h == nil { + return nil, errHeaderExpected + } else if h.GetObject() == nil { + return nil, errObjectExpected + } + + return req, nil +} + +func contextWithValues(parentCtx context.Context, items ...interface{}) context.Context { + fCtx := parentCtx + for i := 0; i < len(items); i += 2 { + fCtx = context.WithValue(fCtx, items[i], items[i+1]) + } + + return fCtx +} + +func (s *putRequest) GetTimeout() time.Duration { return s.timeout } + +func (s *putRequest) GetHead() *Object { return s.GetHeader().GetObject() } + +func (s *putRequest) CopiesNumber() uint32 { + h := s.GetHeader() + if h == nil { + return 0 + } + + return h.GetCopiesNumber() +} + +func (s *putRequest) Payload() io.Reader { + return &putStreamReader{ + srv: s.srv, + } +} + +func (s *rawPutInfo) GetHead() *Object { + return s.obj +} + +func (s *rawPutInfo) setHead(obj *Object) { + s.obj = obj +} + +func (s *rawPutInfo) Payload() io.Reader { + return s.r +} + +func (s *rawPutInfo) setPayload(r io.Reader) { + s.r = r +} + +func (s *rawPutInfo) CopiesNumber() uint32 { + return s.copyNum +} + +func (s *rawPutInfo) setCopiesNumber(v uint32) { + s.copyNum = v +} + +func (s *rawPutInfo) getMetaInfo() *rawMetaInfo { + return s.rawMetaInfo +} + +func (s *rawPutInfo) setMetaInfo(v *rawMetaInfo) { + s.rawMetaInfo = v + s.setType(object.RequestPut) +} + +func newRawPutInfo() *rawPutInfo { + res := new(rawPutInfo) + + res.setMetaInfo(newRawMetaInfo()) + + return res +} + +func (s *coreAddrAccum) handleItem(item interface{}) { s.Do(func() { s.addr = item.(*Address) }) } + +func (s *coreAddrAccum) address() *Address { return s.addr } + +func newAddressAccumulator() addressAccumulator { return &coreAddrAccum{Once: new(sync.Once)} } diff --git a/services/public/object/put_test.go b/services/public/object/put_test.go new file mode 100644 index 000000000..80fe33838 --- /dev/null +++ b/services/public/object/put_test.go @@ -0,0 +1,958 @@ +package object + +import ( + "bytes" + "context" + "io" + "testing" + "time" + + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-api-go/session" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/nspcc-dev/neofs-node/lib/localstore" + "github.com/nspcc-dev/neofs-node/lib/test" + "github.com/nspcc-dev/neofs-node/lib/transformer" + "github.com/nspcc-dev/neofs-node/lib/transport" + "github.com/stretchr/testify/require" +) + +type ( + // Entity for mocking interfaces. + // Implementation of any interface intercepts arguments via f (if not nil). + // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. + testPutEntity struct { + // Set of interfaces which entity must implement, but some methods from those does not call. 
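+ // Embedding them lets this single mock satisfy all required interfaces;
+ // only the methods overridden in this file are actually called by tests.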
+ object.Service_PutServer + transport.PutInfo + Filter + session.PrivateTokenStore + implementations.SelectiveContainerExecutor + + // Argument interceptor. Used for ascertain of correct parameter passage between components. + f func(...interface{}) + // Mocked result of any interface. + res interface{} + // Mocked error of any interface. + err error + } +) + +var ( + _ object.Service_PutServer = (*testPutEntity)(nil) + _ requestHandler = (*testPutEntity)(nil) + _ objectStorer = (*testPutEntity)(nil) + _ transport.PutInfo = (*testPutEntity)(nil) + _ Filter = (*testPutEntity)(nil) + _ operationExecutor = (*testPutEntity)(nil) + _ session.PrivateTokenStore = (*testPutEntity)(nil) + _ EpochReceiver = (*testPutEntity)(nil) + _ transformer.Transformer = (*testPutEntity)(nil) +) + +func (s *testPutEntity) Verify(_ context.Context, obj *Object) error { + if s.f != nil { + s.f(obj) + } + return s.err +} + +func (s *testPutEntity) Transform(_ context.Context, u transformer.ProcUnit, h ...transformer.ProcUnitHandler) error { + if s.f != nil { + s.f(u, h) + } + return s.err +} + +func (s *testPutEntity) verify(_ context.Context, token *session.Token, obj *Object) error { + if s.f != nil { + s.f(token, obj) + } + return s.err +} + +func (s *testPutEntity) Epoch() uint64 { return s.res.(uint64) } + +func (s *testPutEntity) Direct(ctx context.Context, objs ...Object) ([]Object, error) { + if s.f != nil { + s.f(ctx, objs) + } + return s.res.([]Object), s.err +} + +func (s *testPutEntity) Fetch(id session.PrivateTokenKey) (session.PrivateToken, error) { + if s.f != nil { + s.f(id) + } + if s.err != nil { + return nil, s.err + } + return s.res.(session.PrivateToken), nil +} + +func (s *testPutEntity) executeOperation(_ context.Context, m transport.MetaInfo, h responseItemHandler) error { + if s.f != nil { + s.f(m, h) + } + return s.err +} + +func (s *testPutEntity) Pass(ctx context.Context, m *Meta) *localstore.FilterResult { + if s.f != nil { + s.f(ctx, m) + } + items := s.res.([]interface{}) + return items[0].(*localstore.FilterResult) +} + +func (s *testPutEntity) GetTTL() uint32 { return s.res.(uint32) } + +func (s *testPutEntity) GetToken() *session.Token { return s.res.(*session.Token) } + +func (s *testPutEntity) GetHead() *Object { return s.res.(*Object) } + +func (s *testPutEntity) putObject(ctx context.Context, p transport.PutInfo) (*Address, error) { + if s.f != nil { + s.f(p, ctx) + } + if s.err != nil { + return nil, s.err + } + return s.res.(*Address), nil +} + +func (s *testPutEntity) handleRequest(_ context.Context, p handleRequestParams) (interface{}, error) { + if s.f != nil { + s.f(p) + } + return s.res, s.err +} + +func (s *testPutEntity) Recv() (*object.PutRequest, error) { + if s.f != nil { + s.f() + } + if s.err != nil { + return nil, s.err + } else if s.res == nil { + return nil, nil + } + return s.res.(*object.PutRequest), nil +} + +func (s *testPutEntity) Context() context.Context { return context.TODO() } + +func Test_objectService_Put(t *testing.T) { + + t.Run("stream error", func(t *testing.T) { + // create custom error for test + psErr := internal.Error("test error for put stream server") + + s := &testPutEntity{ + err: psErr, // force server to return psErr + } + + srv := &objectService{ + statusCalculator: newStatusCalculator(), + } + + // ascertain that error returns as expected + require.EqualError(t, + srv.Put(s), + psErr.Error(), + ) + }) + + t.Run("request handling", func(t *testing.T) { + // create custom request for test + req := &object.PutRequest{R: 
&object.PutRequest_Header{ + Header: &object.PutRequest_PutHeader{ + Object: new(Object), + }, + }} + + // create custom error for test + hErr := internal.Error("test error for request handler") + + srv := &testPutEntity{ + res: req, // force server to return req + } + + s := &objectService{ + statusCalculator: newStatusCalculator(), + } + + s.requestHandler = &testPutEntity{ + f: func(items ...interface{}) { + t.Run("correct request handler params", func(t *testing.T) { + p := items[0].(handleRequestParams) + require.Equal(t, s, p.executor) + require.Equal(t, &putRequest{ + PutRequest: req, + srv: srv, + }, p.request) + }) + }, + err: hErr, // force requestHandler to return hErr + } + + // ascertain that error returns as expected + require.EqualError(t, + s.Put(srv), + hErr.Error(), + ) + }) +} + +func Test_straightObjectStorer_putObject(t *testing.T) { + ctx := context.TODO() + + t.Run("executor error", func(t *testing.T) { + // create custom error for test + exErr := internal.Error("test error for operation executor") + + // create custom meta info for test + info := new(testPutEntity) + + s := &straightObjectStorer{ + executor: &testPutEntity{ + f: func(items ...interface{}) { + t.Run("correct operation executor params", func(t *testing.T) { + require.Equal(t, info, items[0]) + acc := items[1].(*coreAddrAccum) + require.NotNil(t, acc.Once) + }) + }, + err: exErr, + }, + } + + _, err := s.putObject(ctx, info) + + // ascertain that error returns as expected + require.EqualError(t, err, exErr.Error()) + }) + + t.Run("correct result", func(t *testing.T) { + addr := testObjectAddress(t) + + s := &straightObjectStorer{ + executor: &testPutEntity{ + f: func(items ...interface{}) { + // add address to accumulator + items[1].(addressAccumulator).handleItem(&addr) + }, + }, + } + + res, err := s.putObject(ctx, new(testPutEntity)) + require.NoError(t, err) + + // ascertain that result returns as expected + require.Equal(t, &addr, res) + }) +} + +func Test_recvPutHeaderMsg(t *testing.T) { + t.Run("server error", func(t *testing.T) { + // create custom error for test + srvErr := internal.Error("test error for put server") + + srv := &testPutEntity{ + err: srvErr, // force put server to return srvErr + } + + res, err := recvPutHeaderMsg(srv) + + // ascertain that error returns as expected + require.EqualError(t, err, srvErr.Error()) + require.Nil(t, res) + }) + + t.Run("empty message", func(t *testing.T) { + srv := &testPutEntity{ + res: nil, // force put server to return nil, nil + } + + res, err := recvPutHeaderMsg(srv) + + // ascertain that error returns as expected + require.EqualError(t, err, errHeaderExpected.Error()) + require.Nil(t, res) + }) + + t.Run("empty put header in message", func(t *testing.T) { + srv := &testPutEntity{ + res: new(object.PutRequest), // force put server to return message w/o put header + } + + res, err := recvPutHeaderMsg(srv) + + // ascertain that error returns as expected + require.EqualError(t, err, object.ErrHeaderExpected.Error()) + require.Nil(t, res) + }) + + t.Run("empty object in put header", func(t *testing.T) { + srv := &testPutEntity{ + res: object.MakePutRequestHeader(nil), // force put server to return message w/ nil object + } + + res, err := recvPutHeaderMsg(srv) + + // ascertain that error returns as expected + require.EqualError(t, err, errObjectExpected.Error()) + require.Nil(t, res) + }) +} + +func Test_putRequest(t *testing.T) { + t.Run("timeout", func(t *testing.T) { + timeout := 3 * time.Second + + req := &putRequest{ + timeout: timeout, + } + 
+ // ascertain that timeout returns as expected + require.Equal(t, timeout, req.GetTimeout()) + }) + + t.Run("head", func(t *testing.T) { + // create custom object for test + obj := new(Object) + + req := &putRequest{ + PutRequest: object.MakePutRequestHeader(obj), // wrap object to test message + } + + // ascertain that head returns as expected + require.Equal(t, obj, req.GetHead()) + }) + + t.Run("payload", func(t *testing.T) { + req := &putRequest{ + srv: new(testPutEntity), + } + + require.Equal(t, &putStreamReader{srv: req.srv}, req.Payload()) + }) + + t.Run("copies number", func(t *testing.T) { + cn := uint32(5) + + req := &putRequest{ + PutRequest: &object.PutRequest{ + R: &object.PutRequest_Header{ + Header: &object.PutRequest_PutHeader{ + CopiesNumber: cn, + }, + }, + }, + } + + require.Equal(t, cn, req.CopiesNumber()) + }) +} + +func Test_coreAddrAccum(t *testing.T) { + t.Run("new", func(t *testing.T) { + s := newAddressAccumulator() + // ascertain that type is correct and Once entity initialize + require.NotNil(t, s.(*coreAddrAccum).Once) + }) + + t.Run("address", func(t *testing.T) { + addr := testObjectAddress(t) + + s := &coreAddrAccum{addr: &addr} + + // ascertain that address returns as expected + require.Equal(t, &addr, s.address()) + }) + + t.Run("handle", func(t *testing.T) { + addr := testObjectAddress(t) + + s := newAddressAccumulator() + + s.handleItem(&addr) + + // ascertain that address saved + require.Equal(t, &addr, s.address()) + + // create another address for test + addr2 := testObjectAddress(t) + + s.handleItem(&addr2) + + // ascertain that second address is ignored + require.Equal(t, &addr, s.address()) + }) +} + +func Test_rawPutInfo(t *testing.T) { + t.Run("TTL", func(t *testing.T) { + ttl := uint32(3) + + s := newRawPutInfo() + s.setTTL(ttl) + + require.Equal(t, ttl, s.GetTTL()) + }) + + t.Run("head", func(t *testing.T) { + obj := new(Object) + + s := newRawPutInfo() + s.setHead(obj) + + require.Equal(t, obj, s.GetHead()) + }) + + t.Run("payload", func(t *testing.T) { + // ascertain that nil chunk returns as expected + r := bytes.NewBuffer(nil) + + req := newRawPutInfo() + req.setPayload(r) + + require.Equal(t, r, req.Payload()) + }) + + t.Run("token", func(t *testing.T) { + // ascertain that nil token returns as expected + require.Nil(t, newRawPutInfo().GetSessionToken()) + }) + + t.Run("copies number", func(t *testing.T) { + cn := uint32(100) + + s := newRawPutInfo() + s.setCopiesNumber(cn) + + require.Equal(t, cn, s.CopiesNumber()) + }) +} + +func Test_contextWithValues(t *testing.T) { + k1, k2 := "key 1", "key2" + v1, v2 := "value 1", "value 2" + + ctx := contextWithValues(context.TODO(), k1, v1, k2, v2) + + // ascertain that all values added + require.Equal(t, v1, ctx.Value(k1)) + require.Equal(t, v2, ctx.Value(k2)) +} + +func Test_bifurcatingObjectStorer(t *testing.T) { + ctx := context.TODO() + + // create custom error for test + sErr := internal.Error("test error for object storer") + + t.Run("w/ token", func(t *testing.T) { + // create custom request w/ token + sk := test.DecodeKey(0) + + owner, err := refs.NewOwnerID(&sk.PublicKey) + require.NoError(t, err) + + token := new(service.Token) + token.SetOwnerID(owner) + + req := &putRequest{ + PutRequest: object.MakePutRequestHeader(new(Object)), + } + req.SetToken(token) + require.NoError(t, requestSignFunc(sk, req)) + + s := &bifurcatingObjectStorer{ + tokenStorer: &testPutEntity{ + f: func(items ...interface{}) { + t.Run("correct token storer params", func(t *testing.T) { + require.Equal(t, req, 
items[0]) + }) + }, + err: sErr, // force token storer to return sErr + }, + } + + _, err = s.putObject(ctx, req) + require.EqualError(t, err, sErr.Error()) + }) + + t.Run("w/o token", func(t *testing.T) { + // create custom request w/o token + req := newRawPutInfo() + require.Nil(t, req.GetSessionToken()) + + s := &bifurcatingObjectStorer{ + straightStorer: &testPutEntity{ + f: func(items ...interface{}) { + t.Run("correct token storer params", func(t *testing.T) { + require.Equal(t, req, items[0]) + }) + }, + err: sErr, // force token storer to return sErr + }, + } + + _, err := s.putObject(ctx, req) + require.EqualError(t, err, sErr.Error()) + }) +} + +func TestWithTokenFromOwner(t *testing.T) { + // nil request + require.False(t, withTokenFromOwner(nil)) + + // create test request + req := &putRequest{ + PutRequest: new(object.PutRequest), + } + + // w/o session token + require.Nil(t, req.GetSessionToken()) + require.False(t, withTokenFromOwner(req)) + + // create test session token and add it to request + token := new(service.Token) + req.SetToken(token) + + // w/o signatures + require.False(t, withTokenFromOwner(req)) + + // create test public key + pk := &test.DecodeKey(0).PublicKey + + // add key-signature pair + req.AddSignKey(nil, pk) + + // wrong token owner + require.False(t, withTokenFromOwner(req)) + + // set correct token owner + owner, err := refs.NewOwnerID(pk) + require.NoError(t, err) + + token.SetOwnerID(owner) + + require.True(t, withTokenFromOwner(req)) +} + +func Test_tokenObjectStorer(t *testing.T) { + ctx := context.TODO() + + token := new(service.Token) + token.SetID(session.TokenID{1, 2, 3}) + token.SetSignature(testData(t, 10)) + + // create custom request w/ token and object for test + req := newRawPutInfo() + req.setSessionToken(token) + req.setHead(&Object{ + Payload: testData(t, 10), + }) + + t.Run("token store failure", func(t *testing.T) { + s := &tokenObjectStorer{ + tokenStore: &testPutEntity{ + err: internal.Error(""), // force token store to return a non-nil error + }, + } + + _, err := s.putObject(ctx, req) + require.EqualError(t, err, errTokenRetrieval.Error()) + }) + + t.Run("correct result", func(t *testing.T) { + addr := testObjectAddress(t) + + pToken, err := session.NewPrivateToken(0) + require.NoError(t, err) + + s := &tokenObjectStorer{ + tokenStore: &testPutEntity{ + res: pToken, + }, + objStorer: &testPutEntity{ + f: func(items ...interface{}) { + t.Run("correct object storer params", func(t *testing.T) { + require.Equal(t, req, items[0]) + ctx := items[1].(context.Context) + require.Equal(t, pToken, ctx.Value(transformer.PrivateSessionToken)) + require.Equal(t, token, ctx.Value(transformer.PublicSessionToken)) + }) + }, + res: &addr, + }, + } + + res, err := s.putObject(ctx, req) + require.NoError(t, err) + require.Equal(t, addr, *res) + }) +} + +func Test_filteringObjectStorer(t *testing.T) { + ctx := context.TODO() + + t.Run("filter failure", func(t *testing.T) { + var ( + ttl = uint32(5) + obj = &Object{Payload: testData(t, 10)} + ) + + req := newRawPutInfo() + req.setHead(obj) + req.setTTL(ttl) + + s := &filteringObjectStorer{ + filter: &testPutEntity{ + f: func(items ...interface{}) { + t.Run("correct filter params", func(t *testing.T) { + require.Equal(t, &Meta{Object: obj}, items[1]) + ctx := items[0].(context.Context) + require.Equal(t, ttl, ctx.Value(ttlValue)) + }) + }, + res: []interface{}{localstore.ResultFail()}, + }, + } + + _, err := s.putObject(ctx, req) + require.EqualError(t, err, errObjectFilter.Error()) + }) + + 
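+ // when the filter passes, the request must be forwarded to the wrapped
+ // object storer unchanged and its resulting address returned as-is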
t.Run("correct result", func(t *testing.T) { + req := newRawPutInfo() + req.setHead(&Object{ + Payload: testData(t, 10), + }) + + addr := testObjectAddress(t) + + s := &filteringObjectStorer{ + filter: &testPutEntity{ + res: []interface{}{localstore.ResultPass()}, + }, + objStorer: &testPutEntity{ + f: func(items ...interface{}) { + t.Run("correct object storer params", func(t *testing.T) { + require.Equal(t, req, items[0]) + }) + }, + res: &addr, + }, + } + + res, err := s.putObject(ctx, req) + require.NoError(t, err) + require.Equal(t, &addr, res) + }) +} + +func Test_receivingObjectStorer(t *testing.T) { + ctx := context.TODO() + + t.Run("cut payload", func(t *testing.T) { + payload := testData(t, 10) + + req := newRawPutInfo() + req.setHead(&Object{ + SystemHeader: SystemHeader{ + PayloadLength: uint64(len(payload)) + 1, + }, + }) + req.setPayload(bytes.NewBuffer(payload)) + + _, err := new(receivingObjectStorer).putObject(ctx, req) + require.EqualError(t, err, transformer.ErrPayloadEOF.Error()) + }) + + t.Run("payload verification failure", func(t *testing.T) { + vErr := internal.Error("payload verification error for test") + + req := newRawPutInfo() + req.setHead(&Object{ + Payload: testData(t, 10), + }) + + s := &receivingObjectStorer{ + vPayload: &testPutEntity{ + f: func(items ...interface{}) { + require.Equal(t, req.obj, items[0]) + }, + err: vErr, + }, + } + + _, err := s.putObject(ctx, req) + + require.EqualError(t, err, errPayloadChecksum.Error()) + }) + + t.Run("correct result", func(t *testing.T) { + var ( + cn = uint32(10) + ttl = uint32(5) + timeout = 3 * time.Second + payload = testData(t, 10) + addr = testObjectAddress(t) + ) + + obj := &Object{ + SystemHeader: SystemHeader{ + PayloadLength: uint64(len(payload)), + ID: addr.ObjectID, + CID: addr.CID, + }, + } + + req := newRawPutInfo() + req.setHead(obj) + req.setPayload(bytes.NewBuffer(payload)) + req.setTimeout(timeout) + req.setTTL(ttl) + req.setCopiesNumber(cn) + req.setSessionToken(new(service.Token)) + + s := &receivingObjectStorer{ + straightStorer: &testPutEntity{ + f: func(items ...interface{}) { + t.Run("correct straight storer params", func(t *testing.T) { + exp := newRawPutInfo() + exp.setHead(obj) + exp.setTimeout(timeout) + exp.setTTL(ttl) + exp.setCopiesNumber(cn) + exp.setSessionToken(req.GetSessionToken()) + + require.Equal(t, exp, items[0]) + }) + }, + res: &addr, + }, + vPayload: new(testPutEntity), + } + + res, err := s.putObject(ctx, req) + require.NoError(t, err) + require.Equal(t, &addr, res) + }) +} + +func Test_transformingObjectStorer(t *testing.T) { + ctx := context.TODO() + + t.Run("correct behavior", func(t *testing.T) { + var ( + tErr = internal.Error("test error for transformer") + addr = testObjectAddress(t) + obj = &Object{ + SystemHeader: SystemHeader{ + ID: addr.ObjectID, + CID: addr.CID, + }, + Payload: testData(t, 10), + } + ) + + req := newRawPutInfo() + req.setHead(obj) + req.setPayload(bytes.NewBuffer(obj.Payload)) + req.setTimeout(3 * time.Second) + req.setTTL(5) + req.setCopiesNumber(100) + req.setSessionToken(new(service.Token)) + + tr := &testPutEntity{ + f: func(items ...interface{}) { + t.Run("correct transformer params", func(t *testing.T) { + require.Equal(t, transformer.ProcUnit{ + Head: req.obj, + Payload: req.r, + }, items[0]) + fns := items[1].([]transformer.ProcUnitHandler) + require.Len(t, fns, 1) + _ = fns[0](ctx, transformer.ProcUnit{ + Head: req.obj, + Payload: req.r, + }) + }) + }, + } + + s := &transformingObjectStorer{ + transformer: tr, + objStorer: 
&testPutEntity{ + f: func(items ...interface{}) { + t.Run("correct object storer params", func(t *testing.T) { + exp := newRawPutInfo() + exp.setHead(req.GetHead()) + exp.setPayload(req.Payload()) + exp.setTimeout(req.GetTimeout()) + exp.setTTL(req.GetTTL()) + exp.setCopiesNumber(req.CopiesNumber()) + exp.setSessionToken(req.GetSessionToken()) + + require.Equal(t, exp, items[0]) + }) + }, + err: internal.Error(""), + }, + mErr: map[error]struct{}{ + tErr: {}, + }, + } + + res, err := s.putObject(ctx, req) + require.NoError(t, err) + require.Equal(t, &addr, res) + + tr.err = tErr + + _, err = s.putObject(ctx, req) + require.EqualError(t, err, tErr.Error()) + + tr.err = internal.Error("some other error") + + _, err = s.putObject(ctx, req) + require.EqualError(t, err, errTransformer.Error()) + + e := &transformerHandlerErr{ + error: internal.Error("transformer handler error"), + } + + tr.err = e + + _, err = s.putObject(ctx, req) + require.EqualError(t, err, e.error.Error()) + }) +} + +func Test_putStreamReader(t *testing.T) { + t.Run("empty server", func(t *testing.T) { + s := new(putStreamReader) + n, err := s.Read(make([]byte, 1)) + require.EqualError(t, err, io.EOF.Error()) + require.Zero(t, n) + }) + + t.Run("fail presence", func(t *testing.T) { + initTail := testData(t, 10) + + s := putStreamReader{ + tail: initTail, + srv: new(testPutEntity), + } + + buf := make([]byte, len(s.tail)/2) + + n, err := s.Read(buf) + require.NoError(t, err) + require.Equal(t, len(buf), n) + require.Equal(t, buf, initTail[:n]) + require.Equal(t, initTail[n:], s.tail) + }) + + t.Run("receive message failure", func(t *testing.T) { + t.Run("stream problem", func(t *testing.T) { + srvErr := internal.Error("test error for stream server") + + s := &putStreamReader{ + srv: &testPutEntity{ + err: srvErr, + }, + } + + n, err := s.Read(make([]byte, 1)) + require.EqualError(t, err, srvErr.Error()) + require.Zero(t, n) + }) + + t.Run("incorrect chunk", func(t *testing.T) { + t.Run("empty data", func(t *testing.T) { + s := &putStreamReader{ + srv: &testPutEntity{ + res: object.MakePutRequestChunk(make([]byte, 0)), + }, + } + + n, err := s.Read(make([]byte, 1)) + require.EqualError(t, err, errChunkExpected.Error()) + require.Zero(t, n) + }) + + t.Run("wrong message type", func(t *testing.T) { + s := &putStreamReader{ + srv: &testPutEntity{ + res: object.MakePutRequestHeader(new(Object)), + }, + } + + n, err := s.Read(make([]byte, 1)) + require.EqualError(t, err, errChunkExpected.Error()) + require.Zero(t, n) + }) + }) + }) + + t.Run("correct read", func(t *testing.T) { + chunk := testData(t, 10) + buf := make([]byte, len(chunk)/2) + + s := &putStreamReader{ + srv: &testPutEntity{ + res: object.MakePutRequestChunk(chunk), + }, + } + + n, err := s.Read(buf) + require.NoError(t, err) + require.Equal(t, chunk[:n], buf) + require.Equal(t, chunk[n:], s.tail) + }) + + t.Run("ful read", func(t *testing.T) { + var ( + callNum = 0 + chunk1, chunk2 = testData(t, 100), testData(t, 88) + ) + + srv := new(testPutEntity) + srv.f = func(items ...interface{}) { + if callNum == 0 { + srv.res = object.MakePutRequestChunk(chunk1) + } else if callNum == 1 { + srv.res = object.MakePutRequestChunk(chunk2) + } else { + srv.res, srv.err = 0, io.EOF + } + callNum++ + } + + s := &putStreamReader{ + srv: srv, + } + + var ( + n int + err error + res = make([]byte, 0) + buf = make([]byte, 10) + ) + + for err != io.EOF { + n, err = s.Read(buf) + res = append(res, buf[:n]...) 
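+ // keep accumulating until the mocked stream reports io.EOF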
+ } + + require.Equal(t, append(chunk1, chunk2...), res) + }) +} diff --git a/services/public/object/query.go b/services/public/object/query.go new file mode 100644 index 000000000..79fddde47 --- /dev/null +++ b/services/public/object/query.go @@ -0,0 +1,234 @@ +package object + +import ( + "context" + "fmt" + "regexp" + + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/query" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/localstore" + "github.com/nspcc-dev/neofs-node/lib/transport" + "go.uber.org/zap" +) + +type ( + queryVersionController struct { + m map[int]localQueryImposer + } + + coreQueryImposer struct { + fCreator filterCreator + lsLister localstore.Iterator + + log *zap.Logger + } + + filterCreator interface { + createFilter(query.Query) Filter + } + + coreFilterCreator struct{} +) + +const ( + queryFilterName = "QUERY_FILTER" + + pmUndefinedFilterType = "undefined filter type %d" + + errUnsupportedQueryVersion = internal.Error("unsupported query version number") +) + +const errSearchQueryUnmarshal = internal.Error("query unmarshal failure") + +const errLocalQueryImpose = internal.Error("local query imposing failure") + +var ( + _ filterCreator = (*coreFilterCreator)(nil) + _ localQueryImposer = (*queryVersionController)(nil) + _ localQueryImposer = (*coreQueryImposer)(nil) +) + +func (s *queryVersionController) imposeQuery(ctx context.Context, c CID, d []byte, v int) ([]Address, error) { + imp := s.m[v] + if imp == nil { + return nil, errUnsupportedQueryVersion + } + + return imp.imposeQuery(ctx, c, d, v) +} + +func (s *coreQueryImposer) imposeQuery(ctx context.Context, cid CID, qData []byte, _ int) (res []Address, err error) { + defer func() { + switch err { + case nil, errSearchQueryUnmarshal: + default: + s.log.Error("local query imposing failure", + zap.String("error", err.Error()), + ) + + err = errLocalQueryImpose + } + }() + + var q query.Query + + if err = q.Unmarshal(qData); err != nil { + s.log.Error("could not unmarshal search query", + zap.String("error", err.Error()), + ) + + return nil, errSearchQueryUnmarshal + } else if err = mouldQuery(cid, &q); err != nil { + return + } + + err = s.lsLister.Iterate( + s.fCreator.createFilter(q), + func(meta *Meta) (stop bool) { + res = append(res, Address{ + CID: meta.Object.SystemHeader.CID, + ObjectID: meta.Object.SystemHeader.ID, + }) + return + }, + ) + + return res, err +} + +func (s *coreFilterCreator) createFilter(q query.Query) Filter { + f, err := localstore.AllPassIncludingFilter(queryFilterName, &localstore.FilterParams{ + FilterFunc: func(_ context.Context, o *Meta) *localstore.FilterResult { + if !imposeQuery(q, o.Object) { + return localstore.ResultFail() + } + return localstore.ResultPass() + }, + }) + if err != nil { + panic(err) // TODO: test panic occasion + } + + return f +} + +func mouldQuery(cid CID, q *query.Query) error { + var ( + withCID bool + cidStr = cid.String() + ) + + for i := range q.Filters { + if q.Filters[i].Name == KeyCID { + if q.Filters[i].Value != cidStr { + return errInvalidCIDFilter + } + + withCID = true + } + } + + if !withCID { + q.Filters = append(q.Filters, QueryFilter{ + Type: query.Filter_Exact, + Name: KeyCID, + Value: cidStr, + }) + } + + return nil +} + +func imposeQuery(q query.Query, o *Object) bool { + fs := make(map[string]*QueryFilter) + + for i := range q.Filters { + switch q.Filters[i].Name { + case transport.KeyTombstone: + if !o.IsTombstone() { + return false + } + default: + fs[q.Filters[i].Name] = 
&q.Filters[i] + } + } + + if !filterSystemHeader(fs, &o.SystemHeader) { + return false + } + + orphan := true + + for i := range o.Headers { + var key, value string + + switch h := o.Headers[i].Value.(type) { + case *object.Header_Link: + switch h.Link.Type { + case object.Link_Parent: + delete(fs, transport.KeyHasParent) + key = transport.KeyParent + orphan = false + case object.Link_Previous: + key = KeyPrev + case object.Link_Next: + key = KeyNext + case object.Link_Child: + if _, ok := fs[transport.KeyNoChildren]; ok { + return false + } + + key = KeyChild + default: + continue + } + + value = h.Link.ID.String() + case *object.Header_UserHeader: + key, value = h.UserHeader.Key, h.UserHeader.Value + case *object.Header_StorageGroup: + key = transport.KeyStorageGroup + default: + continue + } + + if !applyFilter(fs, key, value) { + return false + } + } + + if _, ok := fs[KeyRootObject]; ok && orphan { // we think that object without parents is a root or user's object + delete(fs, KeyRootObject) + } + + delete(fs, transport.KeyNoChildren) + + return len(fs) == 0 +} + +func filterSystemHeader(fs map[string]*QueryFilter, sysHead *SystemHeader) bool { + return applyFilter(fs, KeyID, sysHead.ID.String()) && + applyFilter(fs, KeyCID, sysHead.CID.String()) && + applyFilter(fs, KeyOwnerID, sysHead.OwnerID.String()) +} + +func applyFilter(fs map[string]*QueryFilter, key, value string) bool { + f := fs[key] + if f == nil { + return true + } + + delete(fs, key) + + switch f.Type { + case query.Filter_Exact: + return value == f.Value + case query.Filter_Regex: + regex, err := regexp.Compile(f.Value) + return err == nil && regex.MatchString(value) + default: + panic(fmt.Sprintf(pmUndefinedFilterType, f.Type)) + } +} diff --git a/services/public/object/query_test.go b/services/public/object/query_test.go new file mode 100644 index 000000000..ee6a5dea3 --- /dev/null +++ b/services/public/object/query_test.go @@ -0,0 +1,828 @@ +package object + +import ( + "context" + "fmt" + "testing" + + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/query" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-api-go/storagegroup" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/localstore" + "github.com/nspcc-dev/neofs-node/lib/transport" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +type ( + // Entity for mocking interfaces. + // Implementation of any interface intercepts arguments via f (if not nil). + // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. + testQueryEntity struct { + // Set of interfaces which testQueryEntity must implement, but some methods from those does not call. + Filter + + // Argument interceptor. Used for ascertain of correct parameter passage between components. + f func(...interface{}) + // Mocked result of any interface. + res interface{} + // Mocked error of any interface. 
+ err error + } +) + +var ( + _ filterCreator = (*testQueryEntity)(nil) + _ localQueryImposer = (*testQueryEntity)(nil) +) + +func (s *testQueryEntity) imposeQuery(_ context.Context, c CID, q []byte, v int) ([]Address, error) { + if s.f != nil { + s.f(c, q, v) + } + if s.err != nil { + return nil, s.err + } + return s.res.([]Address), nil +} + +func (s *testQueryEntity) createFilter(p query.Query) Filter { + if s.f != nil { + s.f(p) + } + return s +} + +func (s *testQueryEntity) Iterate(p Filter, h localstore.MetaHandler) error { + if s.f != nil { + s.f(p) + } + if s.err != nil { + return s.err + } + for _, item := range s.res.([]localstore.ListItem) { + h(&item.ObjectMeta) + } + return nil +} + +func Test_queryVersionController_imposeQuery(t *testing.T) { + ctx := context.TODO() + cid := testObjectAddress(t).CID + + t.Run("unsupported version", func(t *testing.T) { + qImp := &queryVersionController{ + m: make(map[int]localQueryImposer), + } + + res, err := qImp.imposeQuery(ctx, cid, nil, 1) + require.EqualError(t, err, errUnsupportedQueryVersion.Error()) + require.Empty(t, res) + }) + + t.Run("correct imposer choose", func(t *testing.T) { + m := make(map[int]localQueryImposer) + qData := testData(t, 10) + + qImp := &queryVersionController{m: m} + + m[0] = &testQueryEntity{ + f: func(items ...interface{}) { + t.Run("correct imposer params", func(t *testing.T) { + require.Equal(t, cid, items[0].(CID)) + require.Equal(t, qData, items[1].([]byte)) + require.Equal(t, 0, items[2].(int)) + }) + }, + err: internal.Error(""), // just to prevent panic + } + + _, _ = qImp.imposeQuery(ctx, cid, qData, 0) + }) + + t.Run("correct imposer result", func(t *testing.T) { + t.Run("error", func(t *testing.T) { + m := make(map[int]localQueryImposer) + qImp := &queryVersionController{m: m} + + impErr := internal.Error("test error for query imposer") + + m[0] = &testQueryEntity{ + err: impErr, // force localQueryImposer to return impErr + } + + res, err := qImp.imposeQuery(ctx, cid, nil, 0) + + // ascertain that error returns as expected + require.EqualError(t, err, impErr.Error()) + // ascertain that result is empty + require.Empty(t, res) + + // create test address list + addrList := testAddrList(t, 5) + + m[1] = &testQueryEntity{ + res: addrList, // force localQueryImposer to return addrList + } + + res, err = qImp.imposeQuery(ctx, cid, nil, 1) + require.NoError(t, err) + + // ascertain that result returns as expected + require.Equal(t, addrList, res) + }) + }) +} + +func Test_coreQueryImposer_imposeQuery(t *testing.T) { + v := 1 + ctx := context.TODO() + cid := testObjectAddress(t).CID + log := zap.L() + + t.Run("query unmarshal failure", func(t *testing.T) { + var ( + qErr error + data []byte + ) + + // create invalid query binary representation + for { + data = testData(t, 1024) + if qErr = new(query.Query).Unmarshal(data); qErr != nil { + break + } + } + + s := &coreQueryImposer{ + log: zap.L(), + } + + // trying to impose invalid query data + res, err := s.imposeQuery(ctx, cid, data, v) + + // ascertain that reached error exactly like in unmarshal + require.EqualError(t, err, errSearchQueryUnmarshal.Error()) + + // ascertain that empty result returned + require.Nil(t, res) + }) + + t.Run("mould query failure", func(t *testing.T) { + // create testQuery with CID filter with value other than cid + testQuery := &query.Query{Filters: []QueryFilter{{Type: query.Filter_Exact, Name: KeyCID, Value: cid.String() + "1"}}} + + // try to mould this testQuery + mErr := mouldQuery(cid, testQuery) + + // ascertain 
that testQuery mould failed + require.Error(t, mErr) + + // ascertain that testQuery marshals normally + d, err := testQuery.Marshal() + require.NoError(t, err) + + s := &coreQueryImposer{ + log: log, + } + + // try to impose testQuery + res, err := s.imposeQuery(ctx, cid, d, v) + + // ascertain that impose fails with same error as mould + require.EqualError(t, err, errLocalQueryImpose.Error()) + + // ascertain that result is empty + require.Nil(t, res) + }) + + t.Run("local store listing", func(t *testing.T) { + // create testQuery and object which matches to it + testQuery, obj := testFullObjectWithQuery(t) + + // ascertain testQuery marshals normally + qBytes, err := testQuery.Marshal() + require.NoError(t, err) + + t.Run("listing error", func(t *testing.T) { + // create new error for test + lsErr := internal.Error("test error of local store listing") + + // create test query imposer with mocked always failing lister + qImposer := &coreQueryImposer{ + fCreator: new(coreFilterCreator), + lsLister: &testQueryEntity{err: lsErr}, + log: log, + } + + // try to impose testQuery + list, err := qImposer.imposeQuery(ctx, obj.SystemHeader.CID, qBytes, v) + + // ascertain that impose fails same error as lister + require.EqualError(t, err, errLocalQueryImpose.Error()) + + // ascertain that result is empty + require.Empty(t, list) + }) + + t.Run("correct parameter", func(t *testing.T) { + // create new mocked filter creator + fc := new(testQueryEntity) + fc.res = fc + + // create testQuery imposer + qImposer := &coreQueryImposer{ + fCreator: fc, + lsLister: &testQueryEntity{ + f: func(p ...interface{}) { + // intercept lister arguments + // ascertain that argument is as expected + require.Equal(t, fc, p[0].(Filter)) + }, + err: internal.Error(""), + }, + log: log, + } + + _, _ = qImposer.imposeQuery(ctx, obj.SystemHeader.CID, qBytes, v) + }) + + t.Run("correct result", func(t *testing.T) { + // create list of random address items + addrList := testAddrList(t, 10) + items := make([]localstore.ListItem, 0, len(addrList)) + for i := range addrList { + items = append(items, localstore.ListItem{ + ObjectMeta: Meta{ + Object: &Object{ + SystemHeader: SystemHeader{ + ID: addrList[i].ObjectID, + CID: addrList[i].CID, + }, + }, + }, + }) + } + + // create imposer with mocked lister + qImposer := &coreQueryImposer{ + fCreator: new(coreFilterCreator), + lsLister: &testQueryEntity{res: items}, + } + + // try to impose testQuery + list, err := qImposer.imposeQuery(ctx, obj.SystemHeader.CID, qBytes, v) + + // ascertain that imposing finished normally + require.NoError(t, err) + + // ascertain that resulting list size as expected + require.Len(t, list, len(addrList)) + + // ascertain that all source items are presented in result + for i := range addrList { + require.Contains(t, list, addrList[i]) + } + }) + }) +} + +func Test_coreFilterCreator_createFilter(t *testing.T) { + ctx := context.TODO() + fCreator := new(coreFilterCreator) + + t.Run("composing correct filter", func(t *testing.T) { + var f Filter + + // ascertain filter creation does not panic + require.NotPanics(t, func() { f = fCreator.createFilter(query.Query{}) }) + + // ascertain that created filter is not empty + require.NotNil(t, f) + + // ascertain that created filter has expected name + require.Equal(t, queryFilterName, f.GetName()) + }) + + t.Run("passage on matching query", func(t *testing.T) { + // create testQuery and object which matches to it + testQuery, obj := testFullObjectWithQuery(t) + + // create filter for testQuery and pass object to 
it + res := fCreator.createFilter(testQuery).Pass(ctx, &Meta{Object: obj}) + + // ascertain that filter is passed + require.Equal(t, localstore.CodePass, res.Code()) + }) + + t.Run("failure on mismatching query", func(t *testing.T) { + testQuery, obj := testFullObjectWithQuery(t) + obj.SystemHeader.ID[0]++ + require.False(t, imposeQuery(testQuery, obj)) + + res := fCreator.createFilter(testQuery).Pass(ctx, &Meta{Object: obj}) + + require.Equal(t, localstore.CodeFail, res.Code()) + }) +} + +func Test_mouldQuery(t *testing.T) { + cid := testObjectAddress(t).CID + + t.Run("invalid CID filter", func(t *testing.T) { + // create query with CID filter with other than cid value + query := &query.Query{Filters: []QueryFilter{{Type: query.Filter_Exact, Name: KeyCID, Value: cid.String() + "1"}}} + + // try to mould this query for cid + err := mouldQuery(cid, query) + + // ascertain wrong CID value is not allowed + require.EqualError(t, err, errInvalidCIDFilter.Error()) + }) + + t.Run("correct CID filter", func(t *testing.T) { + // create testQuery with CID filter with cid value + cidF := QueryFilter{Type: query.Filter_Exact, Name: KeyCID, Value: cid.String()} + testQuery := &query.Query{Filters: []QueryFilter{cidF}} + + // ascertain mould is processed + require.NoError(t, mouldQuery(cid, testQuery)) + + // ascertain filter is still in testQuery + require.Contains(t, testQuery.Filters, cidF) + }) + + t.Run("missing CID filter", func(t *testing.T) { + // create CID filter with cid value + expF := QueryFilter{Type: query.Filter_Exact, Name: KeyCID, Value: cid.String()} + + // create empty testQuery + testQuery := new(query.Query) + + // ascertain mould is processed + require.NoError(t, mouldQuery(cid, testQuery)) + + // ascertain exact CID filter added to testQuery + require.Contains(t, testQuery.Filters, expF) + }) +} + +func Test_applyFilter(t *testing.T) { + k, v := "key", "value" + + t.Run("empty map", func(t *testing.T) { + // ascertain than applyFilter always return true on empty filter map + require.True(t, applyFilter(nil, k, v)) + }) + + t.Run("passage on missing key", func(t *testing.T) { + t.Run("exact", func(t *testing.T) { + require.True(t, applyFilter(map[string]*QueryFilter{k: {Type: query.Filter_Exact, Value: v + "1"}}, k+"1", v)) + }) + + t.Run("regex", func(t *testing.T) { + require.True(t, applyFilter(map[string]*QueryFilter{k: {Type: query.Filter_Regex, Value: v + "1"}}, k+"1", v)) + }) + }) + + t.Run("passage on key presence and matching value", func(t *testing.T) { + t.Run("exact", func(t *testing.T) { + require.True(t, applyFilter(map[string]*QueryFilter{k: {Type: query.Filter_Exact, Value: v}}, k, v)) + }) + + t.Run("regex", func(t *testing.T) { + require.True(t, applyFilter(map[string]*QueryFilter{k: {Type: query.Filter_Regex, Value: v + "|" + v + "1"}}, k, v)) + }) + }) + + t.Run("failure on key presence and mismatching value", func(t *testing.T) { + t.Run("exact", func(t *testing.T) { + require.False(t, applyFilter(map[string]*QueryFilter{k: {Type: query.Filter_Exact, Value: v + "1"}}, k, v)) + }) + + t.Run("regex", func(t *testing.T) { + require.False(t, applyFilter(map[string]*QueryFilter{k: {Type: query.Filter_Regex, Value: v + "&" + v + "1"}}, k, v)) + }) + }) + + t.Run("key removes from filter map", func(t *testing.T) { + // create filter map with several elements + m := map[string]*QueryFilter{ + k: {Type: query.Filter_Exact, Value: v}, + k + "1": {Type: query.Filter_Exact, Value: v}, + } + + // save initial len + initLen := len(m) + + // apply filter with key from 
filter map + applyFilter(m, k, v) + + // ascertain exactly key was removed from filter map + require.Len(t, m, initLen-1) + + // ascertain this is exactly applyFilter argument + _, ok := m[k] + require.False(t, ok) + }) + + t.Run("panic on unknown filter type", func(t *testing.T) { + // create filter type other than FilterExact and FilterRegex + fType := query.Filter_Exact + query.Filter_Regex + 1 + require.NotEqual(t, query.Filter_Exact, fType) + require.NotEqual(t, query.Filter_Regex, fType) + + // ascertain applyFilter does not process this type but panic + require.PanicsWithValue(t, + fmt.Sprintf(pmUndefinedFilterType, fType), + func() { applyFilter(map[string]*QueryFilter{k: {Type: fType}}, k, v) }, + ) + }) +} + +func Test_imposeQuery(t *testing.T) { + t.Run("tombstone filter", func(t *testing.T) { + // create testQuery with only tombstone filter + testQuery := query.Query{Filters: []QueryFilter{{Name: transport.KeyTombstone}}} + + // create object which is not a tombstone + obj := new(Object) + + testQueryMatch(t, testQuery, obj, func(t *testing.T, obj *Object) { + // adding tombstone header makes object to satisfy tombstone testQuery + obj.Headers = append(obj.Headers, Header{Value: new(object.Header_Tombstone)}) + }) + }) + + t.Run("system header", func(t *testing.T) { + addr := testObjectAddress(t) + cid, oid, ownerID := addr.CID, addr.ObjectID, OwnerID{3} + + // create testQuery with system header filters + testQuery := query.Query{Filters: []QueryFilter{ + {Type: query.Filter_Exact, Name: KeyCID, Value: cid.String()}, + {Type: query.Filter_Exact, Name: KeyID, Value: oid.String()}, + {Type: query.Filter_Exact, Name: KeyOwnerID, Value: ownerID.String()}, + }} + + // fn sets system header fields values to ones from filters + fn := func(t *testing.T, obj *Object) { obj.SystemHeader = SystemHeader{CID: cid, ID: oid, OwnerID: ownerID} } + + // create object with empty system header fields + obj := new(Object) + testQueryMatch(t, testQuery, obj, fn) + + // create object with CID from filters + sysHdr := SystemHeader{CID: cid} + obj = &Object{SystemHeader: sysHdr} + testQueryMatch(t, testQuery, obj, fn) + + // create object with OID from filters + sysHdr.CID = CID{} + sysHdr.ID = oid + obj = &Object{SystemHeader: sysHdr} + testQueryMatch(t, testQuery, obj, fn) + + // create object with OwnerID from filters + sysHdr.ID = ID{} + sysHdr.OwnerID = ownerID + obj = &Object{SystemHeader: sysHdr} + testQueryMatch(t, testQuery, obj, fn) + + // create object with CID and OwnerID from filters + sysHdr.CID = cid + obj = &Object{SystemHeader: sysHdr} + testQueryMatch(t, testQuery, obj, fn) + + // create object with OID and OwnerID from filters + sysHdr.CID = CID{} + sysHdr.ID = oid + obj = &Object{SystemHeader: sysHdr} + testQueryMatch(t, testQuery, obj, fn) + + // create object with OID and OwnerID from filters + sysHdr.ID = oid + obj = &Object{SystemHeader: sysHdr} + testQueryMatch(t, testQuery, obj, fn) + + // create object with CID and OID from filters + sysHdr.CID = cid + sysHdr.OwnerID = OwnerID{} + obj = &Object{SystemHeader: sysHdr} + testQueryMatch(t, testQuery, obj, fn) + }) + + t.Run("no children filter", func(t *testing.T) { + // create testQuery with only orphan filter + testQuery := query.Query{Filters: []QueryFilter{{Type: query.Filter_Exact, Name: transport.KeyNoChildren}}} + + // create object with child relation + obj := &Object{Headers: []Header{{Value: &object.Header_Link{Link: &object.Link{Type: object.Link_Child}}}}} + + testQueryMatch(t, testQuery, obj, func(t *testing.T, obj 
*Object) { + // child relation removal makes object to satisfy orphan testQuery + obj.Headers = nil + }) + }) + + t.Run("has parent filter", func(t *testing.T) { + // create testQuery with parent relation filter + testQuery := query.Query{Filters: []QueryFilter{{Type: query.Filter_Exact, Name: transport.KeyHasParent}}} + + // create object w/o parent + obj := new(Object) + + testQueryMatch(t, testQuery, obj, func(t *testing.T, obj *Object) { + // adding parent relation makes object to satisfy parent testQuery + obj.Headers = append(obj.Headers, Header{Value: &object.Header_Link{Link: &object.Link{Type: object.Link_Parent}}}) + }) + }) + + t.Run("root object filter", func(t *testing.T) { + // create testQuery with only root filter + testQuery := query.Query{Filters: []QueryFilter{{Type: query.Filter_Exact, Name: KeyRootObject}}} + + // create object with parent relation + obj := &Object{Headers: []Header{{Value: &object.Header_Link{Link: &object.Link{Type: object.Link_Parent}}}}} + + testQueryMatch(t, testQuery, obj, func(t *testing.T, obj *Object) { + // parent removal makes object to satisfy root testQuery + obj.Headers = nil + }) + }) + + t.Run("link value filters", func(t *testing.T) { + t.Run("parent", func(t *testing.T) { + testLinkQuery(t, transport.KeyParent, object.Link_Parent) + }) + + t.Run("child", func(t *testing.T) { + testLinkQuery(t, KeyChild, object.Link_Child) + }) + + t.Run("previous", func(t *testing.T) { + testLinkQuery(t, KeyPrev, object.Link_Previous) + }) + + t.Run("next", func(t *testing.T) { + testLinkQuery(t, KeyNext, object.Link_Next) + }) + + t.Run("other", func(t *testing.T) { + // create not usable link type + linkKey := object.Link_Parent + object.Link_Child + object.Link_Next + object.Link_Previous + + // add some usable link to testQuery + par := ID{1, 2, 3} + testQuery := query.Query{Filters: []QueryFilter{{Type: query.Filter_Exact, Name: transport.KeyParent, Value: par.String()}}} + + // ascertain that undefined link type has no affect on testQuery imposing + require.True(t, imposeQuery(testQuery, &Object{ + Headers: []Header{ + {Value: &object.Header_Link{Link: &object.Link{Type: linkKey}}}, + {Value: &object.Header_Link{Link: &object.Link{Type: object.Link_Parent, ID: par}}}, + }, + })) + }) + }) + + t.Run("user header filter", func(t *testing.T) { + // user header key-value pair + k, v := "header", "value" + + // query with user header filter + query := query.Query{Filters: []QueryFilter{{ + Type: query.Filter_Exact, + Name: k, + Value: v, + }}} + + // create user header with same key and different value + hdr := &UserHeader{Key: k, Value: v + "1"} + + // create object with this user header + obj := &Object{Headers: []Header{{Value: &object.Header_UserHeader{UserHeader: hdr}}}} + + testQueryMatch(t, query, obj, func(t *testing.T, obj *Object) { + // correcting value to one from filter makes object to satisfy query + hdr.Value = v + }) + }) + + t.Run("storage group filter", func(t *testing.T) { + // create testQuery with only storage group filter + testQuery := query.Query{Filters: []QueryFilter{{Type: query.Filter_Exact, Name: transport.KeyStorageGroup}}} + + // create object w/o storage group header + obj := new(Object) + + testQueryMatch(t, testQuery, obj, func(t *testing.T, obj *Object) { + // adding storage group headers make object to satisfy testQuery + obj.Headers = append(obj.Headers, Header{Value: &object.Header_StorageGroup{StorageGroup: new(storagegroup.StorageGroup)}}) + }) + }) +} + +func Test_filterSystemHeader(t *testing.T) { + var ( + 
ownerID1, ownerID2 = OwnerID{1}, OwnerID{2} + addr1, addr2 = testObjectAddress(t), testObjectAddress(t) + cid1, cid2 = addr1.CID, addr2.CID + oid1, oid2 = addr1.ObjectID, addr2.ObjectID + sysHdr = SystemHeader{ID: oid1, OwnerID: ownerID1, CID: cid1} + ) + require.NotEqual(t, ownerID1, ownerID2) + require.NotEqual(t, cid1, cid2) + require.NotEqual(t, oid1, oid2) + + t.Run("empty filter map", func(t *testing.T) { + // ascertain that any system header satisfies to empty (nil) filter map + require.True(t, filterSystemHeader(nil, &sysHdr)) + }) + + t.Run("missing of some of the fields", func(t *testing.T) { + // create filter map for system header + m := sysHeaderFilterMap(sysHdr) + + // copy system header for initial values saving + h := sysHdr + + // change CID + h.CID = cid2 + + // ascertain filter failure + require.False(t, filterSystemHeader(m, &h)) + + // remove CID from filter map + delete(m, KeyCID) + + // ascertain filter passage + require.True(t, filterSystemHeader(m, &h)) + + m = sysHeaderFilterMap(sysHdr) + h = sysHdr + + // change OwnerID + h.OwnerID = ownerID2 + + // ascertain filter failure + require.False(t, filterSystemHeader(m, &h)) + + // remove OwnerID from filter map + delete(m, KeyOwnerID) + + // ascertain filter passage + require.True(t, filterSystemHeader(m, &h)) + + m = sysHeaderFilterMap(sysHdr) + h = sysHdr + + // change ObjectID + h.ID = oid2 + + // ascertain filter failure + require.False(t, filterSystemHeader(m, &h)) + + // remove ObjectID from filter map + delete(m, KeyID) + + // ascertain filter passage + require.True(t, filterSystemHeader(m, &h)) + }) + + t.Run("valid fields passage", func(t *testing.T) { + require.True(t, filterSystemHeader(sysHeaderFilterMap(sysHdr), &sysHdr)) + }) + + t.Run("mismatching values failure", func(t *testing.T) { + h := sysHdr + + // make CID value not matching + h.CID = cid2 + + require.False(t, filterSystemHeader(sysHeaderFilterMap(sysHdr), &h)) + + h = sysHdr + + // make ObjectID value not matching + h.ID = oid2 + + require.False(t, filterSystemHeader(sysHeaderFilterMap(sysHdr), &h)) + + h = sysHdr + + // make OwnerID value not matching + h.OwnerID = ownerID2 + + require.False(t, filterSystemHeader(sysHeaderFilterMap(sysHdr), &h)) + }) +} + +// testQueryMatch imposes passed query to passed object for tests. +// Passed object should not match to passed query. +// Passed function must mutate object so that becomes query matching. +func testQueryMatch(t *testing.T, q query.Query, obj *Object, fn func(*testing.T, *Object)) { + require.False(t, imposeQuery(q, obj)) + fn(t, obj) + require.True(t, imposeQuery(q, obj)) +} + +// testLinkQuery tests correctness of imposing query with link filters. +// Inits object with value different from one from filter. Then uses testQueryMatch with correcting value func. 
+func testLinkQuery(t *testing.T, key string, lt object.Link_Type) { + // create new relation link + relative, err := refs.NewObjectID() + require.NoError(t, err) + + // create another relation link + wrongRelative := relative + for wrongRelative.Equal(relative) { + wrongRelative, err = refs.NewObjectID() + require.NoError(t, err) + } + + // create query with relation filter + query := query.Query{Filters: []QueryFilter{{ + Type: query.Filter_Exact, + Name: key, + Value: relative.String(), + }}} + + // create link with relation different from one from filter + link := &object.Link{Type: lt, ID: wrongRelative} + // create object with this link + obj := &Object{Headers: []Header{{Value: &object.Header_Link{Link: link}}}} + testQueryMatch(t, query, obj, func(t *testing.T, object *Object) { + // changing link value to one from filter make object to satisfy relation query + link.ID = relative + }) +} + +// sysHeaderFilterMap creates filter map for passed system header. +func sysHeaderFilterMap(hdr SystemHeader) map[string]*QueryFilter { + return map[string]*QueryFilter{ + KeyCID: { + Type: query.Filter_Exact, + Name: KeyCID, + Value: hdr.CID.String(), + }, + KeyOwnerID: { + Type: query.Filter_Exact, + Name: KeyOwnerID, + Value: hdr.OwnerID.String(), + }, + KeyID: { + Type: query.Filter_Exact, + Name: KeyID, + Value: hdr.ID.String(), + }, + } +} + +// testFullObjectWithQuery creates query with set of permissible filters and object matching to this query. +func testFullObjectWithQuery(t *testing.T) (query.Query, *Object) { + addr := testObjectAddress(t) + selfID, cid := addr.ObjectID, addr.CID + + ownerID := OwnerID{} + copy(ownerID[:], testData(t, refs.OwnerIDSize)) + + addrList := testAddrList(t, 4) + + parID, childID, nextID, prevID := addrList[0].ObjectID, addrList[1].ObjectID, addrList[2].ObjectID, addrList[3].ObjectID + + query := query.Query{Filters: []QueryFilter{ + {Type: query.Filter_Exact, Name: transport.KeyParent, Value: parID.String()}, + {Type: query.Filter_Exact, Name: KeyPrev, Value: prevID.String()}, + {Type: query.Filter_Exact, Name: KeyNext, Value: nextID.String()}, + {Type: query.Filter_Exact, Name: KeyChild, Value: childID.String()}, + {Type: query.Filter_Exact, Name: KeyOwnerID, Value: ownerID.String()}, + {Type: query.Filter_Exact, Name: KeyID, Value: selfID.String()}, + {Type: query.Filter_Exact, Name: KeyCID, Value: cid.String()}, + {Type: query.Filter_Exact, Name: transport.KeyStorageGroup}, + {Type: query.Filter_Exact, Name: transport.KeyTombstone}, + {Type: query.Filter_Exact, Name: transport.KeyHasParent}, + }} + + obj := &Object{ + SystemHeader: SystemHeader{ + ID: selfID, + OwnerID: ownerID, + CID: cid, + }, + Headers: []Header{ + {Value: &object.Header_Link{Link: &object.Link{Type: object.Link_Parent, ID: parID}}}, + {Value: &object.Header_Link{Link: &object.Link{Type: object.Link_Previous, ID: prevID}}}, + {Value: &object.Header_Link{Link: &object.Link{Type: object.Link_Next, ID: nextID}}}, + {Value: &object.Header_Link{Link: &object.Link{Type: object.Link_Child, ID: childID}}}, + {Value: &object.Header_StorageGroup{StorageGroup: new(storagegroup.StorageGroup)}}, + {Value: &object.Header_Tombstone{Tombstone: new(object.Tombstone)}}, + }, + } + + require.True(t, imposeQuery(query, obj)) + + return query, obj +} diff --git a/services/public/object/ranges.go b/services/public/object/ranges.go new file mode 100644 index 000000000..acc05e0cb --- /dev/null +++ b/services/public/object/ranges.go @@ -0,0 +1,481 @@ +package object + +import ( + "context" + "io" + "sync" + 
+ "github.com/nspcc-dev/neofs-api-go/hash" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/objio" + "github.com/nspcc-dev/neofs-node/lib/transport" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +type ( + // Range is a type alias of + // Range from object package of neofs-api-go. + Range = object.Range + + // RangeDescriptor is a type alias of + // RangeDescriptor from objio package. + RangeDescriptor = objio.RangeDescriptor + + // RangeChopper is a type alias of + // RangeChopper from objio package. + RangeChopper = objio.RangeChopper + + // GetRangeRequest is a type alias of + // GetRangeRequest from object package of neofs-api-go. + GetRangeRequest = object.GetRangeRequest + + // GetRangeResponse is a type alias of + // GetRangeResponse from object package of neofs-api-go. + GetRangeResponse = object.GetRangeResponse + + // GetRangeHashRequest is a type alias of + // GetRangeResponse from object package of neofs-api-go. + GetRangeHashRequest = object.GetRangeHashRequest + + // GetRangeHashResponse is a type alias of + // GetRangeHashResponse from object package of neofs-api-go. + GetRangeHashResponse = object.GetRangeHashResponse + + objectRangeReceiver interface { + getRange(context.Context, rangeTool) (interface{}, error) + } + + rangeTool interface { + transport.RangeHashInfo + budOff(*RangeDescriptor) rangeTool + handler() rangeItemAccumulator + } + + rawRangeInfo struct { + *rawAddrInfo + rng Range + } + + rawRangeHashInfo struct { + *rawAddrInfo + rngList []Range + salt []byte + } + + coreRangeReceiver struct { + rngRevealer rangeRevealer + straightRngRecv objectRangeReceiver + + // Set of errors that won't be converted into errPayloadRangeNotFound + mErr map[error]struct{} + + log *zap.Logger + } + + straightRangeReceiver struct { + executor operationExecutor + } + + singleItemHandler struct { + *sync.Once + item interface{} + } + + rangeItemAccumulator interface { + responseItemHandler + collect() (interface{}, error) + } + + rangeHashAccum struct { + concat bool + h []Hash + } + + rangeRevealer interface { + reveal(context.Context, *RangeDescriptor) ([]RangeDescriptor, error) + } + + coreRngRevealer struct { + relativeRecv objio.RelativeReceiver + chopTable objio.ChopperTable + } + + getRangeServerWriter struct { + req *GetRangeRequest + + srv object.Service_GetRangeServer + + respPreparer responsePreparer + } +) + +const ( + emGetRangeFail = "could get object range #%d part #%d" + emRangeRevealFail = "could not reveal object range #%d" + emRangeCollect = "could not collect result of object range #%d" + + errRangeReveal = internal.Error("could not reveal payload range") +) + +var ( + _ transport.RangeInfo = (*rawRangeInfo)(nil) + _ rangeTool = (*rawRangeHashInfo)(nil) + _ rangeTool = (*transportRequest)(nil) + _ rangeItemAccumulator = (*rangeHashAccum)(nil) + _ rangeItemAccumulator = (*singleItemHandler)(nil) + _ rangeRevealer = (*coreRngRevealer)(nil) + _ objectRangeReceiver = (*coreRangeReceiver)(nil) + _ objectRangeReceiver = (*straightRangeReceiver)(nil) + _ io.Writer = (*getRangeServerWriter)(nil) + _ transport.RangeInfo = (*transportRequest)(nil) +) + +func (s *objectService) GetRange(req *GetRangeRequest, srv object.Service_GetRangeServer) (err error) { + defer func() { + if r := recover(); r != nil { + s.log.Error(panicLogMsg, + zap.Stringer("request", object.RequestRange), + zap.Any("reason", r), + ) + + err = errServerPanic + } + + err = 
s.statusCalculator.make(requestError{ + t: object.RequestRange, + e: err, + }) + }() + + var r interface{} + + if r, err = s.requestHandler.handleRequest(srv.Context(), handleRequestParams{ + request: req, + executor: s, + }); err == nil { + _, err = io.CopyBuffer( + &getRangeServerWriter{ + req: req, + srv: srv, + respPreparer: s.rangeChunkPreparer, + }, + r.(io.Reader), + make([]byte, maxGetPayloadSize), + ) + } + + return err +} + +func (s *objectService) GetRangeHash(ctx context.Context, req *GetRangeHashRequest) (res *GetRangeHashResponse, err error) { + defer func() { + if r := recover(); r != nil { + s.log.Error(panicLogMsg, + zap.Stringer("request", object.RequestRangeHash), + zap.Any("reason", r), + ) + + err = errServerPanic + } + + err = s.statusCalculator.make(requestError{ + t: object.RequestRangeHash, + e: err, + }) + }() + + var r interface{} + + if r, err = s.requestHandler.handleRequest(ctx, handleRequestParams{ + request: req, + executor: s, + }); err != nil { + return + } + + res = makeRangeHashResponse(r.([]Hash)) + err = s.respPreparer.prepareResponse(ctx, req, res) + + return +} + +func (s *coreRangeReceiver) getRange(ctx context.Context, rt rangeTool) (res interface{}, err error) { + defer func() { + if err != nil { + if _, ok := s.mErr[errors.Cause(err)]; !ok { + s.log.Error("get range failure", + zap.String("error", err.Error()), + ) + + err = errPayloadRangeNotFound + } + } + }() + + var ( + subRngSet []RangeDescriptor + rngSet = rt.GetRanges() + addr = rt.GetAddress() + handler = rt.handler() + ) + + for i := range rngSet { + rd := RangeDescriptor{ + Size: int64(rngSet[i].Length), + Offset: int64(rngSet[i].Offset), + Addr: addr, + } + + if rt.GetTTL() < service.NonForwardingTTL { + subRngSet = []RangeDescriptor{rd} + } else if subRngSet, err = s.rngRevealer.reveal(ctx, &rd); err != nil { + return nil, errors.Wrapf(err, emRangeRevealFail, i+1) + } else if len(subRngSet) == 0 { + return nil, errRangeReveal + } + + subRangeTool := rt.budOff(&rd) + subHandler := subRangeTool.handler() + + for j := range subRngSet { + tool := subRangeTool.budOff(&subRngSet[j]) + + if subRngSet[j].Addr.Equal(&addr) { + res, err = s.straightRngRecv.getRange(ctx, tool) + } else { + res, err = s.getRange(ctx, tool) + } + + if err != nil { + return nil, errors.Wrapf(err, emGetRangeFail, i+1, j+1) + } + + subHandler.handleItem(res) + } + + rngRes, err := subHandler.collect() + if err != nil { + return nil, errors.Wrapf(err, emRangeCollect, i+1) + } + + handler.handleItem(rngRes) + } + + return handler.collect() +} + +func (s *straightRangeReceiver) getRange(ctx context.Context, rt rangeTool) (interface{}, error) { + handler := newSingleItemHandler() + if err := s.executor.executeOperation(ctx, rt, handler); err != nil { + return nil, err + } + + return handler.collect() +} + +func (s *coreRngRevealer) reveal(ctx context.Context, r *RangeDescriptor) ([]RangeDescriptor, error) { + chopper, err := s.getChopper(r.Addr) + if err != nil { + return nil, err + } + + return chopper.Chop(ctx, r.Size, r.Offset, true) +} + +func (s *coreRngRevealer) getChopper(addr Address) (res RangeChopper, err error) { + if res, err = s.chopTable.GetChopper(addr, objio.RCCharybdis); err == nil && res.Closed() { + return + } else if res, err = s.chopTable.GetChopper(addr, objio.RCScylla); err == nil { + return + } else if res, err = objio.NewScylla(&objio.ChopperParams{ + RelativeReceiver: s.relativeRecv, + Addr: addr, + }); err != nil { + return nil, err + } + + _ = s.chopTable.PutChopper(addr, res) + + return +} + 
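The lookup order in coreRngRevealer.getChopper above can be summarised by the following minimal sketch. The chopperCache, stubChopper types and the "charybdis"/"scylla" kind strings are illustrative stand-ins only, not part of the objio API (the real code keys choppers by object address and objio.RCType through a ChopperTable); the sketch merely mirrors the fallback: reuse a charybdis chopper only if it reports Closed(), otherwise reuse an existing scylla chopper, otherwise build a new scylla chopper and cache it.

package main

import (
	"errors"
	"fmt"
)

// chopper mirrors the only method of a range chopper that the lookup
// order depends on; everything else here is a stand-in for illustration.
type chopper interface {
	Closed() bool
}

type chopperCache map[string]chopper

var errMiss = errors.New("chopper not found")

func (c chopperCache) get(kind string) (chopper, error) {
	if ch, ok := c[kind]; ok {
		return ch, nil
	}
	return nil, errMiss
}

// resolve reproduces the fallback used by coreRngRevealer.getChopper:
// 1) a "charybdis" chopper is reused only when it reports Closed();
// 2) otherwise any existing "scylla" chopper is reused;
// 3) otherwise a new scylla chopper is built and cached for later calls.
func (c chopperCache) resolve(newScylla func() (chopper, error)) (chopper, error) {
	if ch, err := c.get("charybdis"); err == nil && ch.Closed() {
		return ch, nil
	}
	if ch, err := c.get("scylla"); err == nil {
		return ch, nil
	}
	ch, err := newScylla()
	if err != nil {
		return nil, err
	}
	c["scylla"] = ch
	return ch, nil
}

type stubChopper struct{ closed bool }

func (s stubChopper) Closed() bool { return s.closed }

func main() {
	cache := chopperCache{}
	ch, err := cache.resolve(func() (chopper, error) { return stubChopper{closed: true}, nil })
	fmt.Println(ch, err) // {true} <nil>: the freshly built chopper is returned and cached
}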
+func loopData(data []byte, size, off int64) []byte { + if len(data) == 0 { + return make([]byte, 0) + } + + res := make([]byte, 0, size) + + var ( + cut int64 + tail = data[off%int64(len(data)):] + ) + + for added := int64(0); added < size; added += cut { + cut = min(int64(len(tail)), size-added) + res = append(res, tail[:cut]...) + tail = data + } + + return res +} + +func min(a, b int64) int64 { + if a < b { + return a + } + + return b +} + +func newSingleItemHandler() rangeItemAccumulator { return &singleItemHandler{Once: new(sync.Once)} } + +func (s *singleItemHandler) handleItem(item interface{}) { s.Do(func() { s.item = item }) } + +func (s *singleItemHandler) collect() (interface{}, error) { return s.item, nil } + +func (s *rangeHashAccum) handleItem(h interface{}) { + if v, ok := h.(Hash); ok { + s.h = append(s.h, v) + return + } + + s.h = append(s.h, h.([]Hash)...) +} + +func (s *rangeHashAccum) collect() (interface{}, error) { + if s.concat { + return hash.Concat(s.h) + } + + return s.h, nil +} + +func (s *rawRangeHashInfo) GetRanges() []Range { + return s.rngList +} + +func (s *rawRangeHashInfo) setRanges(v []Range) { + s.rngList = v +} + +func (s *rawRangeHashInfo) GetSalt() []byte { + return s.salt +} + +func (s *rawRangeHashInfo) setSalt(v []byte) { + s.salt = v +} + +func (s *rawRangeHashInfo) getAddrInfo() *rawAddrInfo { + return s.rawAddrInfo +} + +func (s *rawRangeHashInfo) setAddrInfo(v *rawAddrInfo) { + s.rawAddrInfo = v + s.setType(object.RequestRangeHash) +} + +func newRawRangeHashInfo() *rawRangeHashInfo { + res := new(rawRangeHashInfo) + + res.setAddrInfo(newRawAddressInfo()) + + return res +} + +func (s *rawRangeHashInfo) budOff(r *RangeDescriptor) rangeTool { + res := newRawRangeHashInfo() + + res.setMetaInfo(s.getMetaInfo()) + res.setAddress(r.Addr) + res.setRanges([]Range{ + { + Offset: uint64(r.Offset), + Length: uint64(r.Size), + }, + }) + res.setSalt(loopData(s.salt, int64(len(s.salt)), r.Offset)) + res.setSessionToken(s.GetSessionToken()) + res.setBearerToken(s.GetBearerToken()) + res.setExtendedHeaders(s.ExtendedHeaders()) + + return res +} + +func (s *rawRangeHashInfo) handler() rangeItemAccumulator { return &rangeHashAccum{concat: true} } + +func (s *transportRequest) GetRanges() []Range { + return s.serviceRequest.(*object.GetRangeHashRequest).Ranges +} + +func (s *transportRequest) GetSalt() []byte { + return s.serviceRequest.(*object.GetRangeHashRequest).Salt +} + +func (s *transportRequest) budOff(rd *RangeDescriptor) rangeTool { + res := newRawRangeHashInfo() + + res.setTTL(s.GetTTL()) + res.setTimeout(s.GetTimeout()) + res.setAddress(rd.Addr) + res.setRanges([]Range{ + { + Offset: uint64(rd.Offset), + Length: uint64(rd.Size), + }, + }) + res.setSalt(s.serviceRequest.(*object.GetRangeHashRequest).GetSalt()) + res.setSessionToken(s.GetSessionToken()) + res.setBearerToken(s.GetBearerToken()) + res.setExtendedHeaders(s.ExtendedHeaders()) + + return res +} + +func (s *transportRequest) handler() rangeItemAccumulator { return new(rangeHashAccum) } + +func (s *getRangeServerWriter) Write(p []byte) (int, error) { + resp := makeRangeResponse(p) + if err := s.respPreparer.prepareResponse(s.srv.Context(), s.req, resp); err != nil { + return 0, err + } + + if err := s.srv.Send(resp); err != nil { + return 0, err + } + + return len(p), nil +} + +func (s *rawRangeInfo) GetRange() Range { + return s.rng +} + +func (s *rawRangeInfo) setRange(rng Range) { + s.rng = rng +} + +func (s *rawRangeInfo) getAddrInfo() *rawAddrInfo { + return s.rawAddrInfo +} + +func (s 
*rawRangeInfo) setAddrInfo(v *rawAddrInfo) { + s.rawAddrInfo = v + s.setType(object.RequestRange) +} + +func newRawRangeInfo() *rawRangeInfo { + res := new(rawRangeInfo) + + res.setAddrInfo(newRawAddressInfo()) + + return res +} + +func (s *transportRequest) GetRange() Range { + return s.serviceRequest.(*GetRangeRequest).Range +} diff --git a/services/public/object/ranges_test.go b/services/public/object/ranges_test.go new file mode 100644 index 000000000..57d6d2e82 --- /dev/null +++ b/services/public/object/ranges_test.go @@ -0,0 +1,778 @@ +package object + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/nspcc-dev/neofs-api-go/hash" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/objio" + "github.com/nspcc-dev/neofs-node/lib/transport" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +type ( + // Entity for mocking interfaces. + // Implementation of any interface intercepts arguments via f (if not nil). + // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. + testRangeEntity struct { + // Set of interfaces which entity must implement, but some methods from those does not call. + RangeChopper + object.Service_GetRangeServer + + // Argument interceptor. Used for ascertain of correct parameter passage between components. + f func(...interface{}) + // Mocked result of any interface. + res interface{} + // Mocked error of any interface. + err error + } +) + +var ( + _ objio.RelativeReceiver = (*testRangeEntity)(nil) + _ RangeChopper = (*testRangeEntity)(nil) + _ operationExecutor = (*testRangeEntity)(nil) + _ requestHandler = (*testRangeEntity)(nil) + _ rangeRevealer = (*testRangeEntity)(nil) + _ objectRangeReceiver = (*testRangeEntity)(nil) + _ object.Service_GetRangeServer = (*testRangeEntity)(nil) + _ responsePreparer = (*testRangeEntity)(nil) +) + +func (s *testRangeEntity) prepareResponse(_ context.Context, req serviceRequest, resp serviceResponse) error { + if s.f != nil { + s.f(req, resp) + } + return s.err +} + +func (s *testRangeEntity) Context() context.Context { return context.TODO() } + +func (s *testRangeEntity) Send(r *GetRangeResponse) error { + if s.f != nil { + s.f(r) + } + return s.err +} + +func (s *testRangeEntity) getRange(_ context.Context, t rangeTool) (interface{}, error) { + if s.f != nil { + s.f(t) + } + return s.res, s.err +} + +func (s *testRangeEntity) reveal(_ context.Context, r *RangeDescriptor) ([]RangeDescriptor, error) { + if s.f != nil { + s.f(r) + } + if s.err != nil { + return nil, s.err + } + return s.res.([]RangeDescriptor), nil +} + +func (s *testRangeEntity) Base(ctx context.Context, addr Address) (RangeDescriptor, error) { + if s.f != nil { + s.f(addr) + } + if s.err != nil { + return RangeDescriptor{}, s.err + } + return s.res.(RangeDescriptor), nil +} + +func (s *testRangeEntity) Neighbor(ctx context.Context, addr Address, left bool) (RangeDescriptor, error) { + if s.f != nil { + s.f(addr, left) + } + if s.err != nil { + return RangeDescriptor{}, s.err + } + return s.res.(RangeDescriptor), nil +} + +func (s *testRangeEntity) Chop(ctx context.Context, length, offset int64, fromStart bool) ([]RangeDescriptor, error) { + if s.f != nil { + s.f(length, offset, fromStart) + } + if s.err != nil { + return nil, s.err + } + return s.res.([]RangeDescriptor), nil +} + +func (s *testRangeEntity) Closed() bool { return s.res.(bool) } + +func (s 
*testRangeEntity) PutChopper(addr Address, chopper RangeChopper) error { + if s.f != nil { + s.f(addr, chopper) + } + return s.err +} + +func (s *testRangeEntity) GetChopper(addr Address, rc objio.RCType) (RangeChopper, error) { + if s.f != nil { + s.f(addr, rc) + } + if s.err != nil { + return nil, s.err + } + return s.res.(RangeChopper), nil +} + +func (s *testRangeEntity) executeOperation(_ context.Context, i transport.MetaInfo, h responseItemHandler) error { + if s.f != nil { + s.f(i, h) + } + return s.err +} + +func (s *testRangeEntity) handleRequest(_ context.Context, p handleRequestParams) (interface{}, error) { + if s.f != nil { + s.f(p) + } + return s.res, s.err +} + +func Test_objectService_GetRange(t *testing.T) { + req := &GetRangeRequest{Address: testObjectAddress(t)} + + t.Run("request handler error", func(t *testing.T) { + rhErr := internal.Error("test error for request handler") + + s := &objectService{ + statusCalculator: newStatusCalculator(), + } + + s.requestHandler = &testRangeEntity{ + f: func(items ...interface{}) { + t.Run("correct request handler params", func(t *testing.T) { + p := items[0].(handleRequestParams) + require.Equal(t, s, p.executor) + require.Equal(t, req, p.request) + }) + }, + err: rhErr, // force requestHandler to return rhErr + } + + // ascertain that error returns as expected + require.EqualError(t, s.GetRange(req, new(testRangeEntity)), rhErr.Error()) + }) + + t.Run("correct result", func(t *testing.T) { + fragment := testData(t, 10) + + resp := &GetRangeResponse{Fragment: fragment} + + s := objectService{ + requestHandler: &testRangeEntity{ + res: bytes.NewReader(fragment), // force requestHandler to return fragment + }, + rangeChunkPreparer: &testRangeEntity{ + f: func(items ...interface{}) { + require.Equal(t, req, items[0]) + require.Equal(t, makeRangeResponse(fragment), items[1]) + }, + res: resp, + }, + + statusCalculator: newStatusCalculator(), + } + + srv := &testRangeEntity{ + f: func(items ...interface{}) { + require.Equal(t, resp, items[0]) + }, + } + + require.NoError(t, s.GetRange(req, srv)) + }) +} + +func Test_objectService_GetRangeHash(t *testing.T) { + ctx := context.TODO() + + req := &GetRangeHashRequest{Address: testObjectAddress(t)} + + t.Run("request handler error", func(t *testing.T) { + rhErr := internal.Error("test error for request handler") + + s := &objectService{ + statusCalculator: newStatusCalculator(), + } + + s.requestHandler = &testRangeEntity{ + f: func(items ...interface{}) { + t.Run("correct request handler params", func(t *testing.T) { + p := items[0].(handleRequestParams) + require.Equal(t, s, p.executor) + require.Equal(t, req, p.request) + }) + }, + err: rhErr, // force requestHandler to return rhErr + } + + // ascertain that error returns as expected + res, err := s.GetRangeHash(ctx, req) + require.EqualError(t, err, rhErr.Error()) + require.Nil(t, res) + }) + + t.Run("correct result", func(t *testing.T) { + hCount := 5 + hashes := make([]Hash, 0, hCount) + + for i := 0; i < hCount; i++ { + hashes = append(hashes, hash.Sum(testData(t, 10))) + } + + s := objectService{ + requestHandler: &testRangeEntity{ + res: hashes, // force requestHandler to return fragments + }, + respPreparer: &testRangeEntity{ + f: func(items ...interface{}) { + require.Equal(t, req, items[0]) + require.Equal(t, makeRangeHashResponse(hashes), items[1]) + }, + res: &GetRangeHashResponse{Hashes: hashes}, + }, + + statusCalculator: newStatusCalculator(), + } + + res, err := s.GetRangeHash(ctx, req) + require.NoError(t, err) + 
require.Equal(t, hashes, res.Hashes) + }) +} + +func Test_coreRangeReceiver(t *testing.T) { + ctx := context.TODO() + log := zap.L() + + t.Run("range reveal failure", func(t *testing.T) { + revErr := internal.Error("test error for range revealer") + + rt := newRawRangeHashInfo() + rt.setTTL(service.NonForwardingTTL) + rt.setAddress(testObjectAddress(t)) + rt.setRanges([]Range{ + { + Offset: 1, + Length: 2, + }, + }) + + revealer := &testRangeEntity{ + f: func(items ...interface{}) { + require.Equal(t, &RangeDescriptor{ + Size: int64(rt.rngList[0].Length), + Offset: int64(rt.rngList[0].Offset), + Addr: rt.addr, + }, items[0]) + }, + err: revErr, + } + + s := &coreRangeReceiver{ + rngRevealer: revealer, + log: log, + } + + res, err := s.getRange(ctx, rt) + require.EqualError(t, err, errPayloadRangeNotFound.Error()) + require.Nil(t, res) + + revealer.err = nil + revealer.res = make([]RangeDescriptor, 0) + + res, err = s.getRange(ctx, rt) + require.EqualError(t, err, errPayloadRangeNotFound.Error()) + require.Nil(t, res) + }) + + t.Run("get sub range failure", func(t *testing.T) { + gErr := internal.Error("test error for get range") + + rt := newRawRangeHashInfo() + rt.setTTL(service.NonForwardingTTL) + rt.setAddress(testObjectAddress(t)) + rt.setRanges([]Range{ + { + Offset: 1, + Length: 2, + }, + }) + + revealer := &testRangeEntity{ + res: []RangeDescriptor{{Size: 3, Offset: 4, Addr: testObjectAddress(t)}}, + } + + called := false + revealer.f = func(items ...interface{}) { + if called { + revealer.err = gErr + return + } + called = true + } + + s := &coreRangeReceiver{ + rngRevealer: revealer, + log: log, + } + + res, err := s.getRange(ctx, rt) + require.EqualError(t, err, errPayloadRangeNotFound.Error()) + require.Nil(t, res) + }) + + t.Run("non-forwarding behavior", func(t *testing.T) { + rt := newRawRangeHashInfo() + rt.setTTL(service.NonForwardingTTL - 1) + rt.setAddress(testObjectAddress(t)) + rt.setRanges([]Range{ + { + Offset: 1, + Length: 2, + }, + }) + + rd := RangeDescriptor{ + Size: int64(rt.rngList[0].Length), + Offset: int64(rt.rngList[0].Offset), + Addr: rt.addr, + } + + d := hash.Sum(testData(t, 10)) + + s := &coreRangeReceiver{ + straightRngRecv: &testRangeEntity{ + f: func(items ...interface{}) { + require.Equal(t, rt.budOff(&rd), items[0]) + }, + res: d, + }, + } + + res, err := s.getRange(ctx, rt) + require.NoError(t, err) + require.Equal(t, d, res) + }) + + t.Run("correct result concat", func(t *testing.T) { + rt := newRawRangeHashInfo() + rt.setTTL(service.NonForwardingTTL) + rt.setRanges([]Range{ + {}, + }) + + revealer := new(testRangeEntity) + revCalled := false + revealer.f = func(items ...interface{}) { + if revCalled { + revealer.res = []RangeDescriptor{items[0].(RangeDescriptor)} + } else { + revealer.res = make([]RangeDescriptor, 2) + } + revCalled = true + } + + h1, h2 := hash.Sum(testData(t, 10)), hash.Sum(testData(t, 10)) + + recvCalled := false + receiver := new(testRangeEntity) + receiver.f = func(...interface{}) { + if recvCalled { + receiver.res = h2 + } else { + receiver.res = h1 + } + recvCalled = true + } + + s := &coreRangeReceiver{ + rngRevealer: revealer, + straightRngRecv: receiver, + } + + exp, err := hash.Concat([]Hash{h1, h2}) + require.NoError(t, err) + + res, err := s.getRange(ctx, rt) + require.NoError(t, err) + require.Equal(t, exp, res) + }) +} + +func Test_straightRangeReceiver_getRange(t *testing.T) { + ctx := context.TODO() + + req := new(transportRequest) + + t.Run("executor error", func(t *testing.T) { + exErr := internal.Error("test 
error for executor") + + s := &straightRangeReceiver{ + executor: &testRangeEntity{ + f: func(items ...interface{}) { + t.Run("correct executor params", func(t *testing.T) { + require.Equal(t, req, items[0]) + require.Equal(t, newSingleItemHandler(), items[1]) + }) + }, + err: exErr, // force operationExecutor to return exErr + }, + } + + res, err := s.getRange(ctx, req) + require.EqualError(t, err, exErr.Error()) + require.Nil(t, res) + }) + + t.Run("correct result", func(t *testing.T) { + v := testData(t, 10) + + s := &straightRangeReceiver{ + executor: &testRangeEntity{ + f: func(items ...interface{}) { + items[1].(rangeItemAccumulator).handleItem(v) + }, + err: nil, // force operationExecutor to return nil error + }, + } + + res, err := s.getRange(ctx, req) + require.NoError(t, err) + require.Equal(t, v, res) + }) +} + +func Test_coreRngRevealer_reveal(t *testing.T) { + ctx := context.TODO() + + rd := RangeDescriptor{ + Size: 5, + Offset: 6, + Addr: testObjectAddress(t), + } + + t.Run("charybdis chopper presence", func(t *testing.T) { + cErr := internal.Error("test error for charybdis") + + s := &coreRngRevealer{ + chopTable: &testRangeEntity{ + f: func(items ...interface{}) { + t.Run("correct chopper table params", func(t *testing.T) { + require.Equal(t, rd.Addr, items[0]) + require.Equal(t, objio.RCCharybdis, items[1]) + }) + }, + res: &testRangeEntity{ + f: func(items ...interface{}) { + t.Run("correct chopper params", func(t *testing.T) { + require.Equal(t, rd.Size, items[0]) + require.Equal(t, rd.Offset, items[1]) + require.True(t, items[2].(bool)) + }) + }, + res: true, // close chopper + err: cErr, // force RangeChopper to return cErr + }, + }, + } + + res, err := s.reveal(ctx, &rd) + require.EqualError(t, err, cErr.Error()) + require.Empty(t, res) + }) + + t.Run("scylla chopper presence", func(t *testing.T) { + scErr := internal.Error("test error for scylla") + + scylla := &testRangeEntity{ + err: scErr, // force RangeChopper to return scErr + } + + ct := new(testRangeEntity) + + ct.f = func(items ...interface{}) { + if items[1].(objio.RCType) == objio.RCCharybdis { + ct.err = internal.Error("") + } else { + ct.res = scylla + ct.err = nil + } + } + + s := &coreRngRevealer{ + chopTable: ct, + } + + res, err := s.reveal(ctx, &rd) + require.EqualError(t, err, scErr.Error()) + require.Empty(t, res) + }) + + t.Run("new scylla", func(t *testing.T) { + t.Run("error", func(t *testing.T) { + s := &coreRngRevealer{ + relativeRecv: nil, // pass empty relation receiver to fail constructor + chopTable: &testRangeEntity{ + err: internal.Error(""), // force ChopperTable to return non-nil error + }, + } + + res, err := s.reveal(ctx, &rd) + require.Error(t, err) + require.Nil(t, res) + }) + + t.Run("success", func(t *testing.T) { + rrErr := internal.Error("test error for relative receiver") + + relRecv := &testRangeEntity{ + err: rrErr, // force relative receiver to return rrErr + } + + scylla, err := objio.NewScylla(&objio.ChopperParams{ + RelativeReceiver: relRecv, + Addr: rd.Addr, + }) + require.NoError(t, err) + + callNum := 0 + + s := &coreRngRevealer{ + relativeRecv: relRecv, + chopTable: &testRangeEntity{ + f: func(items ...interface{}) { + t.Run("correct put chopper params", func(t *testing.T) { + if callNum >= 2 { + require.Equal(t, rd.Addr, items[0]) + require.Equal(t, scylla, items[1]) + } + }) + }, + err: internal.Error(""), // force ChopperTable to return non-nil error + }, + } + + expRes, expErr := scylla.Chop(ctx, rd.Size, rd.Offset, true) + require.Error(t, expErr) + + res, err 
:= s.reveal(ctx, &rd) + require.EqualError(t, err, expErr.Error()) + require.Equal(t, expRes, res) + }) + }) +} + +func Test_transportRequest_rangeTool(t *testing.T) { + t.Run("get ranges", func(t *testing.T) { + rngs := []Range{ + {Offset: 1, Length: 2}, + {Offset: 3, Length: 4}, + } + + reqs := []transportRequest{ + {serviceRequest: &GetRangeHashRequest{Ranges: rngs}}, + } + + for i := range reqs { + require.Equal(t, reqs[i].GetRanges(), rngs) + } + }) + + t.Run("bud off", func(t *testing.T) { + var ( + timeout = 6 * time.Second + ttl = uint32(16) + rd = RangeDescriptor{ + Size: 1, + Offset: 2, + Addr: testObjectAddress(t), + } + ) + + t.Run("get range hash request", func(t *testing.T) { + salt := testData(t, 10) + + r := &GetRangeHashRequest{Salt: salt} + r.SetToken(new(service.Token)) + + req := &transportRequest{ + serviceRequest: r, + timeout: timeout, + } + req.SetTTL(ttl) + + tool := req.budOff(&rd).(transport.RangeHashInfo) + + require.Equal(t, timeout, tool.GetTimeout()) + require.Equal(t, ttl, tool.GetTTL()) + require.Equal(t, rd.Addr, tool.GetAddress()) + require.Equal(t, []Range{{Offset: uint64(rd.Offset), Length: uint64(rd.Size)}}, tool.GetRanges()) + require.Equal(t, salt, tool.GetSalt()) + require.Equal(t, r.GetSessionToken(), tool.GetSessionToken()) + }) + }) + + t.Run("handler", func(t *testing.T) { + t.Run("get range request", func(t *testing.T) { + req := &transportRequest{serviceRequest: new(GetRangeHashRequest)} + handler := req.handler() + require.Equal(t, new(rangeHashAccum), handler) + }) + }) +} + +func Test_rawRangeHashInfo(t *testing.T) { + t.Run("get ranges", func(t *testing.T) { + rngs := []Range{ + {Offset: 1, Length: 2}, + {Offset: 3, Length: 4}, + } + + r := newRawRangeHashInfo() + r.setRanges(rngs) + + require.Equal(t, rngs, r.GetRanges()) + }) + + t.Run("handler", func(t *testing.T) { + require.Equal(t, + &rangeHashAccum{concat: true}, + newRawRangeHashInfo().handler(), + ) + }) + + t.Run("bud off", func(t *testing.T) { + var ( + ttl = uint32(12) + timeout = 7 * time.Hour + ) + + r := newRawRangeHashInfo() + r.setTTL(ttl) + r.setTimeout(timeout) + r.setSalt(testData(t, 20)) + r.setSessionToken(new(service.Token)) + + rd := RangeDescriptor{ + Size: 120, + Offset: 71, + Addr: testObjectAddress(t), + } + + tool := r.budOff(&rd) + + require.Equal(t, ttl, tool.GetTTL()) + require.Equal(t, timeout, tool.GetTimeout()) + require.Equal(t, rd.Addr, tool.GetAddress()) + require.Equal(t, []Range{{Offset: uint64(rd.Offset), Length: uint64(rd.Size)}}, tool.GetRanges()) + require.Equal(t, r.GetSessionToken(), tool.GetSessionToken()) + require.Equal(t, + loopData(r.salt, int64(len(r.salt)), rd.Offset), + tool.(transport.RangeHashInfo).GetSalt(), + ) + }) +} + +func Test_rawRangeInfo(t *testing.T) { + t.Run("get ranges", func(t *testing.T) { + rng := Range{Offset: 1, Length: 2} + + r := newRawRangeInfo() + r.setRange(rng) + + require.Equal(t, rng, r.GetRange()) + }) +} + +func Test_loopSalt(t *testing.T) { + t.Run("empty data", func(t *testing.T) { + require.Empty(t, loopData(nil, 20, 10)) + require.Empty(t, loopData(make([]byte, 0), 20, 10)) + }) + + t.Run("data part", func(t *testing.T) { + var ( + off, size int64 = 10, 20 + d = testData(t, 40) + ) + require.Equal(t, d[off:off+size], loopData(d, size, off)) + }) + + t.Run("with recycle", func(t *testing.T) { + var ( + d = testData(t, 40) + off = int64(len(d) / 2) + size = 2 * off + ) + + require.Equal(t, + append(d[off:], d[:size-off]...), + loopData(d, size, off), + ) + }) +} + +func Test_rangeHashAccum(t *testing.T) { 
+ t.Run("handle item", func(t *testing.T) { + s := &rangeHashAccum{ + h: []Hash{hash.Sum(testData(t, 10))}, + } + + h := hash.Sum(testData(t, 10)) + + exp := append(s.h, h) + + s.handleItem(h) + + require.Equal(t, exp, s.h) + + exp = append(s.h, s.h...) + + s.handleItem(s.h) + + require.Equal(t, exp, s.h) + }) + + t.Run("collect", func(t *testing.T) { + hashes := []Hash{hash.Sum(testData(t, 10)), hash.Sum(testData(t, 10))} + + t.Run("w/ concat", func(t *testing.T) { + s := &rangeHashAccum{ + concat: true, + h: hashes, + } + + expRes, expErr := hash.Concat(hashes) + + res, err := s.collect() + + require.Equal(t, expRes, res) + require.Equal(t, expErr, err) + }) + + t.Run("w/o concat", func(t *testing.T) { + s := &rangeHashAccum{ + concat: false, + h: hashes, + } + + res, err := s.collect() + require.NoError(t, err) + require.Equal(t, hashes, res) + }) + }) +} diff --git a/services/public/object/response.go b/services/public/object/response.go new file mode 100644 index 000000000..37f086764 --- /dev/null +++ b/services/public/object/response.go @@ -0,0 +1,144 @@ +package object + +import ( + "context" + + "github.com/nspcc-dev/neofs-api-go/acl" + "github.com/nspcc-dev/neofs-api-go/object" + libacl "github.com/nspcc-dev/neofs-node/lib/acl" +) + +type ( + serviceResponse interface { + SetEpoch(uint64) + } + + responsePreparer interface { + prepareResponse(context.Context, serviceRequest, serviceResponse) error + } + + epochResponsePreparer struct { + epochRecv EpochReceiver + } +) + +type complexResponsePreparer struct { + items []responsePreparer +} + +type aclResponsePreparer struct { + eaclSrc libacl.ExtendedACLSource + + aclInfoReceiver aclInfoReceiver + + reqActCalc requestActionCalculator +} + +type headersFromObject struct { + obj *Object +} + +var ( + _ responsePreparer = (*epochResponsePreparer)(nil) +) + +func (s headersFromObject) getHeaders() (*Object, bool) { + return s.obj, true +} + +func (s complexResponsePreparer) prepareResponse(ctx context.Context, req serviceRequest, resp serviceResponse) error { + for i := range s.items { + if err := s.items[i].prepareResponse(ctx, req, resp); err != nil { + return err + } + } + + return nil +} + +func (s *epochResponsePreparer) prepareResponse(_ context.Context, req serviceRequest, resp serviceResponse) error { + resp.SetEpoch(s.epochRecv.Epoch()) + + return nil +} + +func (s *aclResponsePreparer) prepareResponse(ctx context.Context, req serviceRequest, resp serviceResponse) error { + aclInfo, err := s.aclInfoReceiver.getACLInfo(ctx, req) + if err != nil { + return errAccessDenied + } else if !aclInfo.checkBearer && !aclInfo.checkExtended { + return nil + } + + var obj *Object + + switch r := resp.(type) { + case *object.GetResponse: + obj = r.GetObject() + case *object.HeadResponse: + obj = r.GetObject() + case interface { + GetObject() *Object + }: + obj = r.GetObject() + } + + if obj == nil { + return nil + } + + // FIXME: do not check request headers. + // At this stage request is already validated, but action calculator will check it again. 
+ p := requestActionParams{ + eaclSrc: s.eaclSrc, + request: req, + objHdrSrc: headersFromObject{ + obj: obj, + }, + target: aclInfo.target, + } + + if aclInfo.checkBearer { + p.eaclSrc = eaclFromBearer{ + bearer: req.GetBearerToken(), + } + } + + if action := s.reqActCalc.calculateRequestAction(ctx, p); action != acl.ActionAllow { + return errAccessDenied + } + + return nil +} + +func makeDeleteResponse() *object.DeleteResponse { + return new(object.DeleteResponse) +} + +func makeRangeHashResponse(v []Hash) *GetRangeHashResponse { + return &GetRangeHashResponse{Hashes: v} +} + +func makeRangeResponse(v []byte) *GetRangeResponse { + return &GetRangeResponse{Fragment: v} +} + +func makeSearchResponse(v []Address) *object.SearchResponse { + return &object.SearchResponse{Addresses: v} +} + +func makeHeadResponse(v *Object) *object.HeadResponse { + return &object.HeadResponse{Object: v} +} + +func makePutResponse(v Address) *object.PutResponse { + return &object.PutResponse{Address: v} +} + +func makeGetHeaderResponse(v *Object) *object.GetResponse { + return &object.GetResponse{R: &object.GetResponse_Object{Object: v}} +} + +func makeGetChunkResponse(v []byte) *object.GetResponse { + return &object.GetResponse{R: &object.GetResponse_Chunk{Chunk: v}} +} diff --git a/services/public/object/response_test.go b/services/public/object/response_test.go new file mode 100644 index 000000000..5057029ab --- /dev/null +++ b/services/public/object/response_test.go @@ -0,0 +1,116 @@ +package object + +import ( + "context" + "testing" + + "github.com/nspcc-dev/neofs-api-go/hash" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/stretchr/testify/require" +) + +func TestEpochResponsePreparer(t *testing.T) { + epoch := uint64(33) + + s := &epochResponsePreparer{ + epochRecv: &testPutEntity{res: epoch}, + } + + ctx := context.TODO() + + t.Run("get", func(t *testing.T) { + t.Run("head", func(t *testing.T) { + obj := &Object{ + SystemHeader: SystemHeader{ + ID: testObjectAddress(t).ObjectID, + CID: testObjectAddress(t).CID, + }, + } + + resp := makeGetHeaderResponse(obj) + + require.NoError(t, s.prepareResponse(ctx, new(object.GetRequest), resp)) + + require.Equal(t, obj, resp.GetObject()) + require.Equal(t, epoch, resp.GetEpoch()) + }) + + t.Run("chunk", func(t *testing.T) { + chunk := testData(t, 10) + + resp := makeGetChunkResponse(chunk) + + require.NoError(t, s.prepareResponse(ctx, new(object.GetRequest), resp)) + + require.Equal(t, chunk, resp.GetChunk()) + require.Equal(t, epoch, resp.GetEpoch()) + }) + }) + + t.Run("put", func(t *testing.T) { + addr := testObjectAddress(t) + + resp := makePutResponse(addr) + require.NoError(t, s.prepareResponse(ctx, new(object.PutRequest), resp)) + + require.Equal(t, addr, resp.GetAddress()) + require.Equal(t, epoch, resp.GetEpoch()) + }) + + t.Run("head", func(t *testing.T) { + obj := &Object{ + SystemHeader: SystemHeader{ + PayloadLength: 7, + ID: testObjectAddress(t).ObjectID, + CID: testObjectAddress(t).CID, + }, + } + + resp := makeHeadResponse(obj) + require.NoError(t, s.prepareResponse(ctx, new(object.HeadRequest), resp)) + + require.Equal(t, obj, resp.GetObject()) + require.Equal(t, epoch, resp.GetEpoch()) + }) + + t.Run("search", func(t *testing.T) { + addrList := testAddrList(t, 5) + + resp := makeSearchResponse(addrList) + require.NoError(t, s.prepareResponse(ctx, new(object.SearchRequest), resp)) + + require.Equal(t, addrList, resp.GetAddresses()) + require.Equal(t, epoch, resp.GetEpoch()) + }) + + t.Run("range", func(t *testing.T) { + data := 
testData(t, 10) + + resp := makeRangeResponse(data) + require.NoError(t, s.prepareResponse(ctx, new(GetRangeRequest), resp)) + + require.Equal(t, data, resp.GetFragment()) + require.Equal(t, epoch, resp.GetEpoch()) + }) + + t.Run("range hash", func(t *testing.T) { + hashes := []Hash{ + hash.Sum(testData(t, 10)), + hash.Sum(testData(t, 10)), + } + + resp := makeRangeHashResponse(hashes) + require.NoError(t, s.prepareResponse(ctx, new(object.GetRangeHashRequest), resp)) + + require.Equal(t, hashes, resp.Hashes) + require.Equal(t, epoch, resp.GetEpoch()) + }) + + t.Run("delete", func(t *testing.T) { + resp := makeDeleteResponse() + require.NoError(t, s.prepareResponse(ctx, new(object.DeleteRequest), resp)) + + require.IsType(t, new(object.DeleteResponse), resp) + require.Equal(t, epoch, resp.GetEpoch()) + }) +} diff --git a/services/public/object/search.go b/services/public/object/search.go new file mode 100644 index 000000000..39771ddd6 --- /dev/null +++ b/services/public/object/search.go @@ -0,0 +1,169 @@ +package object + +import ( + "context" + "sync" + + "github.com/nspcc-dev/neofs-api-go/object" + v1 "github.com/nspcc-dev/neofs-api-go/query" + "github.com/nspcc-dev/neofs-node/lib/transport" + "go.uber.org/zap" +) + +// QueryFilter is a type alias of +// Filter from query package of neofs-api-go. +type QueryFilter = v1.Filter + +const ( + // KeyChild is a filter key to child link. + KeyChild = "CHILD" + + // KeyPrev is a filter key to previous link. + KeyPrev = "PREV" + + // KeyNext is a filter key to next link. + KeyNext = "NEXT" + + // KeyID is a filter key to object ID. + KeyID = "ID" + + // KeyCID is a filter key to container ID. + KeyCID = "CID" + + // KeyOwnerID is a filter key to owner ID. + KeyOwnerID = "OWNERID" + + // KeyRootObject is a filter key to objects w/o parent links. + KeyRootObject = "ROOT_OBJECT" +) + +type ( + objectSearcher interface { + searchObjects(context.Context, transport.SearchInfo) ([]Address, error) + } + + coreObjectSearcher struct { + executor operationExecutor + } + + // objectAddressSet is an interface of object address set. + objectAddressSet interface { + responseItemHandler + + // list returns all elements of set. + list() []Address + } + + // coreObjAddrSet is an implementation of objectAddressSet interface used in Object service production. + coreObjAddrSet struct { + // Read-write mutex for race protection. + *sync.RWMutex + + // Stored elements of the set.
+ items []Address + } +) + +var addrPerMsg = int64(maxGetPayloadSize / new(Address).Size()) + +var ( + _ transport.SearchInfo = (*transportRequest)(nil) + _ objectSearcher = (*coreObjectSearcher)(nil) + _ objectAddressSet = (*coreObjAddrSet)(nil) +) + +func (s *transportRequest) GetCID() CID { return s.serviceRequest.(*object.SearchRequest).CID() } + +func (s *transportRequest) GetQuery() []byte { + return s.serviceRequest.(*object.SearchRequest).GetQuery() +} + +func (s *objectService) Search(req *object.SearchRequest, srv object.Service_SearchServer) (err error) { + defer func() { + if r := recover(); r != nil { + s.log.Error(panicLogMsg, + zap.Stringer("request", object.RequestSearch), + zap.Any("reason", r), + ) + + err = errServerPanic + } + + err = s.statusCalculator.make(requestError{ + t: object.RequestSearch, + e: err, + }) + }() + + var r interface{} + + if r, err = s.requestHandler.handleRequest(srv.Context(), handleRequestParams{ + request: req, + executor: s, + }); err != nil { + return err + } + + addrList := r.([]Address) + + for { + cut := min(int64(len(addrList)), addrPerMsg) + + resp := makeSearchResponse(addrList[:cut]) + if err = s.respPreparer.prepareResponse(srv.Context(), req, resp); err != nil { + return + } + + if err = srv.Send(resp); err != nil { + return + } + + addrList = addrList[cut:] + if len(addrList) == 0 { + break + } + } + + return err +} + +func (s *coreObjectSearcher) searchObjects(ctx context.Context, sInfo transport.SearchInfo) ([]Address, error) { + addrSet := newUniqueAddressAccumulator() + if err := s.executor.executeOperation(ctx, sInfo, addrSet); err != nil { + return nil, err + } + + return addrSet.list(), nil +} + +func newUniqueAddressAccumulator() objectAddressSet { + return &coreObjAddrSet{ + RWMutex: new(sync.RWMutex), + items: make([]Address, 0, 10), + } +} + +func (s *coreObjAddrSet) handleItem(v interface{}) { + addrList := v.([]Address) + + s.Lock() + +loop: + for i := range addrList { + for j := range s.items { + if s.items[j].Equal(&addrList[i]) { + continue loop + } + } + s.items = append(s.items, addrList[i]) + } + + s.Unlock() +} + +func (s *coreObjAddrSet) list() []Address { + s.RLock() + defer s.RUnlock() + + return s.items +} diff --git a/services/public/object/search_test.go b/services/public/object/search_test.go new file mode 100644 index 000000000..dc65edef5 --- /dev/null +++ b/services/public/object/search_test.go @@ -0,0 +1,265 @@ +package object + +import ( + "context" + "testing" + + "github.com/nspcc-dev/neofs-api-go/object" + v1 "github.com/nspcc-dev/neofs-api-go/query" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/transport" + "github.com/stretchr/testify/require" +) + +type ( + // Entity for mocking interfaces. + // Implementation of any interface intercepts arguments via f (if not nil). + // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. + testSearchEntity struct { + // Set of interfaces which entity must implement, but some methods from those does not call. + object.Service_SearchServer + + // Argument interceptor. Used for ascertain of correct parameter passage between components. + f func(...interface{}) + // Mocked result of any interface. + res interface{} + // Mocked error of any interface. 
+ err error + } +) + +var ( + _ requestHandler = (*testSearchEntity)(nil) + _ operationExecutor = (*testSearchEntity)(nil) + _ responsePreparer = (*testSearchEntity)(nil) + + _ object.Service_SearchServer = (*testSearchEntity)(nil) +) + +func (s *testSearchEntity) prepareResponse(_ context.Context, req serviceRequest, resp serviceResponse) error { + if s.f != nil { + s.f(req, resp) + } + return s.err +} + +func (s *testSearchEntity) Send(r *object.SearchResponse) error { + if s.f != nil { + s.f(r) + } + return s.err +} + +func (s *testSearchEntity) Context() context.Context { return context.TODO() } + +func (s *testSearchEntity) executeOperation(_ context.Context, p transport.MetaInfo, h responseItemHandler) error { + if s.f != nil { + s.f(p, h) + } + return s.err +} + +func (s *testSearchEntity) handleRequest(_ context.Context, p handleRequestParams) (interface{}, error) { + if s.f != nil { + s.f(p) + } + return s.res, s.err +} + +func TestSearchVerify(t *testing.T) { + t.Run("KeyNoChildren", func(t *testing.T) { + var ( + q = v1.Query{ + Filters: []QueryFilter{ + { + Type: v1.Filter_Exact, + Name: transport.KeyNoChildren, + }, + }, + } + obj = new(Object) + ) + require.True(t, imposeQuery(q, obj)) + + obj.Headers = append(obj.Headers, Header{Value: &object.Header_Link{ + Link: &object.Link{ + Type: object.Link_Child, + }, + }}) + require.False(t, imposeQuery(q, obj)) + }) +} + +func Test_coreObjAddrSet(t *testing.T) { + // create address accumulator + acc := newUniqueAddressAccumulator() + require.NotNil(t, acc) + + // check type correctness + v, ok := acc.(*coreObjAddrSet) + require.True(t, ok) + + // check fields initialization + require.NotNil(t, v.items) + require.NotNil(t, v.RWMutex) + + t.Run("add/list", func(t *testing.T) { + // ascertain that initial list is empty + require.Empty(t, acc.list()) + + // add first set of addresses + addrList1 := testAddrList(t, 5) + acc.handleItem(addrList1) + + // ascertain that list is equal to added list + require.Equal(t, addrList1, acc.list()) + + // add more addresses + addrList2 := testAddrList(t, 5) + acc.handleItem(addrList2) + + twoLists := append(addrList1, addrList2...) 
+ + // ascertain that list is a concatenation of added lists + require.Equal(t, twoLists, acc.list()) + + // add second list again + acc.handleItem(addrList2) + + // ascertain that list have not changed after adding existing elements + require.Equal(t, twoLists, acc.list()) + }) +} + +func TestObjectService_Search(t *testing.T) { + req := &object.SearchRequest{ + ContainerID: testObjectAddress(t).CID, + Query: testData(t, 10), + } + + addrList := testAddrList(t, int(addrPerMsg)+5) + + t.Run("request handler failure", func(t *testing.T) { + rhErr := internal.Error("test error for request handler") + s := &objectService{ + statusCalculator: newStatusCalculator(), + } + + s.requestHandler = &testSearchEntity{ + f: func(items ...interface{}) { + p := items[0].(handleRequestParams) + require.Equal(t, req, p.request) + require.Equal(t, s, p.executor) + }, + err: rhErr, + } + + require.EqualError(t, s.Search(req, new(testSearchEntity)), rhErr.Error()) + }) + + t.Run("server error", func(t *testing.T) { + srvErr := internal.Error("test error for search server") + + resp := &object.SearchResponse{Addresses: addrList[:addrPerMsg]} + + s := &objectService{ + requestHandler: &testSearchEntity{ + res: addrList, + }, + respPreparer: &testSearchEntity{ + f: func(items ...interface{}) { + require.Equal(t, req, items[0]) + require.Equal(t, makeSearchResponse(addrList[:addrPerMsg]), items[1]) + }, + res: resp, + }, + + statusCalculator: newStatusCalculator(), + } + + srv := &testSearchEntity{ + f: func(items ...interface{}) { + require.Equal(t, resp, items[0]) + }, + err: srvErr, // force server to return srvErr + } + + require.EqualError(t, s.Search(req, srv), srvErr.Error()) + }) + + t.Run("correct result", func(t *testing.T) { + handler := &testSearchEntity{res: make([]Address, 0)} + + off := 0 + + var resp *object.SearchResponse + + s := &objectService{ + requestHandler: handler, + respPreparer: &testSearchEntity{ + f: func(items ...interface{}) { + require.Equal(t, req, items[0]) + resp = items[1].(*object.SearchResponse) + sz := len(resp.GetAddresses()) + require.Equal(t, makeSearchResponse(addrList[off:off+sz]), items[1]) + off += sz + }, + }, + + statusCalculator: newStatusCalculator(), + } + + srv := &testSearchEntity{ + f: func(items ...interface{}) { + require.Equal(t, resp, items[0]) + }, + } + + require.NoError(t, s.Search(req, srv)) + + handler.res = addrList + + require.NoError(t, s.Search(req, srv)) + }) +} + +func Test_coreObjectSearcher(t *testing.T) { + ctx := context.TODO() + + req := newRawSearchInfo() + req.setQuery(testData(t, 10)) + + t.Run("operation executor failure", func(t *testing.T) { + execErr := internal.Error("test error for operation executor") + + s := &coreObjectSearcher{ + executor: &testSearchEntity{ + f: func(items ...interface{}) { + require.Equal(t, req, items[0]) + require.Equal(t, newUniqueAddressAccumulator(), items[1]) + }, + err: execErr, + }, + } + + res, err := s.searchObjects(ctx, req) + require.EqualError(t, err, execErr.Error()) + require.Empty(t, res) + }) + + t.Run("correct result", func(t *testing.T) { + addrList := testAddrList(t, 5) + + s := &coreObjectSearcher{ + executor: &testSearchEntity{ + f: func(items ...interface{}) { + items[1].(responseItemHandler).handleItem(addrList) + }, + }, + } + + res, err := s.searchObjects(ctx, req) + require.NoError(t, err) + require.Equal(t, addrList, res) + }) +} diff --git a/services/public/object/service.go b/services/public/object/service.go new file mode 100644 index 000000000..87e120072 --- /dev/null +++ 
b/services/public/object/service.go @@ -0,0 +1,680 @@ +package object + +import ( + "context" + "crypto/ecdsa" + "math" + "time" + + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-api-go/hash" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-api-go/session" + "github.com/nspcc-dev/neofs-api-go/storagegroup" + "github.com/nspcc-dev/neofs-node/internal" + libacl "github.com/nspcc-dev/neofs-node/lib/acl" + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/nspcc-dev/neofs-node/lib/ir" + "github.com/nspcc-dev/neofs-node/lib/localstore" + "github.com/nspcc-dev/neofs-node/lib/objio" + "github.com/nspcc-dev/neofs-node/lib/objutil" + "github.com/nspcc-dev/neofs-node/lib/transformer" + "github.com/nspcc-dev/neofs-node/modules/grpc" + "github.com/panjf2000/ants/v2" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +type ( + // CID is a type alias of + // CID from refs package of neofs-api-go. + CID = refs.CID + + // Object is a type alias of + // Object from object package of neofs-api-go. + Object = object.Object + + // ID is a type alias of + // ObjectID from refs package of neofs-api-go. + ID = refs.ObjectID + + // OwnerID is a type alias of + // OwnerID from refs package of neofs-api-go. + OwnerID = refs.OwnerID + + // Address is a type alias of + // Address from refs package of neofs-api-go. + Address = refs.Address + + // Hash is a type alias of + // Hash from hash package of neofs-api-go. + Hash = hash.Hash + + // Meta is a type alias of + // ObjectMeta from localstore package. + Meta = localstore.ObjectMeta + + // Filter is a type alias of + // FilterPipeline from localstore package. + Filter = localstore.FilterPipeline + + // Header is a type alias of + // Header from object package of neofs-api-go. + Header = object.Header + + // UserHeader is a type alias of + // UserHeader from object package of neofs-api-go. + UserHeader = object.UserHeader + + // SystemHeader is a type alias of + // SystemHeader from object package of neofs-api-go. + SystemHeader = object.SystemHeader + + // CreationPoint is a type alias of + // CreationPoint from object package of neofs-api-go. + CreationPoint = object.CreationPoint + + // Service is an interface of the server of Object service. + Service interface { + grpc.Service + CapacityMeter + object.ServiceServer + } + + // CapacityMeter is an interface of node storage capacity meter. + CapacityMeter interface { + RelativeAvailableCap() float64 + AbsoluteAvailableCap() uint64 + } + + // EpochReceiver is an interface of the container of epoch number with read access. + EpochReceiver interface { + Epoch() uint64 + } + + // RemoteService is an interface of Object service client constructor. + RemoteService interface { + Remote(context.Context, multiaddr.Multiaddr) (object.ServiceClient, error) + } + + // Placer is an interface of placement component. + Placer interface { + IsContainerNode(ctx context.Context, addr multiaddr.Multiaddr, cid CID, previousNetMap bool) (bool, error) + GetNodes(ctx context.Context, addr Address, usePreviousNetMap bool, excl ...multiaddr.Multiaddr) ([]multiaddr.Multiaddr, error) + } + + // WorkerPool is an interface of go-routing pool. + WorkerPool interface { + Submit(func()) error + } + + // Salitor is a salting slice function. 
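+	// It is wired into the local store executor in New, which applies it to
+	// payload range data before hashing.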
+ Salitor func(data []byte, salt []byte) []byte + + serviceRequest interface { + object.Request + service.RequestData + service.SignKeyPairAccumulator + service.SignKeyPairSource + + SetToken(*service.Token) + + SetBearer(*service.BearerTokenMsg) + + SetHeaders([]service.RequestExtendedHeader_KV) + } + + // Params groups the parameters of Object service server's constructor. + Params struct { + CheckACL bool + + Assembly bool + + WindowSize int + + MaxProcessingSize uint64 + StorageCapacity uint64 + PoolSize int + Salitor Salitor + LocalStore localstore.Localstore + Placer Placer + ObjectRestorer transformer.ObjectRestorer + RemoteService RemoteService + AddressStore implementations.AddressStoreComponent + Logger *zap.Logger + TokenStore session.PrivateTokenStore + EpochReceiver EpochReceiver + + implementations.ContainerNodesLister + + DialTimeout time.Duration + + Key *ecdsa.PrivateKey + + PutParams OperationParams + GetParams OperationParams + DeleteParams OperationParams + HeadParams OperationParams + SearchParams OperationParams + RangeParams OperationParams + RangeHashParams OperationParams + + headRecv objectReceiver + + Verifier objutil.Verifier + + Transformer transformer.Transformer + + MaxPayloadSize uint64 + + // ACL pre-processor params + ACLHelper implementations.ACLHelper + BasicACLChecker libacl.BasicChecker + IRStorage ir.Storage + ContainerLister implementations.ContainerNodesLister + + SGInfoReceiver storagegroup.InfoReceiver + + OwnerKeyVerifier core.OwnerKeyVerifier + + ExtendedACLSource libacl.ExtendedACLSource + + requestActionCalculator + + targetFinder RequestTargeter + + aclInfoReceiver aclInfoReceiver + } + + // OperationParams groups the parameters of particular object operation. + OperationParams struct { + Timeout time.Duration + LogErrors bool + } + + objectService struct { + ls localstore.Localstore + storageCap uint64 + + executor implementations.SelectiveContainerExecutor + + pPut OperationParams + pGet OperationParams + pDel OperationParams + pHead OperationParams + pSrch OperationParams + pRng OperationParams + pRngHash OperationParams + + log *zap.Logger + + requestHandler requestHandler + + objSearcher objectSearcher + objRecv objectReceiver + objStorer objectStorer + objRemover objectRemover + rngRecv objectRangeReceiver + + payloadRngRecv payloadRangeReceiver + + respPreparer responsePreparer + + getChunkPreparer responsePreparer + rangeChunkPreparer responsePreparer + + statusCalculator *statusCalculator + } +) + +const ( + defaultDialTimeout = 5 * time.Second + defaultPutTimeout = time.Second + defaultGetTimeout = time.Second + defaultDeleteTimeout = time.Second + defaultHeadTimeout = time.Second + defaultSearchTimeout = time.Second + defaultRangeTimeout = time.Second + defaultRangeHashTimeout = time.Second + + defaultPoolSize = 10 + + readyObjectsCheckpointFilterName = "READY_OBJECTS_PUT_CHECKPOINT" + allObjectsCheckpointFilterName = "ALL_OBJECTS_PUT_CHECKPOINT" + + errEmptyTokenStore = internal.Error("objectService.New failed: key store not provided") + errEmptyPlacer = internal.Error("objectService.New failed: placer not provided") + errEmptyTransformer = internal.Error("objectService.New failed: transformer pipeline not provided") + errEmptyGRPC = internal.Error("objectService.New failed: gRPC connector not provided") + errEmptyAddress = internal.Error("objectService.New failed: address store not provided") + errEmptyLogger = internal.Error("objectService.New failed: logger not provided") + errEmptyEpochReceiver = 
internal.Error("objectService.New failed: epoch receiver not provided") + errEmptyLocalStore = internal.Error("new local client failed: localstore passed") + errEmptyPrivateKey = internal.Error("objectService.New failed: private key not provided") + errEmptyVerifier = internal.Error("objectService.New failed: object verifier not provided") + errEmptyACLHelper = internal.Error("objectService.New failed: ACL helper not provided") + errEmptyBasicACLChecker = internal.Error("objectService.New failed: basic ACL checker not provided") + errEmptyCnrLister = internal.Error("objectService.New failed: container lister not provided") + errEmptySGInfoRecv = internal.Error("objectService.New failed: SG info receiver not provided") + + errInvalidCIDFilter = internal.Error("invalid CID filter") + + errTokenRetrieval = internal.Error("objectService.Put failed on token retrieval") + + errHeaderExpected = internal.Error("expected header as a first message in stream") +) + +var requestSignFunc = service.SignRequestData + +var requestVerifyFunc = core.VerifyRequestWithSignatures + +// New is an Object service server's constructor. +func New(p *Params) (Service, error) { + if p.PutParams.Timeout <= 0 { + p.PutParams.Timeout = defaultPutTimeout + } + + if p.GetParams.Timeout <= 0 { + p.GetParams.Timeout = defaultGetTimeout + } + + if p.DeleteParams.Timeout <= 0 { + p.DeleteParams.Timeout = defaultDeleteTimeout + } + + if p.HeadParams.Timeout <= 0 { + p.HeadParams.Timeout = defaultHeadTimeout + } + + if p.SearchParams.Timeout <= 0 { + p.SearchParams.Timeout = defaultSearchTimeout + } + + if p.RangeParams.Timeout <= 0 { + p.RangeParams.Timeout = defaultRangeTimeout + } + + if p.RangeHashParams.Timeout <= 0 { + p.RangeHashParams.Timeout = defaultRangeHashTimeout + } + + if p.DialTimeout <= 0 { + p.DialTimeout = defaultDialTimeout + } + + if p.PoolSize <= 0 { + p.PoolSize = defaultPoolSize + } + + switch { + case p.TokenStore == nil: + return nil, errEmptyTokenStore + case p.Placer == nil: + return nil, errEmptyPlacer + case p.LocalStore == nil: + return nil, errEmptyLocalStore + case (p.ObjectRestorer == nil || p.Transformer == nil) && p.Assembly: + return nil, errEmptyTransformer + case p.RemoteService == nil: + return nil, errEmptyGRPC + case p.AddressStore == nil: + return nil, errEmptyAddress + case p.Logger == nil: + return nil, errEmptyLogger + case p.EpochReceiver == nil: + return nil, errEmptyEpochReceiver + case p.Key == nil: + return nil, errEmptyPrivateKey + case p.Verifier == nil: + return nil, errEmptyVerifier + case p.IRStorage == nil: + return nil, ir.ErrNilStorage + case p.ContainerLister == nil: + return nil, errEmptyCnrLister + case p.ACLHelper == nil: + return nil, errEmptyACLHelper + case p.BasicACLChecker == nil: + return nil, errEmptyBasicACLChecker + case p.SGInfoReceiver == nil: + return nil, errEmptySGInfoRecv + case p.OwnerKeyVerifier == nil: + return nil, core.ErrNilOwnerKeyVerifier + case p.ExtendedACLSource == nil: + return nil, libacl.ErrNilBinaryExtendedACLStore + } + + pool, err := ants.NewPool(p.PoolSize) + if err != nil { + return nil, errors.Wrap(err, "objectService.New failed: could not create worker pool") + } + + if p.MaxProcessingSize <= 0 { + p.MaxProcessingSize = math.MaxUint64 + } + + if p.StorageCapacity <= 0 { + p.StorageCapacity = math.MaxUint64 + } + + epochRespPreparer := &epochResponsePreparer{ + epochRecv: p.EpochReceiver, + } + + p.targetFinder = &targetFinder{ + log: p.Logger, + irStorage: p.IRStorage, + cnrLister: p.ContainerLister, + cnrOwnerChecker: 
p.ACLHelper, + } + + p.requestActionCalculator = &reqActionCalc{ + extACLChecker: libacl.NewExtendedACLChecker(), + + log: p.Logger, + } + + p.aclInfoReceiver = aclInfoReceiver{ + basicACLGetter: p.ACLHelper, + + basicChecker: p.BasicACLChecker, + + targetFinder: p.targetFinder, + } + + srv := &objectService{ + ls: p.LocalStore, + log: p.Logger, + pPut: p.PutParams, + pGet: p.GetParams, + pDel: p.DeleteParams, + pHead: p.HeadParams, + pSrch: p.SearchParams, + pRng: p.RangeParams, + pRngHash: p.RangeHashParams, + storageCap: p.StorageCapacity, + + requestHandler: &coreRequestHandler{ + preProc: newPreProcessor(p), + postProc: newPostProcessor(), + }, + + respPreparer: &complexResponsePreparer{ + items: []responsePreparer{ + epochRespPreparer, + &aclResponsePreparer{ + aclInfoReceiver: p.aclInfoReceiver, + + reqActCalc: p.requestActionCalculator, + + eaclSrc: p.ExtendedACLSource, + }, + }, + }, + + getChunkPreparer: epochRespPreparer, + + rangeChunkPreparer: epochRespPreparer, + + statusCalculator: serviceStatusCalculator(), + } + + tr, err := NewMultiTransport(MultiTransportParams{ + AddressStore: p.AddressStore, + EpochReceiver: p.EpochReceiver, + RemoteService: p.RemoteService, + Logger: p.Logger, + Key: p.Key, + PutTimeout: p.PutParams.Timeout, + GetTimeout: p.GetParams.Timeout, + HeadTimeout: p.HeadParams.Timeout, + SearchTimeout: p.SearchParams.Timeout, + RangeHashTimeout: p.RangeHashParams.Timeout, + DialTimeout: p.DialTimeout, + + PrivateTokenStore: p.TokenStore, + }) + if err != nil { + return nil, err + } + + exec, err := implementations.NewContainerTraverseExecutor(tr) + if err != nil { + return nil, err + } + + srv.executor, err = implementations.NewObjectContainerHandler(implementations.ObjectContainerHandlerParams{ + NodeLister: p.ContainerNodesLister, + Executor: exec, + Logger: p.Logger, + }) + if err != nil { + return nil, err + } + + local := &localStoreExecutor{ + salitor: p.Salitor, + epochRecv: p.EpochReceiver, + localStore: p.LocalStore, + } + + qvc := &queryVersionController{ + m: make(map[int]localQueryImposer), + } + + qvc.m[1] = &coreQueryImposer{ + fCreator: new(coreFilterCreator), + lsLister: p.LocalStore, + log: p.Logger, + } + + localExec := &localOperationExecutor{ + objRecv: local, + headRecv: local, + objStore: local, + queryImp: qvc, + rngReader: local, + rngHasher: local, + } + + opExec := &coreOperationExecutor{ + pre: new(coreExecParamsComp), + fin: &coreOperationFinalizer{ + curPlacementBuilder: &corePlacementUtil{ + prevNetMap: false, + placementBuilder: p.Placer, + log: p.Logger, + }, + prevPlacementBuilder: &corePlacementUtil{ + prevNetMap: true, + placementBuilder: p.Placer, + log: p.Logger, + }, + interceptorPreparer: &coreInterceptorPreparer{ + localExec: localExec, + addressStore: p.AddressStore, + }, + workerPool: pool, + traverseExec: exec, + resLogger: &coreResultLogger{ + mLog: requestLogMap(p), + log: p.Logger, + }, + log: p.Logger, + }, + loc: localExec, + } + + srv.objSearcher = &coreObjectSearcher{ + executor: opExec, + } + + childLister := &coreChildrenLister{ + queryFn: coreChildrenQueryFunc, + objSearcher: srv.objSearcher, + log: p.Logger, + timeout: p.SearchParams.Timeout, + } + + childrenRecv := &coreChildrenReceiver{ + timeout: p.HeadParams.Timeout, + } + + chopperTable := objio.NewChopperTable() + + relRecv := &neighborReceiver{ + firstChildQueryFn: firstChildQueryFunc, + leftNeighborQueryFn: leftNeighborQueryFunc, + rightNeighborQueryFn: rightNeighborQueryFunc, + rangeDescRecv: &selectiveRangeRecv{executor: srv.executor}, + } + + 
straightObjRecv := &straightObjectReceiver{ + executor: opExec, + } + + rngRecv := &corePayloadRangeReceiver{ + chopTable: chopperTable, + relRecv: relRecv, + payloadRecv: &corePayloadPartReceiver{ + rDataRecv: &straightRangeDataReceiver{ + executor: opExec, + }, + windowController: &simpleWindowController{ + windowSize: p.WindowSize, + }, + }, + mErr: map[error]struct{}{ + localstore.ErrOutOfRange: {}, + }, + log: p.Logger, + } + + coreObjRecv := &coreObjectReceiver{ + straightObjRecv: straightObjRecv, + childLister: childLister, + ancestralRecv: &coreAncestralReceiver{ + childrenRecv: childrenRecv, + objRewinder: &coreObjectRewinder{ + transformer: p.ObjectRestorer, + }, + pRangeRecv: rngRecv, + }, + log: p.Logger, + } + childrenRecv.coreObjRecv = coreObjRecv + srv.objRecv = coreObjRecv + srv.payloadRngRecv = rngRecv + + if !p.Assembly { + coreObjRecv.ancestralRecv, coreObjRecv.childLister = nil, nil + } + + p.headRecv = srv.objRecv + + filter, err := newIncomingObjectFilter(p) + if err != nil { + return nil, err + } + + straightStorer := &straightObjectStorer{ + executor: opExec, + } + + bf, err := basicFilter(p) + if err != nil { + return nil, err + } + + transformerObjStorer := &transformingObjectStorer{ + transformer: p.Transformer, + objStorer: straightStorer, + mErr: map[error]struct{}{ + transformer.ErrInvalidSGLinking: {}, + + implementations.ErrIncompleteSGInfo: {}, + }, + } + + srv.objStorer = &filteringObjectStorer{ + filter: bf, + objStorer: &bifurcatingObjectStorer{ + straightStorer: &filteringObjectStorer{ + filter: filter, + objStorer: &receivingObjectStorer{ + straightStorer: straightStorer, + vPayload: implementations.NewPayloadVerifier(), + }, + }, + tokenStorer: &tokenObjectStorer{ + tokenStore: p.TokenStore, + objStorer: transformerObjStorer, + }, + }, + } + + srv.objRemover = &coreObjRemover{ + delPrep: &coreDelPreparer{ + childLister: childLister, + }, + straightRem: &straightObjRemover{ + tombCreator: new(coreTombCreator), + objStorer: transformerObjStorer, + }, + tokenStore: p.TokenStore, + mErr: map[error]struct{}{}, + log: p.Logger, + } + + srv.rngRecv = &coreRangeReceiver{ + rngRevealer: &coreRngRevealer{ + relativeRecv: relRecv, + chopTable: chopperTable, + }, + straightRngRecv: &straightRangeReceiver{ + executor: opExec, + }, + mErr: map[error]struct{}{ + localstore.ErrOutOfRange: {}, + }, + log: p.Logger, + } + + return srv, nil +} + +func requestLogMap(p *Params) map[object.RequestType]struct{} { + m := make(map[object.RequestType]struct{}) + + if p.PutParams.LogErrors { + m[object.RequestPut] = struct{}{} + } + + if p.GetParams.LogErrors { + m[object.RequestGet] = struct{}{} + } + + if p.HeadParams.LogErrors { + m[object.RequestHead] = struct{}{} + } + + if p.SearchParams.LogErrors { + m[object.RequestSearch] = struct{}{} + } + + if p.RangeParams.LogErrors { + m[object.RequestRange] = struct{}{} + } + + if p.RangeHashParams.LogErrors { + m[object.RequestRangeHash] = struct{}{} + } + + return m +} + +func (s *objectService) Name() string { return "Object Service" } + +func (s *objectService) Register(g *grpc.Server) { object.RegisterServiceServer(g, s) } diff --git a/services/public/object/status.go b/services/public/object/status.go new file mode 100644 index 000000000..f8389c370 --- /dev/null +++ b/services/public/object/status.go @@ -0,0 +1,951 @@ +package object + +import ( + "fmt" + "sync" + + "github.com/golang/protobuf/proto" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/session" + 
"github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/nspcc-dev/neofs-node/lib/localstore" + "github.com/nspcc-dev/neofs-node/lib/transformer" + "github.com/pkg/errors" + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// group of value for status error construction. +type statusInfo struct { + // status code + c codes.Code + // error message + m string + // error details + d []proto.Message +} + +type requestError struct { + // type of request + t object.RequestType + // request handler error + e error +} + +// error implementation used for details attaching. +type detailedError struct { + error + + d []proto.Message +} + +type statusCalculator struct { + *sync.RWMutex + + common map[error]*statusInfo + + custom map[requestError]*statusInfo +} + +const panicLogMsg = "rpc handler caused panic" + +const ( + msgServerPanic = "panic occurred during request processing" + errServerPanic = internal.Error("panic on call handler") +) + +const ( + msgUnauthenticated = "request does not have valid authentication credentials for the operation" + errUnauthenticated = internal.Error("unauthenticated request") +) + +const ( + msgReSigning = "server could not re-sign request" + errReSigning = internal.Error("could not re-sign request") +) + +const ( + msgInvalidTTL = "invalid TTL value" + errInvalidTTL = internal.Error("invalid TTL value") +) + +const ( + msgNotLocalContainer = "server is not presented in container" + errNotLocalContainer = internal.Error("not local container") + descNotLocalContainer = "server is outside container" +) + +const ( + msgContainerAffiliationProblem = "server could not check container affiliation" + errContainerAffiliationProblem = internal.Error("could not check container affiliation") +) + +const ( + msgContainerNotFound = "container not found" + errContainerNotFound = internal.Error("container not found") + descContainerNotFound = "handling a non-existent container" +) + +const ( + msgPlacementProblem = "there were problems building the placement vector on the server" + errPlacementProblem = internal.Error("could not traverse over container") +) + +const ( + msgOverloaded = "system resource overloaded" + errOverloaded = internal.Error("system resource overloaded") +) + +const ( + msgAccessDenied = "access to requested operation is denied" + errAccessDenied = internal.Error("access denied") +) + +const ( + msgPutMessageProblem = "invalid message type" + msgPutNilObject = "object is null" +) + +const ( + msgCutObjectPayload = "lack of object payload data" +) + +const ( + msgMissingTokenKeys = "missing public keys in token" + msgBrokenToken = "token structure failed verification" + msgTokenObjectID = "missing object ID in token" +) + +const ( + msgProcPayloadSize = "max payload size of processing object overflow" + errProcPayloadSize = internal.Error("max processing object payload size overflow") +) + +const ( + msgObjectCreationEpoch = "invalid creation epoch of object" + errObjectFromTheFuture = internal.Error("object from the future") +) + +const ( + msgObjectPayloadSize = "max object payload size overflow" + errObjectPayloadSize = internal.Error("max object payload size overflow") +) + +const ( + msgLocalStorageOverflow = "not enough space in local storage" + errLocalStorageOverflow = internal.Error("local storage overflow") +) + +const ( + msgPayloadChecksum = "invalid payload checksum" + errPayloadChecksum = internal.Error("invalid 
payload checksum") +) + +const ( + msgObjectHeadersVerification = "object headers failed verification" + errObjectHeadersVerification = internal.Error("object headers failed verification") +) + +const ( + msgForwardPutObject = "forward object failure" +) + +const ( + msgPutLocalFailure = "local object put failure" + errPutLocal = internal.Error("local object put failure") +) + +const ( + msgPrivateTokenRecv = "private token receive failure" +) + +const ( + msgInvalidSGLinking = "invalid storage group headers" +) + +const ( + msgIncompleteSGInfo = "collect storage group info failure" +) + +const ( + msgTransformationFailure = "object preparation failure" +) + +const ( + msgWrongSGSize = "wrong storage group size" + errWrongSGSize = internal.Error("wrong storage group size") +) + +const ( + msgWrongSGHash = "wrong storage group homomorphic hash" + errWrongSGHash = internal.Error("wrong storage group homomorphic hash") +) + +const ( + msgObjectNotFound = "object not found" +) + +const ( + msgObjectHeaderNotFound = "object header not found" +) + +const ( + msgNonAssembly = "assembly option is not enabled on the server" +) + +const ( + msgPayloadOutOfRange = "range is out of object payload bounds" +) + +const ( + msgPayloadRangeNotFound = "object payload range not found" + errPayloadRangeNotFound = internal.Error("object payload range not found") +) + +const ( + msgMissingToken = "missing token in request" +) + +const ( + msgPutTombstone = "could not store tombstone" +) + +const ( + msgDeletePrepare = "delete information preparation failure" + errDeletePrepare = internal.Error("delete information preparation failure") +) + +const ( + msgQueryVersion = "unsupported query version" +) + +const ( + msgSearchQueryUnmarshal = "query unmarshal failure" +) + +const ( + msgLocalQueryImpose = "local query imposing failure" +) + +var mStatusCommon = map[error]*statusInfo{ + // RPC implementation recovered panic + errServerPanic: { + c: codes.Internal, + m: msgServerPanic, + }, + // Request authentication credentials problem + errUnauthenticated: { + c: codes.Unauthenticated, + m: msgUnauthenticated, + d: requestAuthDetails(), + }, + // Request re-signing problem + errReSigning: { + c: codes.Internal, + m: msgReSigning, + }, + // Invalid request TTL + errInvalidTTL: { + c: codes.InvalidArgument, + m: msgInvalidTTL, + d: invalidTTLDetails(), + }, + // Container affiliation check problem + errContainerAffiliationProblem: { + c: codes.Internal, + m: msgContainerAffiliationProblem, + }, + // Server is outside container + errNotLocalContainer: { + c: codes.FailedPrecondition, + m: msgNotLocalContainer, + d: containerAbsenceDetails(), + }, + // Container not found in storage + errContainerNotFound: { + c: codes.NotFound, + m: msgContainerNotFound, + }, + // Container placement build problem + errPlacementProblem: { + c: codes.Internal, + m: msgPlacementProblem, + }, + // System resource overloaded + errOverloaded: { + c: codes.Unavailable, + m: msgOverloaded, + }, + // Access violations + errAccessDenied: { + c: codes.PermissionDenied, + m: msgAccessDenied, + }, + // Maximum processing payload size overflow + errProcPayloadSize: { + c: codes.FailedPrecondition, + m: msgProcPayloadSize, + d: nil, // TODO: NSPCC-1048 + }, +} + +var mStatusCustom = map[requestError]*statusInfo{ + // Invalid first message in Put client stream + { + t: object.RequestPut, + e: errHeaderExpected, + }: { + c: codes.InvalidArgument, + m: msgPutMessageProblem, + d: putFirstMessageDetails(), + }, + // Nil object in Put request + { + t: 
object.RequestPut, + e: errObjectExpected, + }: { + c: codes.InvalidArgument, + m: msgPutNilObject, + d: putNilObjectDetails(), + }, + // Lack of object payload data + { + t: object.RequestPut, + e: transformer.ErrPayloadEOF, + }: { + c: codes.InvalidArgument, + m: msgCutObjectPayload, + d: payloadSizeDetails(), + }, + // Lack of public keys in the token + { + t: object.RequestPut, + e: errMissingOwnerKeys, + }: { + c: codes.PermissionDenied, + m: msgMissingTokenKeys, + d: tokenKeysDetails(), + }, + // Broken token structure + { + t: object.RequestPut, + e: errBrokenToken, + }: { + c: codes.PermissionDenied, + m: msgBrokenToken, + }, + // Missing object ID in token + { + t: object.RequestPut, + e: errWrongTokenAddress, + }: { + c: codes.PermissionDenied, + m: msgTokenObjectID, + d: tokenOIDDetails(), + }, + // Invalid after-first message in stream + { + t: object.RequestPut, + e: errChunkExpected, + }: { + c: codes.InvalidArgument, + m: msgPutMessageProblem, + d: putChunkMessageDetails(), + }, + { + t: object.RequestPut, + e: errObjectFromTheFuture, + }: { + c: codes.FailedPrecondition, + m: msgObjectCreationEpoch, + d: nil, // TODO: NSPCC-1048 + }, + { + t: object.RequestPut, + e: errObjectPayloadSize, + }: { + c: codes.FailedPrecondition, + m: msgObjectPayloadSize, + d: nil, // TODO: NSPCC-1048 + }, + { + t: object.RequestPut, + e: errLocalStorageOverflow, + }: { + c: codes.Unavailable, + m: msgLocalStorageOverflow, + d: localStorageOverflowDetails(), + }, + { + t: object.RequestPut, + e: errPayloadChecksum, + }: { + c: codes.InvalidArgument, + m: msgPayloadChecksum, + d: payloadChecksumHeaderDetails(), + }, + { + t: object.RequestPut, + e: errObjectHeadersVerification, + }: { + c: codes.InvalidArgument, + m: msgObjectHeadersVerification, + }, + { + t: object.RequestPut, + e: errIncompleteOperation, + }: { + c: codes.Unavailable, + m: msgForwardPutObject, + }, + { + t: object.RequestPut, + e: errPutLocal, + }: { + c: codes.Internal, + m: msgPutLocalFailure, + }, + { + t: object.RequestPut, + e: errTokenRetrieval, + }: { + c: codes.Aborted, + m: msgPrivateTokenRecv, + }, + { + t: object.RequestPut, + e: transformer.ErrInvalidSGLinking, + }: { + c: codes.InvalidArgument, + m: msgInvalidSGLinking, + d: sgLinkingDetails(), + }, + { + t: object.RequestPut, + e: implementations.ErrIncompleteSGInfo, + }: { + c: codes.NotFound, + m: msgIncompleteSGInfo, + }, + { + t: object.RequestPut, + e: errTransformer, + }: { + c: codes.Internal, + m: msgTransformationFailure, + }, + { + t: object.RequestPut, + e: errWrongSGSize, + }: { + c: codes.InvalidArgument, + m: msgWrongSGSize, + }, + { + t: object.RequestPut, + e: errWrongSGHash, + }: { + c: codes.InvalidArgument, + m: msgWrongSGHash, + }, + { + t: object.RequestGet, + e: errIncompleteOperation, + }: { + c: codes.NotFound, + m: msgObjectNotFound, + }, + { + t: object.RequestHead, + e: errIncompleteOperation, + }: { + c: codes.NotFound, + m: msgObjectHeaderNotFound, + }, + { + t: object.RequestGet, + e: errNonAssembly, + }: { + c: codes.Unimplemented, + m: msgNonAssembly, + }, + { + t: object.RequestHead, + e: errNonAssembly, + }: { + c: codes.Unimplemented, + m: msgNonAssembly, + }, + { + t: object.RequestGet, + e: childrenNotFound, + }: { + c: codes.NotFound, + m: msgObjectNotFound, + }, + { + t: object.RequestHead, + e: childrenNotFound, + }: { + c: codes.NotFound, + m: msgObjectHeaderNotFound, + }, + { + t: object.RequestRange, + e: localstore.ErrOutOfRange, + }: { + c: codes.OutOfRange, + m: msgPayloadOutOfRange, + }, + { + t: 
object.RequestRange, + e: errPayloadRangeNotFound, + }: { + c: codes.NotFound, + m: msgPayloadRangeNotFound, + }, + { + t: object.RequestDelete, + e: errNilToken, + }: { + c: codes.InvalidArgument, + m: msgMissingToken, + d: missingTokenDetails(), + }, + { + t: object.RequestDelete, + e: errMissingOwnerKeys, + }: { + c: codes.PermissionDenied, + m: msgMissingTokenKeys, + d: tokenKeysDetails(), + }, + { + t: object.RequestDelete, + e: errBrokenToken, + }: { + c: codes.PermissionDenied, + m: msgBrokenToken, + }, + { + t: object.RequestDelete, + e: errWrongTokenAddress, + }: { + c: codes.PermissionDenied, + m: msgTokenObjectID, + d: tokenOIDDetails(), + }, + { + t: object.RequestDelete, + e: errTokenRetrieval, + }: { + c: codes.Aborted, + m: msgPrivateTokenRecv, + }, + { + t: object.RequestDelete, + e: errIncompleteOperation, + }: { + c: codes.Unavailable, + m: msgPutTombstone, + }, + { + t: object.RequestDelete, + e: errDeletePrepare, + }: { + c: codes.Internal, + m: msgDeletePrepare, + }, + { + t: object.RequestSearch, + e: errUnsupportedQueryVersion, + }: { + c: codes.Unimplemented, + m: msgQueryVersion, + }, + { + t: object.RequestSearch, + e: errSearchQueryUnmarshal, + }: { + c: codes.InvalidArgument, + m: msgSearchQueryUnmarshal, + }, + { + t: object.RequestSearch, + e: errLocalQueryImpose, + }: { + c: codes.Internal, + m: msgLocalQueryImpose, + }, + { + t: object.RequestRangeHash, + e: errPayloadRangeNotFound, + }: { + c: codes.NotFound, + m: msgPayloadRangeNotFound, + }, + { + t: object.RequestRangeHash, + e: localstore.ErrOutOfRange, + }: { + c: codes.OutOfRange, + m: msgPayloadOutOfRange, + }, +} + +func serviceStatusCalculator() *statusCalculator { + s := newStatusCalculator() + + for k, v := range mStatusCommon { + s.addCommon(k, v) + } + + for k, v := range mStatusCustom { + s.addCustom(k, v) + } + + return s +} + +func statusError(v *statusInfo) (bool, error) { + st, err := status.New(v.c, v.m).WithDetails(v.d...) + if err != nil { + return false, nil + } + + return true, st.Err() +} + +func (s *statusCalculator) addCommon(k error, v *statusInfo) { + s.Lock() + s.common[k] = v + s.Unlock() +} + +func (s *statusCalculator) addCustom(k requestError, v *statusInfo) { + s.Lock() + s.custom[k] = v + s.Unlock() +} + +func (s *statusCalculator) make(e requestError) error { + s.RLock() + defer s.RUnlock() + + var ( + ok bool + v *statusInfo + d []proto.Message + err = errors.Cause(e.e) + ) + + if v, ok := err.(*detailedError); ok { + d = v.d + err = v.error + } else if v, ok := err.(detailedError); ok { + d = v.d + err = v.error + } + + if v, ok = s.common[err]; !ok { + if v, ok = s.custom[requestError{ + t: e.t, + e: err, + }]; !ok { + return e.e + } + } + + vv := *v + + vv.d = append(vv.d, d...) 
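+	// vv now carries both the details registered for the matched status entry and
+	// any details attached to the error value itself; statusError builds the final
+	// gRPC status from this merged set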
+ + if ok, res := statusError(&vv); ok { + return res + } + + return e.e +} + +func newStatusCalculator() *statusCalculator { + return &statusCalculator{ + RWMutex: new(sync.RWMutex), + common: make(map[error]*statusInfo), + custom: make(map[requestError]*statusInfo), + } +} + +func requestAuthDetails() []proto.Message { + return []proto.Message{ + &errdetails.BadRequest{ + FieldViolations: []*errdetails.BadRequest_FieldViolation{ + { + Field: "Signatures", + Description: "should be formed according to VerificationHeader signing", + }, + }, + }, + } +} + +func invalidTTLDetails() []proto.Message { + return []proto.Message{ + &errdetails.BadRequest{ + FieldViolations: []*errdetails.BadRequest_FieldViolation{ + { + Field: "TTL", + Description: "should greater or equal than NonForwardingTTL", + }, + }, + }, + } +} + +func containerAbsenceDetails() []proto.Message { + return []proto.Message{ + &errdetails.PreconditionFailure{ + Violations: []*errdetails.PreconditionFailure_Violation{ + { + Type: "container options", + Subject: "container nodes", + Description: "server node should be presented container", + }, + }, + }, + } +} + +func containerDetails(cid CID, desc string) []proto.Message { + return []proto.Message{ + &errdetails.ResourceInfo{ + ResourceType: "container", + ResourceName: cid.String(), + Description: desc, + }, + } +} + +func putFirstMessageDetails() []proto.Message { + return []proto.Message{ + &errdetails.BadRequest{ + FieldViolations: []*errdetails.BadRequest_FieldViolation{ + { + Field: "R", + Description: "should be PutRequest_Header", + }, + }, + }, + } +} + +func putChunkMessageDetails() []proto.Message { + return []proto.Message{ + &errdetails.BadRequest{ + FieldViolations: []*errdetails.BadRequest_FieldViolation{ + { + Field: "R", + Description: "should be PutRequest_Chunk", + }, + { + Field: "R.Chunk", + Description: "should not be empty", + }, + }, + }, + } +} + +func putNilObjectDetails() []proto.Message { + return []proto.Message{ + &errdetails.BadRequest{ + FieldViolations: []*errdetails.BadRequest_FieldViolation{ + { + Field: "R.Object", + Description: "should not be null", + }, + }, + }, + } +} + +func payloadSizeDetails() []proto.Message { + return []proto.Message{ + &errdetails.BadRequest{ + FieldViolations: []*errdetails.BadRequest_FieldViolation{ + { + Field: "R.Object.SystemHeader.PayloadLength", + Description: "should be equal to the sum of the sizes of the streaming payload chunks", + }, + }, + }, + } +} + +func tokenKeysDetails() []proto.Message { + return []proto.Message{ + &errdetails.BadRequest{ + FieldViolations: []*errdetails.BadRequest_FieldViolation{ + { + Field: "R.Token.PublicKeys", + Description: "should be non-empty list of marshaled ecdsa public keys", + }, + }, + }, + } +} + +func tokenOIDDetails() []proto.Message { + return []proto.Message{ + &errdetails.BadRequest{ + FieldViolations: []*errdetails.BadRequest_FieldViolation{ + { + Field: "R.Token.ObjectID", + Description: "should contain requested object", + }, + }, + }, + } +} + +func maxProcPayloadSizeDetails(sz uint64) []proto.Message { + return []proto.Message{ + &errdetails.PreconditionFailure{ + Violations: []*errdetails.PreconditionFailure_Violation{ + { + Type: "object requirements", + Subject: "max processing payload size", + Description: fmt.Sprintf("should not be greater than %d bytes", sz), + }, + }, + }, + } +} + +func objectCreationEpochDetails(e uint64) []proto.Message { + return []proto.Message{ + &errdetails.PreconditionFailure{ + Violations: 
[]*errdetails.PreconditionFailure_Violation{ + { + Type: "object requirements", + Subject: "creation epoch", + Description: fmt.Sprintf("should not be greater than %d", e), + }, + }, + }, + } +} + +func maxObjectPayloadSizeDetails(sz uint64) []proto.Message { + return []proto.Message{ + &errdetails.PreconditionFailure{ + Violations: []*errdetails.PreconditionFailure_Violation{ + { + Type: "object requirements", + Subject: "max object payload size", + Description: fmt.Sprintf("should not be greater than %d bytes", sz), + }, + }, + }, + } +} + +func localStorageOverflowDetails() []proto.Message { + return []proto.Message{ + &errdetails.ResourceInfo{ + ResourceType: "local storage", + ResourceName: "disk storage", + Description: "not enough space", + }, + } +} + +func payloadChecksumHeaderDetails() []proto.Message { + return []proto.Message{ + &errdetails.BadRequest{ + FieldViolations: []*errdetails.BadRequest_FieldViolation{ + { + Field: "R.Object.Headers", + Description: "should contain correct payload checksum header", + }, + }, + }, + } +} + +func objectHeadersVerificationDetails(e error) []proto.Message { + return []proto.Message{ + &errdetails.BadRequest{ + FieldViolations: []*errdetails.BadRequest_FieldViolation{ + { + Field: "R.Object.Headers", + Description: e.Error(), + }, + }, + }, + } +} + +func privateTokenRecvDetails(id session.TokenID, owner OwnerID) []proto.Message { + return []proto.Message{ + &errdetails.ResourceInfo{ + ResourceType: "private token", + ResourceName: id.String(), + Owner: owner.String(), + Description: "problems with getting a private token", + }, + } +} + +func sgLinkingDetails() []proto.Message { + return []proto.Message{ + &errdetails.BadRequest{ + FieldViolations: []*errdetails.BadRequest_FieldViolation{ + { + Field: "R.Object.Headers", + Description: "should not contain Header_StorageGroup and Link_StorageGroup or should contain both", + }, + }, + }, + } +} + +func sgSizeDetails(exp, act uint64) []proto.Message { + return []proto.Message{ + &errdetails.BadRequest{ + FieldViolations: []*errdetails.BadRequest_FieldViolation{ + { + Field: "R.Object.Headers", + Description: fmt.Sprintf("wrong storage group size: expected %d, collected %d", exp, act), + }, + }, + }, + } +} + +func sgHashDetails(exp, act Hash) []proto.Message { + return []proto.Message{ + &errdetails.BadRequest{ + FieldViolations: []*errdetails.BadRequest_FieldViolation{ + { + Field: "R.Object.Headers", + Description: fmt.Sprintf("wrong storage group hash: expected %s, collected %s", exp, act), + }, + }, + }, + } +} + +func missingTokenDetails() []proto.Message { + return []proto.Message{ + &errdetails.BadRequest{ + FieldViolations: []*errdetails.BadRequest_FieldViolation{ + { + Field: "Token", + Description: "should not be null", + }, + }, + }, + } +} diff --git a/services/public/object/status_test.go b/services/public/object/status_test.go new file mode 100644 index 000000000..b076fec83 --- /dev/null +++ b/services/public/object/status_test.go @@ -0,0 +1,1210 @@ +package object + +import ( + "context" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/session" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/nspcc-dev/neofs-node/lib/localstore" + "github.com/nspcc-dev/neofs-node/lib/test" + "github.com/nspcc-dev/neofs-node/lib/transformer" + "github.com/stretchr/testify/require" + "google.golang.org/genproto/googleapis/rpc/errdetails" + 
"google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type testPanickingHandler struct{} + +func (*testPanickingHandler) handleRequest(context.Context, handleRequestParams) (interface{}, error) { + panic("panicking handler") +} + +func TestStatusCalculator(t *testing.T) { + t.Run("unknown error", func(t *testing.T) { + e := internal.Error("error for test") + + s := newStatusCalculator() + + require.Equal(t, e, s.make(requestError{ + e: e, + })) + }) + + t.Run("common error", func(t *testing.T) { + v := &statusInfo{ + c: codes.Aborted, + m: "test error message", + d: []proto.Message{ + &errdetails.ResourceInfo{ + ResourceType: "type", + ResourceName: "name", + Owner: "owner", + Description: "description", + }, + }, + } + + s := newStatusCalculator() + + e := internal.Error("error for test") + + s.addCommon(e, v) + + ok, err := statusError(v) + require.True(t, ok) + + require.Equal(t, + err, + s.make(requestError{ + e: e, + }), + ) + }) + + t.Run("custom error", func(t *testing.T) { + var ( + c1, c2 = codes.Aborted, codes.AlreadyExists + t1, t2 = object.RequestPut, object.RequestGet + e1, e2 = internal.Error("test error 1"), internal.Error("test error 2") + m1, m2 = "message 1", "message 2" + ) + + s := newStatusCalculator() + + s1 := &statusInfo{ + c: c1, + m: m1, + } + + re1 := requestError{ + t: t1, + e: e1, + } + + s.addCustom(re1, s1) + + s2 := &statusInfo{ + c: c2, + m: m2, + } + + r2 := requestError{ + t: t2, + e: e2, + } + + s.addCustom(r2, s2) + + ok, err1 := statusError(s1) + require.True(t, ok) + + ok, err2 := statusError(s2) + require.True(t, ok) + + require.Equal(t, + err1, + s.make(re1), + ) + + require.Equal(t, + err2, + s.make(r2), + ) + }) +} + +func testStatusCommon(t *testing.T, h requestHandler, c codes.Code, m string, d []interface{}) { + ctx := context.TODO() + + s := &objectService{ + log: test.NewTestLogger(false), + requestHandler: h, + statusCalculator: serviceStatusCalculator(), + } + + errPut := s.Put(&testPutEntity{ + res: object.MakePutRequestHeader(new(Object)), + }) + + errGet := s.Get(new(object.GetRequest), new(testGetEntity)) + + _, errHead := s.Head(ctx, new(object.HeadRequest)) + + _, errDelete := s.Head(ctx, new(object.HeadRequest)) + + errRange := s.GetRange(new(GetRangeRequest), new(testRangeEntity)) + + _, errRangeHash := s.GetRangeHash(ctx, new(object.GetRangeHashRequest)) + + errSearch := s.Search(new(object.SearchRequest), new(testSearchEntity)) + + errs := []error{ + errPut, + errGet, + errHead, + errRange, + errRangeHash, + errSearch, + errDelete, + } + + for _, err := range errs { + st, ok := status.FromError(err) + require.True(t, ok) + + require.Equal(t, c, st.Code()) + require.Equal(t, m, st.Message()) + require.Equal(t, d, st.Details()) + } +} + +func TestStatusCommon(t *testing.T) { + t.Run("handler panic", func(t *testing.T) { + ds := make([]interface{}, 0) + + testStatusCommon(t, + new(testPanickingHandler), + codes.Internal, + msgServerPanic, + ds, + ) + }) + + t.Run("request authentication", func(t *testing.T) { + ds := make([]interface{}, 0) + + for _, d := range requestAuthDetails() { + ds = append(ds, d) + } + + testStatusCommon(t, + &testPutEntity{ + err: errUnauthenticated, + }, + codes.Unauthenticated, + msgUnauthenticated, + ds, + ) + }) + + t.Run("re-signing problem", func(t *testing.T) { + ds := make([]interface{}, 0) + + testStatusCommon(t, + &testPutEntity{ + err: errReSigning, + }, + codes.Internal, + msgReSigning, + ds, + ) + }) + + t.Run("invalid TTL", func(t *testing.T) { + ds := make([]interface{}, 0) + 
+ for _, d := range invalidTTLDetails() { + ds = append(ds, d) + } + + testStatusCommon(t, + &testPutEntity{ + err: errInvalidTTL, + }, + codes.InvalidArgument, + msgInvalidTTL, + ds, + ) + }) + + t.Run("container affiliation problem", func(t *testing.T) { + ds := make([]interface{}, 0) + + testStatusCommon(t, + &testPutEntity{ + err: errContainerAffiliationProblem, + }, + codes.Internal, + msgContainerAffiliationProblem, + ds, + ) + }) + + t.Run("container not found", func(t *testing.T) { + ds := make([]interface{}, 0) + + testStatusCommon(t, + &testPutEntity{ + err: errContainerNotFound, + }, + codes.NotFound, + msgContainerNotFound, + ds, + ) + }) + + t.Run("server is missing in container", func(t *testing.T) { + ds := make([]interface{}, 0) + + for _, d := range containerAbsenceDetails() { + ds = append(ds, d) + } + + testStatusCommon(t, + &testPutEntity{ + err: errNotLocalContainer, + }, + codes.FailedPrecondition, + msgNotLocalContainer, + ds, + ) + }) + + t.Run("placement problem", func(t *testing.T) { + ds := make([]interface{}, 0) + + testStatusCommon(t, + &testPutEntity{ + err: errPlacementProblem, + }, + codes.Internal, + msgPlacementProblem, + ds, + ) + }) + + t.Run("system resource overloaded", func(t *testing.T) { + ds := make([]interface{}, 0) + + testStatusCommon(t, + &testPutEntity{ + err: errOverloaded, + }, + codes.Unavailable, + msgOverloaded, + ds, + ) + }) + + t.Run("access denied", func(t *testing.T) { + ds := make([]interface{}, 0) + + testStatusCommon(t, + &testPutEntity{ + err: errAccessDenied, + }, + codes.PermissionDenied, + msgAccessDenied, + ds, + ) + }) + + t.Run("max processing payload size overflow", func(t *testing.T) { + maxSz := uint64(100) + + ds := make([]interface{}, 0) + + for _, d := range maxProcPayloadSizeDetails(maxSz) { + ds = append(ds, d) + } + + testStatusCommon(t, + &testPutEntity{ + err: &detailedError{ + error: errProcPayloadSize, + d: maxProcPayloadSizeDetails(maxSz), + }, + }, + codes.FailedPrecondition, + msgProcPayloadSize, + ds, + ) + }) +} + +func testStatusPut(t *testing.T, h requestHandler, srv object.Service_PutServer, info statusInfo, d []interface{}) { + s := &objectService{ + log: test.NewTestLogger(false), + requestHandler: h, + statusCalculator: serviceStatusCalculator(), + } + + err := s.Put(srv) + + st, ok := status.FromError(err) + require.True(t, ok) + + require.Equal(t, info.c, st.Code()) + require.Equal(t, info.m, st.Message()) + require.Equal(t, d, st.Details()) +} + +func TestStatusPut(t *testing.T) { + t.Run("invalid first message type", func(t *testing.T) { + ds := make([]interface{}, 0) + + for _, d := range putFirstMessageDetails() { + ds = append(ds, d) + } + + srv := &testPutEntity{ + res: object.MakePutRequestChunk(nil), + } + + info := statusInfo{ + c: codes.InvalidArgument, + m: msgPutMessageProblem, + } + + testStatusPut(t, nil, srv, info, ds) + }) + + t.Run("invalid first message type", func(t *testing.T) { + ds := make([]interface{}, 0) + + for _, d := range putNilObjectDetails() { + ds = append(ds, d) + } + + srv := &testPutEntity{ + res: object.MakePutRequestHeader(nil), + } + + info := statusInfo{ + c: codes.InvalidArgument, + m: msgPutNilObject, + } + + testStatusPut(t, nil, srv, info, ds) + }) + + t.Run("invalid first message type", func(t *testing.T) { + ds := make([]interface{}, 0) + + for _, d := range payloadSizeDetails() { + ds = append(ds, d) + } + + srv := &testPutEntity{ + res: object.MakePutRequestHeader(new(Object)), + } + + h := &testPutEntity{ + err: transformer.ErrPayloadEOF, + } + + info 
:= statusInfo{ + c: codes.InvalidArgument, + m: msgCutObjectPayload, + } + + testStatusPut(t, h, srv, info, ds) + }) + + t.Run("token w/o public keys", func(t *testing.T) { + ds := make([]interface{}, 0) + + for _, d := range tokenKeysDetails() { + ds = append(ds, d) + } + + srv := &testPutEntity{ + res: object.MakePutRequestHeader(new(Object)), + } + + h := &testPutEntity{ + err: errMissingOwnerKeys, + } + + info := statusInfo{ + c: codes.PermissionDenied, + m: msgMissingTokenKeys, + } + + testStatusPut(t, h, srv, info, ds) + }) + + t.Run("broken token", func(t *testing.T) { + ds := make([]interface{}, 0) + + srv := &testPutEntity{ + res: object.MakePutRequestHeader(new(Object)), + } + + h := &testPutEntity{ + err: errBrokenToken, + } + + info := statusInfo{ + c: codes.PermissionDenied, + m: msgBrokenToken, + } + + testStatusPut(t, h, srv, info, ds) + }) + + t.Run("missing object in token", func(t *testing.T) { + ds := make([]interface{}, 0) + + for _, d := range tokenOIDDetails() { + ds = append(ds, d) + } + + srv := &testPutEntity{ + res: object.MakePutRequestHeader(new(Object)), + } + + h := &testPutEntity{ + err: errWrongTokenAddress, + } + + info := statusInfo{ + c: codes.PermissionDenied, + m: msgTokenObjectID, + } + + testStatusPut(t, h, srv, info, ds) + }) + + t.Run("object from future", func(t *testing.T) { + e := uint64(3) + + ds := make([]interface{}, 0) + + for _, d := range objectCreationEpochDetails(e) { + ds = append(ds, d) + } + + srv := &testPutEntity{ + res: object.MakePutRequestHeader(new(Object)), + } + + h := &testPutEntity{ + err: &detailedError{ + error: errObjectFromTheFuture, + d: objectCreationEpochDetails(e), + }, + } + + info := statusInfo{ + c: codes.FailedPrecondition, + m: msgObjectCreationEpoch, + } + + testStatusPut(t, h, srv, info, ds) + }) + + t.Run("max object payload size", func(t *testing.T) { + sz := uint64(3) + + ds := make([]interface{}, 0) + + for _, d := range maxObjectPayloadSizeDetails(sz) { + ds = append(ds, d) + } + + srv := &testPutEntity{ + res: object.MakePutRequestHeader(new(Object)), + } + + h := &testPutEntity{ + err: &detailedError{ + error: errObjectPayloadSize, + d: maxObjectPayloadSizeDetails(sz), + }, + } + + info := statusInfo{ + c: codes.FailedPrecondition, + m: msgObjectPayloadSize, + } + + testStatusPut(t, h, srv, info, ds) + }) + + t.Run("local storage overflow", func(t *testing.T) { + ds := make([]interface{}, 0) + + for _, d := range localStorageOverflowDetails() { + ds = append(ds, d) + } + + srv := &testPutEntity{ + res: object.MakePutRequestHeader(new(Object)), + } + + h := &testPutEntity{ + err: errLocalStorageOverflow, + } + + info := statusInfo{ + c: codes.Unavailable, + m: msgLocalStorageOverflow, + } + + testStatusPut(t, h, srv, info, ds) + }) + + t.Run("invalid payload checksum", func(t *testing.T) { + ds := make([]interface{}, 0) + + for _, d := range payloadChecksumHeaderDetails() { + ds = append(ds, d) + } + + srv := &testPutEntity{ + res: object.MakePutRequestHeader(new(Object)), + } + + h := &testPutEntity{ + err: errPayloadChecksum, + } + + info := statusInfo{ + c: codes.InvalidArgument, + m: msgPayloadChecksum, + } + + testStatusPut(t, h, srv, info, ds) + }) + + t.Run("invalid object header structure", func(t *testing.T) { + e := internal.Error("test error") + + ds := make([]interface{}, 0) + + for _, d := range objectHeadersVerificationDetails(e) { + ds = append(ds, d) + } + + srv := &testPutEntity{ + res: object.MakePutRequestHeader(new(Object)), + } + + h := &testPutEntity{ + err: &detailedError{ + error: 
errObjectHeadersVerification, + d: objectHeadersVerificationDetails(e), + }, + } + + info := statusInfo{ + c: codes.InvalidArgument, + m: msgObjectHeadersVerification, + } + + testStatusPut(t, h, srv, info, ds) + }) + + t.Run("put generated object failure", func(t *testing.T) { + ds := make([]interface{}, 0) + + srv := &testPutEntity{ + res: object.MakePutRequestHeader(new(Object)), + } + + h := &testPutEntity{ + err: errIncompleteOperation, + } + + info := statusInfo{ + c: codes.Unavailable, + m: msgForwardPutObject, + } + + testStatusPut(t, h, srv, info, ds) + }) + + t.Run("private token receive failure", func(t *testing.T) { + owner := OwnerID{1, 2, 3} + tokenID := session.TokenID{4, 5, 6} + + ds := make([]interface{}, 0) + + for _, d := range privateTokenRecvDetails(tokenID, owner) { + ds = append(ds, d) + } + + srv := &testPutEntity{ + res: object.MakePutRequestHeader(new(Object)), + } + + h := &testPutEntity{ + err: &detailedError{ + error: errTokenRetrieval, + d: privateTokenRecvDetails(tokenID, owner), + }, + } + + info := statusInfo{ + c: codes.Aborted, + m: msgPrivateTokenRecv, + } + + testStatusPut(t, h, srv, info, ds) + }) + + t.Run("invalid SG headers", func(t *testing.T) { + ds := make([]interface{}, 0) + + for _, d := range sgLinkingDetails() { + ds = append(ds, d) + } + + srv := &testPutEntity{ + res: object.MakePutRequestHeader(new(Object)), + } + + h := &testPutEntity{ + err: transformer.ErrInvalidSGLinking, + } + + info := statusInfo{ + c: codes.InvalidArgument, + m: msgInvalidSGLinking, + } + + testStatusPut(t, h, srv, info, ds) + }) + + t.Run("incomplete SG info", func(t *testing.T) { + ds := make([]interface{}, 0) + + srv := &testPutEntity{ + res: object.MakePutRequestHeader(new(Object)), + } + + h := &testPutEntity{ + err: implementations.ErrIncompleteSGInfo, + } + + info := statusInfo{ + c: codes.NotFound, + m: msgIncompleteSGInfo, + } + + testStatusPut(t, h, srv, info, ds) + }) + + t.Run("object transformation failure", func(t *testing.T) { + ds := make([]interface{}, 0) + + srv := &testPutEntity{ + res: object.MakePutRequestHeader(new(Object)), + } + + h := &testPutEntity{ + err: errTransformer, + } + + info := statusInfo{ + c: codes.Internal, + m: msgTransformationFailure, + } + + testStatusPut(t, h, srv, info, ds) + }) + + t.Run("wrong SG size", func(t *testing.T) { + var exp, act uint64 = 1, 2 + + ds := make([]interface{}, 0) + + for _, d := range sgSizeDetails(exp, act) { + ds = append(ds, d) + } + + srv := &testPutEntity{ + res: object.MakePutRequestHeader(new(Object)), + } + + h := &testPutEntity{ + err: &detailedError{ + error: errWrongSGSize, + d: sgSizeDetails(exp, act), + }, + } + + info := statusInfo{ + c: codes.InvalidArgument, + m: msgWrongSGSize, + } + + testStatusPut(t, h, srv, info, ds) + }) + + t.Run("wrong SG size", func(t *testing.T) { + var exp, act = Hash{1}, Hash{2} + + ds := make([]interface{}, 0) + + for _, d := range sgHashDetails(exp, act) { + ds = append(ds, d) + } + + srv := &testPutEntity{ + res: object.MakePutRequestHeader(new(Object)), + } + + h := &testPutEntity{ + err: &detailedError{ + error: errWrongSGHash, + d: sgHashDetails(exp, act), + }, + } + + info := statusInfo{ + c: codes.InvalidArgument, + m: msgWrongSGHash, + } + + testStatusPut(t, h, srv, info, ds) + }) +} + +func testStatusGet(t *testing.T, h requestHandler, srv object.Service_GetServer, info statusInfo, d []interface{}) { + s := &objectService{ + log: test.NewTestLogger(false), + requestHandler: h, + statusCalculator: serviceStatusCalculator(), + } + + err := 
s.Get(new(object.GetRequest), srv) + + st, ok := status.FromError(err) + require.True(t, ok) + + require.Equal(t, info.c, st.Code()) + require.Equal(t, info.m, st.Message()) + require.Equal(t, d, st.Details()) +} + +func TestStatusGet(t *testing.T) { + t.Run("object not found", func(t *testing.T) { + ds := make([]interface{}, 0) + + srv := new(testGetEntity) + + h := &testGetEntity{ + err: errIncompleteOperation, + } + + info := statusInfo{ + c: codes.NotFound, + m: msgObjectNotFound, + } + + testStatusGet(t, h, srv, info, ds) + }) + + t.Run("non-assembly", func(t *testing.T) { + ds := make([]interface{}, 0) + + srv := new(testGetEntity) + + h := &testGetEntity{ + err: errNonAssembly, + } + + info := statusInfo{ + c: codes.Unimplemented, + m: msgNonAssembly, + } + + testStatusGet(t, h, srv, info, ds) + }) + + t.Run("children not found", func(t *testing.T) { + ds := make([]interface{}, 0) + + srv := new(testGetEntity) + + h := &testGetEntity{ + err: childrenNotFound, + } + + info := statusInfo{ + c: codes.NotFound, + m: msgObjectNotFound, + } + + testStatusGet(t, h, srv, info, ds) + }) +} + +func testStatusHead(t *testing.T, h requestHandler, info statusInfo, d []interface{}) { + s := &objectService{ + log: test.NewTestLogger(false), + requestHandler: h, + statusCalculator: serviceStatusCalculator(), + } + + _, err := s.Head(context.TODO(), new(object.HeadRequest)) + + st, ok := status.FromError(err) + require.True(t, ok) + + require.Equal(t, info.c, st.Code()) + require.Equal(t, info.m, st.Message()) + require.Equal(t, d, st.Details()) +} + +func TestStatusHead(t *testing.T) { + t.Run("object not found", func(t *testing.T) { + ds := make([]interface{}, 0) + + h := &testHeadEntity{ + err: errIncompleteOperation, + } + + info := statusInfo{ + c: codes.NotFound, + m: msgObjectHeaderNotFound, + } + + testStatusHead(t, h, info, ds) + }) + + t.Run("non-assembly", func(t *testing.T) { + ds := make([]interface{}, 0) + + h := &testHeadEntity{ + err: errNonAssembly, + } + + info := statusInfo{ + c: codes.Unimplemented, + m: msgNonAssembly, + } + + testStatusHead(t, h, info, ds) + }) + + t.Run("children not found", func(t *testing.T) { + ds := make([]interface{}, 0) + + h := &testHeadEntity{ + err: childrenNotFound, + } + + info := statusInfo{ + c: codes.NotFound, + m: msgObjectHeaderNotFound, + } + + testStatusHead(t, h, info, ds) + }) +} + +func testStatusGetRange(t *testing.T, h requestHandler, srv object.Service_GetRangeServer, info statusInfo, d []interface{}) { + s := &objectService{ + log: test.NewTestLogger(false), + requestHandler: h, + statusCalculator: serviceStatusCalculator(), + } + + err := s.GetRange(new(GetRangeRequest), srv) + + st, ok := status.FromError(err) + require.True(t, ok) + + require.Equal(t, info.c, st.Code()) + require.Equal(t, info.m, st.Message()) + require.Equal(t, d, st.Details()) +} + +func TestStatusGetRange(t *testing.T) { + t.Run("payload range is out of bounds", func(t *testing.T) { + ds := make([]interface{}, 0) + + srv := new(testRangeEntity) + + h := &testRangeEntity{ + err: localstore.ErrOutOfRange, + } + + info := statusInfo{ + c: codes.OutOfRange, + m: msgPayloadOutOfRange, + } + + testStatusGetRange(t, h, srv, info, ds) + }) + + t.Run("payload range not found", func(t *testing.T) { + ds := make([]interface{}, 0) + + srv := new(testRangeEntity) + + h := &testRangeEntity{ + err: errPayloadRangeNotFound, + } + + info := statusInfo{ + c: codes.NotFound, + m: msgPayloadRangeNotFound, + } + + testStatusGetRange(t, h, srv, info, ds) + }) +} + +func 
testStatusDelete(t *testing.T, h requestHandler, info statusInfo, d []interface{}) { + s := &objectService{ + log: test.NewTestLogger(false), + requestHandler: h, + statusCalculator: serviceStatusCalculator(), + } + + _, err := s.Delete(context.TODO(), new(object.DeleteRequest)) + + st, ok := status.FromError(err) + require.True(t, ok) + + require.Equal(t, info.c, st.Code()) + require.Equal(t, info.m, st.Message()) + require.Equal(t, d, st.Details()) +} + +func TestStatusDelete(t *testing.T) { + t.Run("missing token", func(t *testing.T) { + ds := make([]interface{}, 0) + + for _, d := range missingTokenDetails() { + ds = append(ds, d) + } + + h := &testHeadEntity{ + err: errNilToken, + } + + info := statusInfo{ + c: codes.InvalidArgument, + m: msgMissingToken, + } + + testStatusDelete(t, h, info, ds) + }) + + t.Run("missing public keys in token", func(t *testing.T) { + ds := make([]interface{}, 0) + + for _, d := range tokenKeysDetails() { + ds = append(ds, d) + } + + h := &testHeadEntity{ + err: errMissingOwnerKeys, + } + + info := statusInfo{ + c: codes.PermissionDenied, + m: msgMissingTokenKeys, + } + + testStatusDelete(t, h, info, ds) + }) + + t.Run("broken token structure", func(t *testing.T) { + ds := make([]interface{}, 0) + + h := &testHeadEntity{ + err: errBrokenToken, + } + + info := statusInfo{ + c: codes.PermissionDenied, + m: msgBrokenToken, + } + + testStatusDelete(t, h, info, ds) + }) + + t.Run("missing object ID in token", func(t *testing.T) { + ds := make([]interface{}, 0) + + for _, d := range tokenOIDDetails() { + ds = append(ds, d) + } + + h := &testHeadEntity{ + err: errWrongTokenAddress, + } + + info := statusInfo{ + c: codes.PermissionDenied, + m: msgTokenObjectID, + } + + testStatusDelete(t, h, info, ds) + }) + + t.Run("private token receive", func(t *testing.T) { + ds := make([]interface{}, 0) + + h := &testHeadEntity{ + err: errTokenRetrieval, + } + + info := statusInfo{ + c: codes.Aborted, + m: msgPrivateTokenRecv, + } + + testStatusDelete(t, h, info, ds) + }) + + t.Run("incomplete tombstone put", func(t *testing.T) { + ds := make([]interface{}, 0) + + h := &testHeadEntity{ + err: errIncompleteOperation, + } + + info := statusInfo{ + c: codes.Unavailable, + m: msgPutTombstone, + } + + testStatusDelete(t, h, info, ds) + }) + + t.Run("delete preparation failure", func(t *testing.T) { + ds := make([]interface{}, 0) + + h := &testHeadEntity{ + err: errDeletePrepare, + } + + info := statusInfo{ + c: codes.Internal, + m: msgDeletePrepare, + } + + testStatusDelete(t, h, info, ds) + }) +} + +func testStatusSearch(t *testing.T, h requestHandler, srv object.Service_SearchServer, info statusInfo, d []interface{}) { + s := &objectService{ + log: test.NewTestLogger(false), + requestHandler: h, + statusCalculator: serviceStatusCalculator(), + } + + err := s.Search(new(object.SearchRequest), srv) + + st, ok := status.FromError(err) + require.True(t, ok) + + require.Equal(t, info.c, st.Code()) + require.Equal(t, info.m, st.Message()) + require.Equal(t, d, st.Details()) +} + +func TestStatusSearch(t *testing.T) { + t.Run("unsupported query version", func(t *testing.T) { + ds := make([]interface{}, 0) + + srv := new(testSearchEntity) + + h := &testSearchEntity{ + err: errUnsupportedQueryVersion, + } + + info := statusInfo{ + c: codes.Unimplemented, + m: msgQueryVersion, + } + + testStatusSearch(t, h, srv, info, ds) + }) + + t.Run("query unmarshal failure", func(t *testing.T) { + ds := make([]interface{}, 0) + + srv := new(testSearchEntity) + + h := &testSearchEntity{ + err: 
errSearchQueryUnmarshal, + } + + info := statusInfo{ + c: codes.InvalidArgument, + m: msgSearchQueryUnmarshal, + } + + testStatusSearch(t, h, srv, info, ds) + }) + + t.Run("query imposing problems", func(t *testing.T) { + ds := make([]interface{}, 0) + + srv := new(testSearchEntity) + + h := &testSearchEntity{ + err: errLocalQueryImpose, + } + + info := statusInfo{ + c: codes.Internal, + m: msgLocalQueryImpose, + } + + testStatusSearch(t, h, srv, info, ds) + }) +} + +func testStatusGetRangeHash(t *testing.T, h requestHandler, info statusInfo, d []interface{}) { + s := &objectService{ + log: test.NewTestLogger(false), + requestHandler: h, + statusCalculator: serviceStatusCalculator(), + } + + _, err := s.GetRangeHash(context.TODO(), new(object.GetRangeHashRequest)) + + st, ok := status.FromError(err) + require.True(t, ok) + + require.Equal(t, info.c, st.Code()) + require.Equal(t, info.m, st.Message()) + require.Equal(t, d, st.Details()) +} + +func TestStatusGetRangeHash(t *testing.T) { + t.Run("payload range not found", func(t *testing.T) { + ds := make([]interface{}, 0) + + h := &testRangeEntity{ + err: errPayloadRangeNotFound, + } + + info := statusInfo{ + c: codes.NotFound, + m: msgPayloadRangeNotFound, + } + + testStatusGetRangeHash(t, h, info, ds) + }) + + t.Run("range out-of-bounds", func(t *testing.T) { + ds := make([]interface{}, 0) + + h := &testRangeEntity{ + err: localstore.ErrOutOfRange, + } + + info := statusInfo{ + c: codes.OutOfRange, + m: msgPayloadOutOfRange, + } + + testStatusGetRangeHash(t, h, info, ds) + }) +} diff --git a/services/public/object/token.go b/services/public/object/token.go new file mode 100644 index 000000000..81c543700 --- /dev/null +++ b/services/public/object/token.go @@ -0,0 +1,107 @@ +package object + +import ( + "context" + "crypto/ecdsa" + + "github.com/nspcc-dev/neofs-api-go/service" + crypto "github.com/nspcc-dev/neofs-crypto" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/core" +) + +type sessionTokenVerifier interface { + verifySessionToken(context.Context, service.SessionToken) error +} + +type complexTokenVerifier struct { + verifiers []sessionTokenVerifier +} + +type tokenSignatureVerifier struct { + ownerKeys []*ecdsa.PublicKey +} + +type tokenEpochsVerifier struct { + epochRecv EpochReceiver +} + +type tokenPreProcessor struct { + keyVerifier core.OwnerKeyVerifier + + staticVerifier sessionTokenVerifier +} + +const errCreatedAfterExpiration = internal.Error("creation epoch number is greater than expired one") + +const errTokenExpired = internal.Error("token is expired") + +const errForbiddenSpawn = internal.Error("request spawn is forbidden") + +func (s tokenPreProcessor) preProcess(ctx context.Context, req serviceRequest) error { + token := req.GetSessionToken() + if token == nil { + return nil + } + + if !allowedSpawn(token.GetVerb(), req.Type()) { + return errForbiddenSpawn + } + + if err := s.keyVerifier.VerifyKey(ctx, token); err != nil { + return err + } + + ownerKeyBytes := token.GetOwnerKey() + + verifier := newComplexTokenVerifier( + s.staticVerifier, + &tokenSignatureVerifier{ + ownerKeys: []*ecdsa.PublicKey{ + crypto.UnmarshalPublicKey(ownerKeyBytes), + }, + }, + ) + + return verifier.verifySessionToken(ctx, token) +} + +func newComplexTokenVerifier(verifiers ...sessionTokenVerifier) sessionTokenVerifier { + return &complexTokenVerifier{ + verifiers: verifiers, + } +} + +func (s complexTokenVerifier) verifySessionToken(ctx context.Context, token service.SessionToken) error { + for i := range 
s.verifiers { + if s.verifiers[i] == nil { + continue + } else if err := s.verifiers[i].verifySessionToken(ctx, token); err != nil { + return err + } + } + + return nil +} + +func (s tokenSignatureVerifier) verifySessionToken(ctx context.Context, token service.SessionToken) error { + verifiedToken := service.NewVerifiedSessionToken(token) + + for i := range s.ownerKeys { + if err := service.VerifySignatureWithKey(s.ownerKeys[i], verifiedToken); err != nil { + return err + } + } + + return nil +} + +func (s tokenEpochsVerifier) verifySessionToken(ctx context.Context, token service.SessionToken) error { + if expired := token.ExpirationEpoch(); token.CreationEpoch() > expired { + return errCreatedAfterExpiration + } else if s.epochRecv.Epoch() > expired { + return errTokenExpired + } + + return nil +} diff --git a/services/public/object/token_test.go b/services/public/object/token_test.go new file mode 100644 index 000000000..e07ccb858 --- /dev/null +++ b/services/public/object/token_test.go @@ -0,0 +1,156 @@ +package object + +import ( + "context" + "errors" + "testing" + + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/stretchr/testify/require" +) + +// Entity for mocking interfaces. +// Implementation of any interface intercepts its arguments via f (if f is not nil). +// If err is not nil, it is returned as is. Otherwise, res is cast to the required type and returned without error. +type testTokenEntity struct { + // Set of interfaces which testTokenEntity must implement; some of their methods are never called. + + // Argument interceptor. Used to ascertain correct parameter passage between components. + f func(...interface{}) + // Mocked result of any interface. + res interface{} + // Mocked error of any interface. 
+ err error +} + +func (s testTokenEntity) VerifyKey(_ context.Context, p core.OwnerKeyContainer) error { + if s.f != nil { + s.f(p) + } + return s.err +} + +func (s testTokenEntity) Epoch() uint64 { + return s.res.(uint64) +} + +func (s testTokenEntity) verifySessionToken(_ context.Context, token service.SessionToken) error { + if s.f != nil { + s.f(token) + } + return s.err +} + +func TestTokenPreProcessor(t *testing.T) { + ctx := context.TODO() + + t.Run("nil token", func(t *testing.T) { + var req serviceRequest = new(object.PutRequest) + require.Nil(t, req.GetSessionToken()) + + s := new(tokenPreProcessor) + + require.NoError(t, s.preProcess(ctx, req)) + }) + + t.Run("forbidden spawn", func(t *testing.T) { + token := new(service.Token) + + req := new(object.PutRequest) + req.SetToken(token) + + token.SetVerb(service.Token_Info_Get) + + s := new(tokenPreProcessor) + + require.EqualError(t, s.preProcess(ctx, req), errForbiddenSpawn.Error()) + }) + + t.Run("owner key verifier failure", func(t *testing.T) { + verifierErr := errors.New("test error for key verifier") + + owner := OwnerID{1, 2, 3} + token := new(service.Token) + token.SetOwnerID(owner) + + req := new(object.PutRequest) + req.SetToken(token) + + s := &tokenPreProcessor{ + keyVerifier: &testTokenEntity{ + f: func(items ...interface{}) { + require.Equal(t, token, items[0]) + }, + err: verifierErr, + }, + } + + require.EqualError(t, s.preProcess(ctx, req), verifierErr.Error()) + }) + + t.Run("static verifier error", func(t *testing.T) { + vErr := errors.New("test error for static verifier") + + owner := OwnerID{1, 2, 3} + token := new(service.Token) + token.SetOwnerID(owner) + + req := new(object.PutRequest) + req.SetToken(token) + + s := &tokenPreProcessor{ + keyVerifier: new(testTokenEntity), + staticVerifier: &testTokenEntity{ + f: func(items ...interface{}) { + require.Equal(t, token, items[0]) + }, + err: vErr, + }, + } + + require.EqualError(t, s.preProcess(ctx, req), vErr.Error()) + }) +} + +func TestTokenEpochsVerifier(t *testing.T) { + ctx := context.TODO() + + t.Run("created after expiration", func(t *testing.T) { + token := new(service.Token) + token.SetExpirationEpoch(1) + token.SetCreationEpoch(token.ExpirationEpoch() + 1) + + s := new(tokenEpochsVerifier) + + require.EqualError(t, s.verifySessionToken(ctx, token), errCreatedAfterExpiration.Error()) + }) + + t.Run("expired token", func(t *testing.T) { + token := new(service.Token) + token.SetExpirationEpoch(1) + + s := &tokenEpochsVerifier{ + epochRecv: &testTokenEntity{ + res: token.ExpirationEpoch() + 1, + }, + } + + require.EqualError(t, s.verifySessionToken(ctx, token), errTokenExpired.Error()) + }) + + t.Run("valid token", func(t *testing.T) { + token := new(service.Token) + token.SetCreationEpoch(1) + token.SetExpirationEpoch(token.CreationEpoch() + 1) + + s := &tokenEpochsVerifier{ + epochRecv: &testTokenEntity{ + res: token.ExpirationEpoch() - 1, + }, + } + + require.NoError(t, s.verifySessionToken(ctx, token)) + }) +} diff --git a/services/public/object/transport_implementations.go b/services/public/object/transport_implementations.go new file mode 100644 index 000000000..3c85ce057 --- /dev/null +++ b/services/public/object/transport_implementations.go @@ -0,0 +1,743 @@ +package object + +import ( + "bytes" + "context" + "crypto/ecdsa" + "fmt" + "io" + "time" + + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-api-go/session" + crypto 
"github.com/nspcc-dev/neofs-crypto" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/nspcc-dev/neofs-node/lib/transport" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +type ( + // MultiTransportParams groups the parameters for object transport component's constructor. + MultiTransportParams struct { + AddressStore implementations.AddressStoreComponent + EpochReceiver EpochReceiver + RemoteService RemoteService + Logger *zap.Logger + Key *ecdsa.PrivateKey + PutTimeout time.Duration + GetTimeout time.Duration + HeadTimeout time.Duration + SearchTimeout time.Duration + RangeHashTimeout time.Duration + DialTimeout time.Duration + + PrivateTokenStore session.PrivateTokenStore + } + + transportComponent struct { + reqSender requestSender + + resTracker resultTracker + + getCaller remoteProcessCaller + putCaller remoteProcessCaller + headCaller remoteProcessCaller + rangeCaller remoteProcessCaller + rangeHashCaller remoteProcessCaller + searchCaller remoteProcessCaller + } + + requestSender interface { + sendRequest(context.Context, sendParams) (interface{}, error) + } + + sendParams struct { + req transport.MetaInfo + node multiaddr.Multiaddr + handler remoteProcessCaller + } + + clientInfo struct { + sc object.ServiceClient + key *ecdsa.PublicKey + } + + remoteProcessCaller interface { + call(context.Context, serviceRequest, *clientInfo) (interface{}, error) + } + + getCaller struct { + } + + putCaller struct { + } + + headCaller struct { + } + + rangeCaller struct { + } + + rangeHashCaller struct { + } + + searchCaller struct { + } + + coreRequestSender struct { + requestPrep transportRequestPreparer + addressStore implementations.AddressStoreComponent + remoteService RemoteService + + putTimeout time.Duration + getTimeout time.Duration + searchTimeout time.Duration + headTimeout time.Duration + rangeHashTimeout time.Duration + dialTimeout time.Duration + } + + signingFunc func(*ecdsa.PrivateKey, service.RequestSignedData) error + + coreRequestPreparer struct { + epochRecv EpochReceiver + key *ecdsa.PrivateKey + signingFunc signingFunc + + privateTokenStore session.PrivateTokenSource + } + + transportRequestPreparer interface { + prepareRequest(transport.MetaInfo) (serviceRequest, error) + } + + transportRequest struct { + serviceRequest + timeout time.Duration + } + + putRequestSequence struct { + *object.PutRequest + chunks []*object.PutRequest + } + + rawMetaInfo struct { + raw bool + ttl uint32 + timeout time.Duration + token service.SessionToken + rt object.RequestType + bearer service.BearerToken + extHdrs []service.ExtendedHeader + } + + rawAddrInfo struct { + *rawMetaInfo + addr Address + } +) + +const ( + minRemoteRequestTimeout = 5 * time.Second + minDialTimeout = 500 * time.Millisecond +) + +const pmWrongRequestType = "unknown type: %T" + +var ( + _ serviceRequest = (*putRequestSequence)(nil) + _ transport.MetaInfo = (*transportRequest)(nil) + _ requestSender = (*coreRequestSender)(nil) + _ transport.ObjectTransport = (*transportComponent)(nil) + _ transportRequestPreparer = (*coreRequestPreparer)(nil) + _ transport.MetaInfo = (*rawMetaInfo)(nil) + _ transport.AddressInfo = (*rawAddrInfo)(nil) + + _ remoteProcessCaller = (*getCaller)(nil) + _ remoteProcessCaller = (*putCaller)(nil) + _ remoteProcessCaller = (*headCaller)(nil) + _ remoteProcessCaller = (*searchCaller)(nil) + _ remoteProcessCaller = (*rangeCaller)(nil) + _ remoteProcessCaller = (*rangeHashCaller)(nil) +) + +func newRawMetaInfo() *rawMetaInfo { + return new(rawMetaInfo) +} + +func (s 
*rawMetaInfo) GetTTL() uint32 { + return s.ttl +} + +func (s *rawMetaInfo) setTTL(ttl uint32) { + s.ttl = ttl +} + +func (s *rawMetaInfo) GetTimeout() time.Duration { + return s.timeout +} + +func (s *rawMetaInfo) setTimeout(dur time.Duration) { + s.timeout = dur +} + +func (s *rawMetaInfo) GetSessionToken() service.SessionToken { + return s.token +} + +func (s *rawMetaInfo) setSessionToken(token service.SessionToken) { + s.token = token +} + +func (s *rawMetaInfo) GetBearerToken() service.BearerToken { + return s.bearer +} + +func (s *rawMetaInfo) setBearerToken(token service.BearerToken) { + s.bearer = token +} + +func (s *rawMetaInfo) ExtendedHeaders() []service.ExtendedHeader { + return s.extHdrs +} + +func (s *rawMetaInfo) setExtendedHeaders(v []service.ExtendedHeader) { + s.extHdrs = v +} + +func (s *rawMetaInfo) GetRaw() bool { + return s.raw +} + +func (s *rawMetaInfo) setRaw(raw bool) { + s.raw = raw +} + +func (s *rawMetaInfo) Type() object.RequestType { + return s.rt +} + +func (s *rawMetaInfo) setType(rt object.RequestType) { + s.rt = rt +} + +func (s *rawAddrInfo) GetAddress() Address { + return s.addr +} + +func (s *rawAddrInfo) setAddress(addr Address) { + s.addr = addr +} + +func (s *rawAddrInfo) getMetaInfo() *rawMetaInfo { + return s.rawMetaInfo +} + +func (s *rawAddrInfo) setMetaInfo(v *rawMetaInfo) { + s.rawMetaInfo = v +} + +func newRawAddressInfo() *rawAddrInfo { + res := new(rawAddrInfo) + + res.setMetaInfo(newRawMetaInfo()) + + return res +} + +func (s *transportRequest) GetTimeout() time.Duration { return s.timeout } + +func (s *transportComponent) Transport(ctx context.Context, p transport.ObjectTransportParams) { + res, err := s.sendRequest(ctx, p.TransportInfo, p.TargetNode) + p.ResultHandler.HandleResult(ctx, p.TargetNode, res, err) + + go s.resTracker.trackResult(ctx, resultItems{ + requestType: p.TransportInfo.Type(), + node: p.TargetNode, + satisfactory: err == nil, + }) +} + +func (s *transportComponent) sendRequest(ctx context.Context, reqInfo transport.MetaInfo, node multiaddr.Multiaddr) (interface{}, error) { + p := sendParams{ + req: reqInfo, + node: node, + } + + switch reqInfo.Type() { + case object.RequestSearch: + p.handler = s.searchCaller + case object.RequestPut: + p.handler = s.putCaller + case object.RequestHead: + p.handler = s.headCaller + case object.RequestGet: + p.handler = s.getCaller + case object.RequestRangeHash: + p.handler = s.rangeHashCaller + case object.RequestRange: + p.handler = s.rangeCaller + default: + panic(fmt.Sprintf(pmWrongRequestType, reqInfo)) + } + + return s.reqSender.sendRequest(ctx, p) +} + +func (s *searchCaller) call(ctx context.Context, r serviceRequest, c *clientInfo) (interface{}, error) { + cSearch, err := c.sc.Search(ctx, r.(*object.SearchRequest)) + if err != nil { + return nil, err + } + + res := make([]Address, 0) + + for { + r, err := cSearch.Recv() + if err != nil { + if err == io.EOF { + break + } + + return nil, err + } + + res = append(res, r.Addresses...) 
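+ // keep receiving from the Search stream until it is drained (io.EOF); matched addresses are accumulated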
+ } + + return res, nil +} + +func (s *rangeHashCaller) call(ctx context.Context, r serviceRequest, c *clientInfo) (interface{}, error) { + resp, err := c.sc.GetRangeHash(ctx, r.(*object.GetRangeHashRequest)) + if err != nil { + return nil, err + } + + return resp.Hashes, nil +} + +func (s *rangeCaller) call(ctx context.Context, r serviceRequest, c *clientInfo) (interface{}, error) { + req := r.(*GetRangeRequest) + + resp, err := c.sc.GetRange(ctx, req) + if err != nil { + return nil, err + } + + data := make([]byte, 0, req.Range.Length) + + for { + resp, err := resp.Recv() + if err != nil { + if err == io.EOF { + break + } + + return nil, err + } + + data = append(data, resp.Fragment...) + } + + return bytes.NewReader(data), nil +} + +func (s *headCaller) call(ctx context.Context, r serviceRequest, c *clientInfo) (interface{}, error) { + resp, err := c.sc.Head(ctx, r.(*object.HeadRequest)) + if err != nil { + return nil, err + } + + return resp.Object, nil +} + +func (s *getCaller) call(ctx context.Context, r serviceRequest, c *clientInfo) (interface{}, error) { + getClient, err := c.sc.Get(ctx, r.(*object.GetRequest)) + if err != nil { + return nil, err + } + + resp, err := getClient.Recv() + if err != nil { + return nil, err + } + + obj := resp.GetObject() + + if resp.NotFull() { + obj.Payload = make([]byte, 0, obj.SystemHeader.PayloadLength) + + for { + resp, err := getClient.Recv() + if err != nil { + if err == io.EOF { + break + } + + return nil, errors.Wrap(err, "get object received error") + } + + obj.Payload = append(obj.Payload, resp.GetChunk()...) + } + } + + return obj, nil +} + +func (s *putCaller) call(ctx context.Context, r serviceRequest, c *clientInfo) (interface{}, error) { + putClient, err := c.sc.Put(ctx) + if err != nil { + return nil, err + } + + req := r.(*putRequestSequence) + + if err := putClient.Send(req.PutRequest); err != nil { + return nil, err + } + + for i := range req.chunks { + if err := putClient.Send(req.chunks[i]); err != nil { + return nil, err + } + } + + resp, err := putClient.CloseAndRecv() + if err != nil { + return nil, err + } + + return &resp.Address, nil +} + +func (s *coreRequestPreparer) prepareRequest(req transport.MetaInfo) (serviceRequest, error) { + var ( + signed bool + tr *transportRequest + r serviceRequest + ) + + if tr, signed = req.(*transportRequest); signed { + r = tr.serviceRequest + } else { + switch req.Type() { + case object.RequestSearch: + r = prepareSearchRequest(req.(transport.SearchInfo)) + case object.RequestPut: + r = preparePutRequest(req.(transport.PutInfo)) + case object.RequestGet: + r = prepareGetRequest(req.(transport.GetInfo)) + case object.RequestHead: + r = prepareHeadRequest(req.(transport.HeadInfo)) + case object.RequestRange: + r = prepareRangeRequest(req.(transport.RangeInfo)) + case object.RequestRangeHash: + r = prepareRangeHashRequest(req.(transport.RangeHashInfo)) + default: + panic(fmt.Sprintf(pmWrongRequestType, req)) + } + } + + r.SetTTL(req.GetTTL()) + r.SetEpoch(s.epochRecv.Epoch()) + r.SetRaw(req.GetRaw()) + r.SetBearer( + toBearerMessage( + req.GetBearerToken(), + ), + ) + r.SetHeaders( + toExtendedHeaderMessages( + req.ExtendedHeaders(), + ), + ) + + if signed { + return r, nil + } + + key := s.key + + if token := req.GetSessionToken(); token != nil { + /* FIXME: here we must determine whether the node is trusted, + and if so, sign the request with a session key. + In current implementation trusted node may lose its reputation + in case of sending user requests in a nonexistent session. 
+ */ + r.SetToken(toTokenMessage(token)) + + privateTokenKey := session.PrivateTokenKey{} + privateTokenKey.SetTokenID(token.GetID()) + privateTokenKey.SetOwnerID(token.GetOwnerID()) + + pToken, err := s.privateTokenStore.Fetch(privateTokenKey) + if err == nil { + if err := signRequest(pToken.PrivateKey(), r); err != nil { + return nil, err + } + } + } + + return r, signRequest(key, r) +} + +func toTokenMessage(token service.SessionToken) *service.Token { + if token == nil { + return nil + } else if v, ok := token.(*service.Token); ok { + return v + } + + res := new(service.Token) + + res.SetID(token.GetID()) + res.SetOwnerID(token.GetOwnerID()) + res.SetVerb(token.GetVerb()) + res.SetAddress(token.GetAddress()) + res.SetCreationEpoch(token.CreationEpoch()) + res.SetExpirationEpoch(token.ExpirationEpoch()) + res.SetSessionKey(token.GetSessionKey()) + res.SetSignature(token.GetSignature()) + + return res +} + +func toBearerMessage(token service.BearerToken) *service.BearerTokenMsg { + if token == nil { + return nil + } else if v, ok := token.(*service.BearerTokenMsg); ok { + return v + } + + res := new(service.BearerTokenMsg) + + res.SetACLRules(token.GetACLRules()) + res.SetOwnerID(token.GetOwnerID()) + res.SetExpirationEpoch(token.ExpirationEpoch()) + res.SetOwnerKey(token.GetOwnerKey()) + res.SetSignature(token.GetSignature()) + + return res +} + +func toExtendedHeaderMessages(hs []service.ExtendedHeader) []service.RequestExtendedHeader_KV { + res := make([]service.RequestExtendedHeader_KV, 0, len(hs)) + + for i := range hs { + if hs[i] == nil { + continue + } + + h := service.RequestExtendedHeader_KV{} + h.SetK(hs[i].Key()) + h.SetV(hs[i].Value()) + + res = append(res, h) + } + + return res +} + +func signRequest(key *ecdsa.PrivateKey, req serviceRequest) error { + signKeys := req.GetSignKeyPairs() + ln := len(signKeys) + + // TODO: public key bytes can be stored in struct once + if ln > 0 && bytes.Equal( + crypto.MarshalPublicKey(signKeys[ln-1].GetPublicKey()), + crypto.MarshalPublicKey(&key.PublicKey), + ) { + return nil + } + + return requestSignFunc(key, req) +} + +// TODO: write docs, write tests. 
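+// prepareSearchRequest and the helpers below translate transport.*Info descriptors
+// into concrete neofs-api-go request messages; preparePutRequest additionally splits
+// the object payload into chunked PutRequest messages via splitBytes.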
+func prepareSearchRequest(req transport.SearchInfo) serviceRequest { + return &object.SearchRequest{ + ContainerID: req.GetCID(), + Query: req.GetQuery(), + QueryVersion: 1, + } +} + +func prepareGetRequest(req transport.GetInfo) serviceRequest { + return &object.GetRequest{ + Address: req.GetAddress(), + } +} + +func prepareHeadRequest(req transport.HeadInfo) serviceRequest { + return &object.HeadRequest{ + Address: req.GetAddress(), + FullHeaders: req.GetFullHeaders(), + } +} + +func preparePutRequest(req transport.PutInfo) serviceRequest { + obj := req.GetHead() + chunks := splitBytes(obj.Payload, maxGetPayloadSize) + + // copy object to save payload of initial object unchanged + nObj := new(Object) + *nObj = *obj + nObj.Payload = nil + + res := &putRequestSequence{ + PutRequest: object.MakePutRequestHeader(nObj), + chunks: make([]*object.PutRequest, 0, len(chunks)), + } + + // TODO: think about chunk messages signing + for i := range chunks { + res.chunks = append(res.chunks, object.MakePutRequestChunk(chunks[i])) + } + + return res +} + +func prepareRangeHashRequest(req transport.RangeHashInfo) serviceRequest { + return &object.GetRangeHashRequest{ + Address: req.GetAddress(), + Ranges: req.GetRanges(), + Salt: req.GetSalt(), + } +} + +func prepareRangeRequest(req transport.RangeInfo) serviceRequest { + return &GetRangeRequest{ + Address: req.GetAddress(), + Range: req.GetRange(), + } +} + +// TODO: write docs, write tests. +func (s *coreRequestSender) defaultTimeout(req transport.MetaInfo) time.Duration { + switch req.Type() { + case object.RequestSearch: + return s.searchTimeout + case object.RequestPut: + return s.putTimeout + case object.RequestGet: + return s.getTimeout + case object.RequestHead: + return s.headTimeout + case object.RequestRangeHash: + return s.rangeHashTimeout + } + + return minRemoteRequestTimeout +} + +// TODO: write docs, write tests. +func (s *coreRequestSender) sendRequest(ctx context.Context, p sendParams) (interface{}, error) { + var err error + + if p.node == nil { + if p.node, err = s.addressStore.SelfAddr(); err != nil { + return nil, err + } + } + + timeout := p.req.GetTimeout() + if timeout <= 0 { + timeout = s.defaultTimeout(p.req) + } + + r, err := s.requestPrep.prepareRequest(p.req) + if err != nil { + return nil, err + } + + dialCtx, cancel := context.WithTimeout(ctx, s.dialTimeout) + + c, err := s.remoteService.Remote(dialCtx, p.node) + + cancel() + + if err != nil { + return nil, err + } + + ctx, cancel = context.WithTimeout(ctx, timeout) + defer cancel() + + return p.handler.call(ctx, r, &clientInfo{ + sc: c, + key: s.addressStore.PublicKey(p.node), + }) +} + +// NewMultiTransport is an object transport component's constructor. 
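+// A minimal construction sketch (illustrative only: the dependency values shown
+// here are placeholders and not part of this commit; a nil required dependency
+// makes the constructor return an error, and non-positive timeouts fall back to
+// the minimum defaults):
+//
+//	// addrStore, epochRecv, remoteSvc, key and tokenStore are placeholder dependencies
+//	tr, err := NewMultiTransport(MultiTransportParams{
+//		AddressStore:      addrStore,
+//		EpochReceiver:     epochRecv,
+//		RemoteService:     remoteSvc,
+//		Logger:            zap.L(),
+//		Key:               key,
+//		PrivateTokenStore: tokenStore,
+//	})
+//	if err != nil {
+//		// a required dependency is missing
+//	}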
+func NewMultiTransport(p MultiTransportParams) (transport.ObjectTransport, error) { + switch { + case p.RemoteService == nil: + return nil, errEmptyGRPC + case p.AddressStore == nil: + return nil, errEmptyAddress + case p.Logger == nil: + return nil, errEmptyLogger + case p.EpochReceiver == nil: + return nil, errEmptyEpochReceiver + case p.Key == nil: + return nil, errEmptyPrivateKey + case p.PrivateTokenStore == nil: + return nil, errEmptyTokenStore + } + + if p.PutTimeout <= 0 { + p.PutTimeout = minRemoteRequestTimeout + } + + if p.GetTimeout <= 0 { + p.GetTimeout = minRemoteRequestTimeout + } + + if p.HeadTimeout <= 0 { + p.HeadTimeout = minRemoteRequestTimeout + } + + if p.SearchTimeout <= 0 { + p.SearchTimeout = minRemoteRequestTimeout + } + + if p.RangeHashTimeout <= 0 { + p.RangeHashTimeout = minRemoteRequestTimeout + } + + if p.DialTimeout <= 0 { + p.DialTimeout = minDialTimeout + } + + return &transportComponent{ + reqSender: &coreRequestSender{ + requestPrep: &coreRequestPreparer{ + epochRecv: p.EpochReceiver, + key: p.Key, + signingFunc: requestSignFunc, + + privateTokenStore: p.PrivateTokenStore, + }, + addressStore: p.AddressStore, + remoteService: p.RemoteService, + putTimeout: p.PutTimeout, + getTimeout: p.GetTimeout, + searchTimeout: p.SearchTimeout, + headTimeout: p.HeadTimeout, + rangeHashTimeout: p.RangeHashTimeout, + dialTimeout: p.DialTimeout, + }, + resTracker: &idleResultTracker{}, + getCaller: &getCaller{}, + putCaller: &putCaller{}, + headCaller: &headCaller{}, + rangeCaller: &rangeCaller{}, + rangeHashCaller: &rangeHashCaller{}, + searchCaller: &searchCaller{}, + }, nil +} diff --git a/services/public/object/transport_test.go b/services/public/object/transport_test.go new file mode 100644 index 000000000..74ae2899a --- /dev/null +++ b/services/public/object/transport_test.go @@ -0,0 +1,76 @@ +package object + +import ( + "context" + "testing" + + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" +) + +type ( + // Entity for mocking interfaces. + // Implementation of any interface intercepts arguments via f (if not nil). + // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. + testTransportEntity struct { + // Set of interfaces which entity must implement, but some methods from those does not call. + object.ServiceClient + object.Service_PutClient + + // Argument interceptor. Used for ascertain of correct parameter passage between components. + f func(...interface{}) + // Mocked result of any interface. + res interface{} + // Mocked error of any interface. 
+ err error + } +) + +var ( + _ object.ServiceClient = (*testTransportEntity)(nil) + _ object.Service_PutClient = (*testTransportEntity)(nil) +) + +func (s *testTransportEntity) Send(*object.PutRequest) error { return s.err } + +func (s *testTransportEntity) CloseAndRecv() (*object.PutResponse, error) { + if s.err != nil { + return nil, s.err + } + return s.res.(*object.PutResponse), nil +} + +func (s *testTransportEntity) Put(ctx context.Context, opts ...grpc.CallOption) (object.Service_PutClient, error) { + if s.err != nil { + return nil, s.err + } + return s.res.(object.Service_PutClient), nil +} + +func Test_putHandler(t *testing.T) { + ctx := context.TODO() + + t.Run("return type correctness", func(t *testing.T) { + addr := new(Address) + *addr = testObjectAddress(t) + + srvClient := &testTransportEntity{ + res: &testTransportEntity{ + res: &object.PutResponse{ + Address: *addr, + }, + }, + } + + putC := &putCaller{} + + res, err := putC.call(ctx, &putRequestSequence{PutRequest: new(object.PutRequest)}, &clientInfo{ + sc: srvClient, + }) + require.NoError(t, err) + + // ascertain that value returns as expected + require.Equal(t, addr, res) + }) +} diff --git a/services/public/object/traverse.go b/services/public/object/traverse.go new file mode 100644 index 000000000..38ec9d8b8 --- /dev/null +++ b/services/public/object/traverse.go @@ -0,0 +1,186 @@ +package object + +import ( + "context" + "sync" + + "github.com/nspcc-dev/neofs-api-go/container" + "github.com/nspcc-dev/neofs-node/lib/implementations" + + "github.com/multiformats/go-multiaddr" + "github.com/pkg/errors" +) + +type ( + containerTraverser interface { + implementations.Traverser + add(multiaddr.Multiaddr, bool) + done(multiaddr.Multiaddr) bool + finished() bool + close() + Err() error + } + + placementBuilder interface { + buildPlacement(context.Context, Address, ...multiaddr.Multiaddr) ([]multiaddr.Multiaddr, error) + } + + traverseParams struct { + tryPrevNM bool + addr Address + curPlacementBuilder placementBuilder + prevPlacementBuilder placementBuilder + maxRecycleCount int + stopCount int + } + + coreTraverser struct { + closed bool + + usePrevNM bool + + recycleNum int + + *sync.RWMutex + traverseParams + failed []multiaddr.Multiaddr + mDone map[string]struct{} + err error + } +) + +var ( + _ placementBuilder = (*corePlacementUtil)(nil) + _ containerTraverser = (*coreTraverser)(nil) +) + +func (s *coreTraverser) Next(ctx context.Context) []multiaddr.Multiaddr { + if s.isClosed() || s.finished() { + return nil + } + + s.Lock() + defer s.Unlock() + + return s.next(ctx) +} + +func minInt(a, b int) int { + if a < b { + return a + } + + return b +} + +func (s *coreTraverser) next(ctx context.Context) (nodes []multiaddr.Multiaddr) { + defer func() { + if s.stopCount == 0 { + s.stopCount = len(nodes) + } + + if s.stopCount > 0 { + nodes = nodes[:minInt( + s.stopCount-len(s.mDone), + len(nodes), + )] + } + }() + + var placeBuilder = s.curPlacementBuilder + if s.usePrevNM { + placeBuilder = s.prevPlacementBuilder + } + + nodes, s.err = placeBuilder.buildPlacement(ctx, s.addr, s.failed...) + if errors.Is(errors.Cause(s.err), container.ErrNotFound) { + return + } + + for i := 0; i < len(nodes); i++ { + if _, ok := s.mDone[nodes[i].String()]; ok { + nodes = append(nodes[:i], nodes[i+1:]...) 
+ i-- + } + + continue + } + + if len(nodes) == 0 { + if !s.usePrevNM && s.tryPrevNM { + s.usePrevNM = true + return s.next(ctx) + } + + if s.recycleNum < s.maxRecycleCount { + s.reset() + return s.next(ctx) + } + } + + return nodes +} + +func (s *coreTraverser) reset() { + s.usePrevNM = false + s.failed = s.failed[:0] + s.recycleNum++ +} + +func (s *coreTraverser) add(node multiaddr.Multiaddr, ok bool) { + s.Lock() + if ok { + s.mDone[node.String()] = struct{}{} + } else { + s.failed = append(s.failed, node) + } + s.Unlock() +} + +func (s *coreTraverser) done(node multiaddr.Multiaddr) bool { + s.RLock() + _, ok := s.mDone[node.String()] + s.RUnlock() + + return ok +} + +func (s *coreTraverser) close() { + s.Lock() + s.closed = true + s.Unlock() +} + +func (s *coreTraverser) isClosed() bool { + s.RLock() + defer s.RUnlock() + + return s.closed +} + +func (s *coreTraverser) finished() bool { + s.RLock() + defer s.RUnlock() + + return s.stopCount > 0 && len(s.mDone) >= s.stopCount +} + +func (s *coreTraverser) Err() error { + s.RLock() + defer s.RUnlock() + + return s.err +} + +func newContainerTraverser(p *traverseParams) containerTraverser { + return &coreTraverser{ + RWMutex: new(sync.RWMutex), + traverseParams: *p, + failed: make([]multiaddr.Multiaddr, 0), + mDone: make(map[string]struct{}), + } +} + +func (s *corePlacementUtil) buildPlacement(ctx context.Context, addr Address, excl ...multiaddr.Multiaddr) ([]multiaddr.Multiaddr, error) { + return s.placementBuilder.GetNodes(ctx, addr, s.prevNetMap, excl...) +} diff --git a/services/public/object/traverse_test.go b/services/public/object/traverse_test.go new file mode 100644 index 000000000..93462b20b --- /dev/null +++ b/services/public/object/traverse_test.go @@ -0,0 +1,378 @@ +package object + +import ( + "context" + "strconv" + "sync" + "testing" + + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +type ( + // Entity for mocking interfaces. + // Implementation of any interface intercepts arguments via f (if not nil). + // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. + testTraverseEntity struct { + // Set of interfaces which testCommonEntity must implement, but some methods from those does not call. + serviceRequest + Placer + + // Argument interceptor. Used for ascertain of correct parameter passage between components. + f func(...interface{}) + // Mocked result of any interface. + res interface{} + // Mocked error of any interface. 
+ err error + } +) + +var ( + _ Placer = (*testTraverseEntity)(nil) + _ placementBuilder = (*testTraverseEntity)(nil) +) + +func (s *testTraverseEntity) GetNodes(ctx context.Context, a Address, p bool, e ...multiaddr.Multiaddr) ([]multiaddr.Multiaddr, error) { + if s.f != nil { + s.f(a, p, e) + } + if s.err != nil { + return nil, s.err + } + return s.res.([]multiaddr.Multiaddr), nil +} + +func (s *testTraverseEntity) buildPlacement(_ context.Context, addr Address, excl ...multiaddr.Multiaddr) ([]multiaddr.Multiaddr, error) { + if s.f != nil { + s.f(addr, excl) + } + if s.err != nil { + return nil, s.err + } + return s.res.([]multiaddr.Multiaddr), nil +} + +func Test_coreCnrAffChecker_buildPlacement(t *testing.T) { + ctx := context.TODO() + addr := testObjectAddress(t) + nodes := testNodeList(t, 2) + + t.Run("correct placer params", func(t *testing.T) { + s := &corePlacementUtil{ + prevNetMap: true, + placementBuilder: &testTraverseEntity{ + f: func(items ...interface{}) { + require.Equal(t, addr, items[0].(Address)) + require.True(t, items[1].(bool)) + require.Equal(t, nodes, items[2].([]multiaddr.Multiaddr)) + }, + err: internal.Error(""), // just to prevent panic + }, + log: zap.L(), + } + + s.buildPlacement(ctx, addr, nodes...) + }) + + t.Run("correct result", func(t *testing.T) { + t.Run("placer error", func(t *testing.T) { + s := &corePlacementUtil{ + placementBuilder: &testTraverseEntity{ + err: internal.Error(""), // force Placer to return some error + }, + log: zap.L(), + } + + res, err := s.buildPlacement(ctx, addr) + require.Error(t, err) + require.Empty(t, res) + }) + + t.Run("placer success", func(t *testing.T) { + s := &corePlacementUtil{ + placementBuilder: &testTraverseEntity{ + res: nodes, // force Placer to return nodes + }, + log: zap.L(), + } + + res, err := s.buildPlacement(ctx, addr) + require.NoError(t, err) + require.Equal(t, nodes, res) + }) + }) +} + +func Test_coreTraverser(t *testing.T) { + ctx := context.TODO() + + t.Run("new", func(t *testing.T) { + addr := testObjectAddress(t) + pl := new(testTraverseEntity) + + v := newContainerTraverser(&traverseParams{ + tryPrevNM: true, + addr: addr, + curPlacementBuilder: pl, + prevPlacementBuilder: pl, + maxRecycleCount: 10, + }) + + res := v.(*coreTraverser) + + require.NotNil(t, res.RWMutex) + require.Equal(t, addr, res.addr) + require.True(t, res.tryPrevNM) + require.False(t, res.usePrevNM) + require.NotNil(t, res.mDone) + require.Empty(t, res.mDone) + require.Empty(t, res.failed) + require.Equal(t, 10, res.maxRecycleCount) + require.Equal(t, pl, res.curPlacementBuilder) + require.Equal(t, pl, res.prevPlacementBuilder) + require.Equal(t, 0, res.stopCount) + }) + + t.Run("close", func(t *testing.T) { + v := newContainerTraverser(&traverseParams{ + curPlacementBuilder: &testTraverseEntity{ + res: make([]multiaddr.Multiaddr, 1), + }, + }) + + v.close() + + require.Empty(t, v.Next(ctx)) + require.True(t, v.(*coreTraverser).isClosed()) + }) + + t.Run("done", func(t *testing.T) { + nodes := testNodeList(t, 3) + v := newContainerTraverser(&traverseParams{}) + + v.add(nodes[0], true) + require.True(t, v.done(nodes[0])) + + v.add(nodes[1], false) + require.False(t, v.done(nodes[1])) + + require.False(t, v.done(nodes[2])) + }) + + t.Run("finished", func(t *testing.T) { + + t.Run("zero stop count", func(t *testing.T) { + containerTraverser := &coreTraverser{ + RWMutex: new(sync.RWMutex), + traverseParams: traverseParams{stopCount: 0}, + } + require.False(t, containerTraverser.finished()) + }) + + t.Run("positive stop count", 
func(t *testing.T) { + containerTraverser := &coreTraverser{ + RWMutex: new(sync.RWMutex), + mDone: make(map[string]struct{}), + traverseParams: traverseParams{stopCount: 3}, + } + + for i := 0; i < containerTraverser.stopCount-1; i++ { + containerTraverser.mDone[strconv.Itoa(i)] = struct{}{} + } + + require.False(t, containerTraverser.finished()) + + containerTraverser.mDone["last node"] = struct{}{} + + require.True(t, containerTraverser.finished()) + }) + }) + + t.Run("add result", func(t *testing.T) { + mAddr := testNode(t, 0) + + containerTraverser := &coreTraverser{ + RWMutex: new(sync.RWMutex), + mDone: make(map[string]struct{}), + } + + containerTraverser.add(mAddr, true) + _, ok := containerTraverser.mDone[mAddr.String()] + require.True(t, ok) + + containerTraverser.add(mAddr, false) + require.Contains(t, containerTraverser.failed, mAddr) + }) + + t.Run("reset", func(t *testing.T) { + initRecycleNum := 1 + + s := &coreTraverser{ + failed: testNodeList(t, 1), + usePrevNM: true, + recycleNum: initRecycleNum, + } + + s.reset() + + require.Empty(t, s.failed) + require.False(t, s.usePrevNM) + require.Equal(t, initRecycleNum+1, s.recycleNum) + }) + + t.Run("next", func(t *testing.T) { + + t.Run("exclude done nodes from result", func(t *testing.T) { + nodes := testNodeList(t, 5) + done := make([]multiaddr.Multiaddr, 2) + copy(done, nodes) + + pl := &testTraverseEntity{res: nodes} + tr := newContainerTraverser(&traverseParams{curPlacementBuilder: pl}) + + for i := range done { + tr.add(done[i], true) + } + + res := tr.Next(ctx) + for i := range done { + require.NotContains(t, res, done[i]) + } + + }) + + t.Run("stop count initialization", func(t *testing.T) { + nodes := testNodeList(t, 5) + + pl := &testTraverseEntity{res: nodes} + + tr := newContainerTraverser(&traverseParams{curPlacementBuilder: pl}) + + _ = tr.Next(ctx) + require.Equal(t, len(nodes), tr.(*coreTraverser).stopCount) + }) + + t.Run("all nodes are done", func(t *testing.T) { + nodes := testNodeList(t, 5) + pl := &testTraverseEntity{res: nodes} + tr := newContainerTraverser(&traverseParams{curPlacementBuilder: pl}) + + require.Equal(t, nodes, tr.Next(ctx)) + + for i := range nodes { + tr.add(nodes[i], true) + } + + require.Empty(t, tr.Next(ctx)) + }) + + t.Run("failed nodes accounting", func(t *testing.T) { + nodes := testNodeList(t, 5) + failed := nodes[:len(nodes)-2] + _ = failed + addr := testObjectAddress(t) + + pl := &testTraverseEntity{ + f: func(items ...interface{}) { + t.Run("correct placer params", func(t *testing.T) { + require.Equal(t, addr, items[0].(Address)) + require.Equal(t, failed, items[1].([]multiaddr.Multiaddr)) + }) + }, + res: nodes, + } + + tr := newContainerTraverser(&traverseParams{ + addr: addr, + curPlacementBuilder: pl, + }) + + for i := range failed { + tr.add(failed[i], false) + } + + _ = tr.Next(ctx) + }) + + t.Run("placement build failure", func(t *testing.T) { + + t.Run("forbid previous network map", func(t *testing.T) { + pl := &testTraverseEntity{res: make([]multiaddr.Multiaddr, 0)} + + tr := newContainerTraverser(&traverseParams{curPlacementBuilder: pl}) + + require.Empty(t, tr.Next(ctx)) + }) + + t.Run("allow previous network map", func(t *testing.T) { + + t.Run("failure", func(t *testing.T) { + pl := &testTraverseEntity{ + res: make([]multiaddr.Multiaddr, 0), + } + + tr := newContainerTraverser(&traverseParams{ + tryPrevNM: true, + curPlacementBuilder: pl, + prevPlacementBuilder: pl, + }) + + require.Empty(t, tr.Next(ctx)) + }) + + t.Run("success", func(t *testing.T) { + nodes := 
testNodeList(t, 5) + + tr := newContainerTraverser(&traverseParams{ + tryPrevNM: true, + curPlacementBuilder: &testTraverseEntity{ + res: make([]multiaddr.Multiaddr, 0), + }, + prevPlacementBuilder: &testTraverseEntity{ + res: nodes, + }, + }) + + require.Equal(t, nodes, tr.Next(ctx)) + }) + }) + + t.Run("recycle", func(t *testing.T) { + recycleCount := 5 + + curNetMapCallCounter, prevNetMapCallCounter := 0, 0 + + tr := newContainerTraverser(&traverseParams{ + tryPrevNM: true, + curPlacementBuilder: &testTraverseEntity{ + f: func(items ...interface{}) { + curNetMapCallCounter++ + }, + res: make([]multiaddr.Multiaddr, 0), + }, + prevPlacementBuilder: &testTraverseEntity{ + f: func(items ...interface{}) { + prevNetMapCallCounter++ + }, + res: make([]multiaddr.Multiaddr, 0), + }, + maxRecycleCount: recycleCount, + }) + + _ = tr.Next(ctx) + require.Equal(t, recycleCount+1, prevNetMapCallCounter) + require.Equal(t, recycleCount+1, curNetMapCallCounter) + }) + }) + }) +} + +func testNodeList(t *testing.T, count int) (res []multiaddr.Multiaddr) { + for i := 0; i < count; i++ { + res = append(res, testNode(t, i)) + } + return +} diff --git a/services/public/object/ttl.go b/services/public/object/ttl.go new file mode 100644 index 000000000..cdc8a5748 --- /dev/null +++ b/services/public/object/ttl.go @@ -0,0 +1,211 @@ +package object + +import ( + "context" + + "github.com/nspcc-dev/neofs-api-go/container" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +type ( + // ttlPreProcessor is an implementation of requestPreProcessor interface used in Object service production. + ttlPreProcessor struct { + // List of static TTL conditions. + staticCond []service.TTLCondition + + // List of TTL condition constructors. + condPreps []ttlConditionPreparer + + // Processing function. + fProc func(service.TTLSource, ...service.TTLCondition) error + } + + // ttlConditionPreparer is an interface of TTL condition constructor. + ttlConditionPreparer interface { + // prepareTTLCondition creates TTL condition instance based on passed request. + prepareTTLCondition(context.Context, object.Request) service.TTLCondition + } + + // coreTTLCondPreparer is an implementation of ttlConditionPreparer interface used in Object service production. + coreTTLCondPreparer struct { + curAffChecker containerAffiliationChecker + prevAffChecker containerAffiliationChecker + } + + containerAffiliationResult int + + // containerAffiliationChecker is an interface of container membership validator. + containerAffiliationChecker interface { + // Checks local node is affiliated with container with passed ID. + affiliated(context.Context, CID) containerAffiliationResult + } + + // corePlacementUtil is an implementation of containerAffiliationChecker interface used in Object service production. + corePlacementUtil struct { + // Previous network map flag. + prevNetMap bool + + // Local node net address store. + localAddrStore implementations.AddressStore + + // Container nodes membership maintainer. + placementBuilder Placer + + // Logging component. + log *zap.Logger + } +) + +// decTTLPreProcessor is an implementation of requestPreProcessor. 
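+// It decrements the TTL of each incoming request by one (see preProcess below),
+// so every forwarding hop consumes one unit of the remaining time-to-live.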
+type decTTLPreProcessor struct { +} + +const ( + _ containerAffiliationResult = iota + affUnknown + affNotFound + affPresence + affAbsence +) + +const ( + lmSelfAddrRecvFail = "could not receive local network address" +) + +var ( + _ containerAffiliationChecker = (*corePlacementUtil)(nil) + _ ttlConditionPreparer = (*coreTTLCondPreparer)(nil) + _ requestPreProcessor = (*ttlPreProcessor)(nil) + + _ service.TTLCondition = validTTLCondition + + _ requestPreProcessor = (*decTTLPreProcessor)(nil) +) + +// requestPreProcessor method implementation. +// +// Panics with pmEmptyServiceRequest on empty request. +// +// Constructs set of TTL conditions via internal constructors. +// Returns result of internal TTL conditions processing function. +func (s *ttlPreProcessor) preProcess(ctx context.Context, req serviceRequest) error { + if req == nil { + panic(pmEmptyServiceRequest) + } + + dynamicCond := make([]service.TTLCondition, len(s.condPreps)) + + for i := range s.condPreps { + dynamicCond[i] = s.condPreps[i].prepareTTLCondition(ctx, req) + } + + return s.fProc(req, append(s.staticCond, dynamicCond...)...) +} + +// ttlConditionPreparer method implementation. +// +// Condition returns ErrNotLocalContainer if and only if request is non-forwarding and local node is not presented +// in placement vector corresponding to request. +func (s *coreTTLCondPreparer) prepareTTLCondition(ctx context.Context, req object.Request) service.TTLCondition { + if req == nil { + panic(pmEmptyServiceRequest) + } + + return func(ttl uint32) error { + // check forwarding assumption + if ttl >= service.SingleForwardingTTL { + // container affiliation doesn't matter + return nil + } + + // get target container ID from request body + cid := req.CID() + + // check local node affiliation to container + aff := s.curAffChecker.affiliated(ctx, cid) + + if aff == affAbsence && req.AllowPreviousNetMap() { + // request can be forwarded to container members from previous epoch + aff = s.prevAffChecker.affiliated(ctx, cid) + } + + switch aff { + case affUnknown: + return errContainerAffiliationProblem + case affNotFound: + return &detailedError{ + error: errContainerNotFound, + d: containerDetails(cid, descContainerNotFound), + } + case affAbsence: + return &detailedError{ + error: errNotLocalContainer, + d: containerDetails(cid, descNotLocalContainer), + } + } + + return nil + } +} + +// containerAffiliationChecker method implementation. +// +// If local network address store returns error, logger writes error and affUnknown returns. +// If placement builder returns error +// - caused by ErrNotFound, affNotFound returns; +// - status error with NotFound code, affNotFound returns; +// - any other, affUnknown returns, +// Otherwise, if placement builder returns +// - true, affPresence returns; +// - false, affAbsence returns. 
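+// A caller-side sketch (assuming the checker is wired with an address store and
+// a placement builder, as in coreTTLCondPreparer):
+//
+//	switch s.affiliated(ctx, cid) {
+//	case affPresence:
+//		// local node is a container member: the request may be served locally
+//	case affAbsence:
+//		// not a member: report errNotLocalContainer or retry on the previous netmap
+//	case affNotFound:
+//		// the container itself was not found
+//	default:
+//		// affUnknown: membership could not be determined
+//	}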
+func (s *corePlacementUtil) affiliated(ctx context.Context, cid CID) containerAffiliationResult { + selfAddr, err := s.localAddrStore.SelfAddr() + if err != nil { + s.log.Error(lmSelfAddrRecvFail, zap.Error(err)) + return affUnknown + } + + aff, err := s.placementBuilder.IsContainerNode(ctx, selfAddr, cid, s.prevNetMap) + if err != nil { + if err := errors.Cause(err); errors.Is(err, container.ErrNotFound) { + return affNotFound + } + + return affUnknown + } + + if !aff { + return affAbsence + } + + return affPresence +} + +func processTTLConditions(req service.TTLSource, cs ...service.TTLCondition) error { + ttl := req.GetTTL() + + for i := range cs { + if err := cs[i](ttl); err != nil { + return err + } + } + + return nil +} + +func validTTLCondition(ttl uint32) error { + if ttl < service.NonForwardingTTL { + return errInvalidTTL + } + + return nil +} + +func (s *decTTLPreProcessor) preProcess(_ context.Context, req serviceRequest) error { + req.SetTTL(req.GetTTL() - 1) + return nil +} diff --git a/services/public/object/ttl_test.go b/services/public/object/ttl_test.go new file mode 100644 index 000000000..073d87951 --- /dev/null +++ b/services/public/object/ttl_test.go @@ -0,0 +1,377 @@ +package object + +import ( + "context" + "math/rand" + "strconv" + "testing" + + "github.com/multiformats/go-multiaddr" + "github.com/nspcc-dev/neofs-api-go/container" + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +type ( + // Entity for mocking interfaces. + // Implementation of any interface intercepts arguments via f (if not nil). + // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. + testTTLEntity struct { + // Set of interfaces which testCommonEntity must implement, but some methods from those does not call. + serviceRequest + Placer + + // Argument interceptor. Used for ascertain of correct parameter passage between components. + f func(...interface{}) + // Mocked result of any interface. + res interface{} + // Mocked error of any interface. 
+ err error + } +) + +var ( + _ ttlConditionPreparer = (*testTTLEntity)(nil) + _ implementations.AddressStore = (*testTTLEntity)(nil) + _ containerAffiliationChecker = (*testTTLEntity)(nil) + _ Placer = (*testTTLEntity)(nil) +) + +func (s *testTTLEntity) SelfAddr() (multiaddr.Multiaddr, error) { + if s.err != nil { + return nil, s.err + } + return s.res.(multiaddr.Multiaddr), nil +} + +func (s *testTTLEntity) IsContainerNode(_ context.Context, m multiaddr.Multiaddr, c CID, b bool) (bool, error) { + if s.f != nil { + s.f(m, c, b) + } + if s.err != nil { + return false, s.err + } + return s.res.(bool), nil +} + +func (s *testTTLEntity) CID() CID { return s.res.([]interface{})[0].(CID) } + +func (s *testTTLEntity) AllowPreviousNetMap() bool { return s.res.([]interface{})[1].(bool) } + +func (s *testTTLEntity) prepareTTLCondition(_ context.Context, req object.Request) service.TTLCondition { + if s.f != nil { + s.f(req) + } + return s.res.(service.TTLCondition) +} + +func (s *testTTLEntity) affiliated(ctx context.Context, cid CID) containerAffiliationResult { + if s.f != nil { + s.f(cid) + } + return s.res.(containerAffiliationResult) +} + +func Test_ttlPreProcessor_preProcess(t *testing.T) { + ctx := context.TODO() + + // create custom request with forwarding TTL + req := &testTTLEntity{res: uint32(service.SingleForwardingTTL)} + + t.Run("empty request", func(t *testing.T) { + require.PanicsWithValue(t, pmEmptyServiceRequest, func() { + // ascertain that nil request causes panic + _ = new(ttlPreProcessor).preProcess(ctx, nil) + }) + }) + + t.Run("correct processing", func(t *testing.T) { + // create custom error + pErr := internal.Error("test error for processing func") + + // create custom ttlConditionPreparer + condPreparer := &testTTLEntity{ + f: func(items ...interface{}) { + t.Run("correct condition preparer params", func(t *testing.T) { + // ascertain that request argument of ttlPreProcessor and ttlConditionPreparer are the same + require.Equal(t, req, items[0].(object.Request)) + }) + }, + res: service.TTLCondition(func(uint32) error { return nil }), + } + + s := &ttlPreProcessor{ + condPreps: []ttlConditionPreparer{condPreparer}, + fProc: func(service.TTLSource, ...service.TTLCondition) error { + return pErr // force processing function to return created error + }, + } + + // ascertain error returns as expected + require.EqualError(t, + s.preProcess(ctx, req), + pErr.Error(), + ) + }) +} + +func Test_coreTTLCondPreparer_prepareTTLCondition(t *testing.T) { + ctx := context.TODO() + + // create container ID + cid := testObjectAddress(t).CID + + // // create network address + // mAddr := testNode(t, 0) + // + // // create custom AddressStore + // as := &testTTLEntity{ + // res: mAddr, // force AddressStore to return created address + // } + + t.Run("empty request", func(t *testing.T) { + require.PanicsWithValue(t, pmEmptyServiceRequest, func() { + // ascertain that nil request causes panic + _ = new(coreTTLCondPreparer).prepareTTLCondition(ctx, nil) + }) + }) + + t.Run("forwarding TTL", func(t *testing.T) { + s := &coreTTLCondPreparer{ + curAffChecker: new(testTTLEntity), + prevAffChecker: new(testTTLEntity), + } + + cond := s.prepareTTLCondition(ctx, new(testTTLEntity)) + + // ascertain that error returns as expected + require.NoError(t, cond(service.SingleForwardingTTL)) + }) + + t.Run("non-forwarding TTL", func(t *testing.T) { + t.Run("container non-affiliation", func(t *testing.T) { + t.Run("disallow previous epoch affiliation", func(t *testing.T) { + // create custom serviceRequest 
for test + req := &testTTLEntity{res: []interface{}{ + cid, // force serviceRequest to return cid + false, // force serviceRequest to disallow previous network map + }} + + s := &coreTTLCondPreparer{ + curAffChecker: &testTTLEntity{ + f: func(items ...interface{}) { + t.Run("correct current epoch affiliation checker params", func(t *testing.T) { + require.Equal(t, cid, items[0].(CID)) + }) + }, + res: affAbsence, // force current epoch containerAffiliationChecker to return affAbsence + }, + prevAffChecker: &testTTLEntity{ + f: func(items ...interface{}) { + t.Run("correct previous epoch affiliation checker params", func(t *testing.T) { + require.Equal(t, cid, items[0].(CID)) + }) + }, + res: affPresence, // force previous epoch containerAffiliationChecker to return affPresence + }, + } + + cond := s.prepareTTLCondition(ctx, req) + + // ascertain that error returns as expected + require.EqualError(t, + cond(service.SingleForwardingTTL-1), // pass any non-forwarding TTL + errNotLocalContainer.Error(), + ) + }) + + t.Run("allow previous epoch affiliation", func(t *testing.T) { + // create custom serviceRequest for test + req := &testTTLEntity{res: []interface{}{ + cid, // force serviceRequest to return cid + true, // force serviceRequest to allow previous network map + }} + + s := &coreTTLCondPreparer{ + curAffChecker: &testTTLEntity{ + res: affAbsence, // force current epoch containerAffiliationChecker to return affAbsence + }, + prevAffChecker: &testTTLEntity{ + res: affAbsence, // force previous epoch containerAffiliationChecker to return affAbsence + }, + } + + cond := s.prepareTTLCondition(ctx, req) + + // ascertain that error returns as expected + require.EqualError(t, + cond(service.SingleForwardingTTL-1), // pass any non-forwarding TTL + errNotLocalContainer.Error(), + ) + }) + }) + + t.Run("container affiliation", func(t *testing.T) { + t.Run("disallow previous epoch affiliation", func(t *testing.T) { + // create custom serviceRequest for test + req := &testTTLEntity{res: []interface{}{ + cid, // force serviceRequest to return cid + false, // force serviceRequest to disallow previous network map + }} + + s := &coreTTLCondPreparer{ + curAffChecker: &testTTLEntity{ + res: affPresence, // force current epoch containerAffiliationChecker to return affPresence + }, + prevAffChecker: &testTTLEntity{ + res: affAbsence, // force previous epoch containerAffiliationChecker to return affAbsence + }, + } + + cond := s.prepareTTLCondition(ctx, req) + + // ascertain that error returns as expected + require.NoError(t, + cond(service.SingleForwardingTTL-1), // pass any non-forwarding TTL + ) + }) + + t.Run("allow previous epoch affiliation", func(t *testing.T) { + // create custom serviceRequest for test + req := &testTTLEntity{res: []interface{}{ + cid, // force serviceRequest to return cid + true, // force serviceRequest to allow previous network map + }} + + s := &coreTTLCondPreparer{ + curAffChecker: &testTTLEntity{ + res: affAbsence, // force current epoch containerAffiliationChecker to return affAbsence + }, + prevAffChecker: &testTTLEntity{ + res: affPresence, // force previous epoch containerAffiliationChecker to return affPresence + }, + } + + cond := s.prepareTTLCondition(ctx, req) + + // ascertain that error returns as expected + require.NoError(t, + cond(service.SingleForwardingTTL-1), // pass any non-forwarding TTL + ) + }) + }) + }) +} + +func Test_coreCnrAffChecker_affiliated(t *testing.T) { + ctx := context.TODO() + + // create container ID + cid := testObjectAddress(t).CID + + log := 
zap.L() + + t.Run("local network address store error", func(t *testing.T) { + // create custom error for test + saErr := internal.Error("test error for self addr store") + + s := &corePlacementUtil{ + localAddrStore: &testTTLEntity{ + err: saErr, // force address store to return saErr + }, + log: log, + } + + require.Equal(t, affUnknown, s.affiliated(ctx, cid)) + }) + + t.Run("placement build result", func(t *testing.T) { + // create network address + mAddr := testNode(t, 0) + + // create custom AddressStore + as := &testTTLEntity{ + res: mAddr, // force AddressStore to return created address + } + + t.Run("error", func(t *testing.T) { + pb := &testTTLEntity{ + f: func(items ...interface{}) { + t.Run("correct placement builder params", func(t *testing.T) { + require.Equal(t, mAddr, items[0].(multiaddr.Multiaddr)) + require.Equal(t, cid, items[1].(CID)) + require.Equal(t, true, items[2].(bool)) + }) + }, + } + + pb.err = internal.Error("") // force Placer to return some non-nil error + + s := &corePlacementUtil{ + prevNetMap: true, + localAddrStore: as, + placementBuilder: pb, + log: log, + } + + require.Equal(t, affUnknown, s.affiliated(ctx, cid)) + + pb.err = container.ErrNotFound + + require.Equal(t, affNotFound, s.affiliated(ctx, cid)) + }) + + t.Run("no error", func(t *testing.T) { + t.Run("affiliation", func(t *testing.T) { + s := &corePlacementUtil{ + localAddrStore: as, + placementBuilder: &testTTLEntity{ + res: true, // force Placer to return true, nil + }, + log: log, + } + + require.Equal(t, affPresence, s.affiliated(ctx, cid)) + }) + + t.Run("non-affiliation", func(t *testing.T) { + s := &corePlacementUtil{ + localAddrStore: as, + placementBuilder: &testTTLEntity{ + res: false, // force Placer to return false, nil + }, + log: log, + } + + require.Equal(t, affAbsence, s.affiliated(ctx, cid)) + }) + }) + }) +} + +// testNode returns 0.0.0.0:(8000+num). +func testNode(t *testing.T, num int) multiaddr.Multiaddr { + mAddr, err := multiaddr.NewMultiaddr("/ip4/0.0.0.0/tcp/" + strconv.Itoa(8000+num)) + require.NoError(t, err) + return mAddr +} + +// testObjectAddress returns new random object address. +func testObjectAddress(t *testing.T) Address { + oid, err := refs.NewObjectID() + require.NoError(t, err) + return Address{CID: refs.CIDForBytes(testData(t, refs.CIDSize)), ObjectID: oid} +} + +// testData returns size bytes of random data. +func testData(t *testing.T, size int) []byte { + res := make([]byte, size) + _, err := rand.Read(res) + require.NoError(t, err) + return res +} diff --git a/services/public/object/verb.go b/services/public/object/verb.go new file mode 100644 index 000000000..8551b91f1 --- /dev/null +++ b/services/public/object/verb.go @@ -0,0 +1,79 @@ +package object + +import ( + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/service" +) + +// Verb is a type alias of +// Token_Info_Verb from service package of neofs-api-go. 
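+// For example, a session token issued with the Get verb may spawn Get, Head,
+// Range and Search requests, but not Put, Delete or RangeHash (see the spawn
+// masks and allowedSpawn below).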
+type Verb = service.Token_Info_Verb + +const ( + undefinedVerbDesc uint32 = 1 << iota + putVerbDesc + getVerbDesc + headVerbDesc + deleteVerbDesc + searchVerbDesc + rangeVerbDesc + rangeHashVerbDesc +) + +const ( + headSpawnMask = headVerbDesc | getVerbDesc | putVerbDesc | rangeVerbDesc | rangeHashVerbDesc + rangeHashSpawnMask = rangeHashVerbDesc + rangeSpawnMask = rangeVerbDesc | getVerbDesc + getSpawnMask = getVerbDesc + putSpawnMask = putVerbDesc | deleteVerbDesc + deleteSpawnMask = deleteVerbDesc + searchSpawnMask = searchVerbDesc | getVerbDesc | putVerbDesc | headVerbDesc | rangeVerbDesc | rangeHashVerbDesc | deleteVerbDesc +) + +func toVerbDesc(verb Verb) uint32 { + switch verb { + case service.Token_Info_Put: + return putVerbDesc + case service.Token_Info_Get: + return getVerbDesc + case service.Token_Info_Head: + return headVerbDesc + case service.Token_Info_Delete: + return deleteVerbDesc + case service.Token_Info_Search: + return searchVerbDesc + case service.Token_Info_Range: + return rangeVerbDesc + case service.Token_Info_RangeHash: + return rangeHashVerbDesc + default: + return undefinedVerbDesc + } +} + +func toSpawnMask(rt object.RequestType) uint32 { + switch rt { + case object.RequestPut: + return putSpawnMask + case object.RequestGet: + return getSpawnMask + case object.RequestHead: + return headSpawnMask + case object.RequestDelete: + return deleteSpawnMask + case object.RequestSearch: + return searchSpawnMask + case object.RequestRange: + return rangeSpawnMask + case object.RequestRangeHash: + return rangeHashSpawnMask + default: + return undefinedVerbDesc + } +} + +func allowedSpawn(from Verb, to object.RequestType) bool { + desc := toVerbDesc(from) + + return toSpawnMask(to)&desc == desc +} diff --git a/services/public/object/verb_test.go b/services/public/object/verb_test.go new file mode 100644 index 000000000..0c01e4bed --- /dev/null +++ b/services/public/object/verb_test.go @@ -0,0 +1,124 @@ +package object + +import ( + "testing" + + "github.com/nspcc-dev/neofs-api-go/object" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/stretchr/testify/require" +) + +func TestAllowedSpawn(t *testing.T) { + items := []struct { + to object.RequestType + ok []Verb + fail []Verb + }{ + { // Put + to: object.RequestPut, + ok: []Verb{ + service.Token_Info_Put, + service.Token_Info_Delete, + }, + fail: []Verb{ + service.Token_Info_Get, + service.Token_Info_Head, + service.Token_Info_Range, + service.Token_Info_RangeHash, + }, + }, + { // Get + to: object.RequestGet, + ok: []Verb{ + service.Token_Info_Get, + }, + fail: []Verb{ + service.Token_Info_Put, + service.Token_Info_Delete, + service.Token_Info_RangeHash, + service.Token_Info_Head, + service.Token_Info_Search, + service.Token_Info_Range, + }, + }, + { // Head + to: object.RequestHead, + ok: []Verb{ + service.Token_Info_Head, + service.Token_Info_Put, + service.Token_Info_Range, + service.Token_Info_Get, + service.Token_Info_RangeHash, + }, + fail: []Verb{ + service.Token_Info_Search, + service.Token_Info_Delete, + }, + }, + { // Delete + to: object.RequestDelete, + ok: []Verb{ + service.Token_Info_Delete, + }, + fail: []Verb{ + service.Token_Info_Get, + service.Token_Info_Head, + service.Token_Info_Range, + service.Token_Info_RangeHash, + service.Token_Info_Put, + service.Token_Info_Search, + }, + }, + { // Search + to: object.RequestSearch, + ok: []Verb{ + service.Token_Info_Put, + service.Token_Info_Get, + service.Token_Info_Head, + service.Token_Info_Delete, + service.Token_Info_Range, + 
service.Token_Info_RangeHash, + service.Token_Info_Search, + }, + fail: []Verb{}, + }, + { // Range + to: object.RequestRange, + ok: []Verb{ + service.Token_Info_Get, + service.Token_Info_Range, + }, + fail: []Verb{ + service.Token_Info_Put, + service.Token_Info_Delete, + service.Token_Info_RangeHash, + service.Token_Info_Head, + service.Token_Info_Search, + }, + }, + { // RangeHash + to: object.RequestRangeHash, + ok: []Verb{ + service.Token_Info_RangeHash, + }, + fail: []Verb{ + service.Token_Info_Put, + service.Token_Info_Get, + service.Token_Info_Delete, + service.Token_Info_Range, + service.Token_Info_Head, + service.Token_Info_Search, + }, + }, + } + + for _, item := range items { + for _, from := range item.ok { + require.True(t, allowedSpawn(from, item.to)) + } + + for _, from := range item.fail { + require.False(t, allowedSpawn(from, item.to)) + } + } +} diff --git a/services/public/object/verification.go b/services/public/object/verification.go new file mode 100644 index 000000000..de51365c8 --- /dev/null +++ b/services/public/object/verification.go @@ -0,0 +1,36 @@ +package object + +import ( + "context" + + "github.com/nspcc-dev/neofs-api-go/service" +) + +type ( + verifyRequestFunc func(token service.RequestVerifyData) error + + // verifyPreProcessor is an implementation of requestPreProcessor interface. + verifyPreProcessor struct { + // Verifying function. + fVerify verifyRequestFunc + } +) + +var _ requestPreProcessor = (*verifyPreProcessor)(nil) + +// requestPreProcessor method implementation. +// +// Panics with pmEmptyServiceRequest on empty request. +// +// Returns result of internal requestVerifyFunc instance. +func (s *verifyPreProcessor) preProcess(_ context.Context, req serviceRequest) (err error) { + if req == nil { + panic(pmEmptyServiceRequest) + } + + if err = s.fVerify(req); err != nil { + err = errUnauthenticated + } + + return +} diff --git a/services/public/object/verification_test.go b/services/public/object/verification_test.go new file mode 100644 index 000000000..b7c305c08 --- /dev/null +++ b/services/public/object/verification_test.go @@ -0,0 +1,63 @@ +package object + +import ( + "context" + "testing" + + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/stretchr/testify/require" +) + +type ( + // Entity for mocking interfaces. + // Implementation of any interface intercepts arguments via f (if not nil). + // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. + testVerificationEntity struct { + // Set of interfaces which testCommonEntity must implement, but some methods from those does not call. + serviceRequest + + // Argument interceptor. Used for ascertain of correct parameter passage between components. + f func(...interface{}) + // Mocked result of any interface. + res interface{} + // Mocked error of any interface. 
+ err error + } +) + +func Test_verifyPreProcessor_preProcess(t *testing.T) { + ctx := context.TODO() + + t.Run("empty request", func(t *testing.T) { + require.PanicsWithValue(t, pmEmptyServiceRequest, func() { + _ = new(verifyPreProcessor).preProcess(ctx, nil) + }) + }) + + t.Run("correct result", func(t *testing.T) { + t.Run("failure", func(t *testing.T) { + // create custom error + vErr := internal.Error("test error for verifying func") + + s := &verifyPreProcessor{ + fVerify: func(service.RequestVerifyData) error { return vErr }, // force requestVerifyFunc to return vErr + } + + // ascertain that error returns as expected + require.EqualError(t, + s.preProcess(ctx, new(testVerificationEntity)), + errUnauthenticated.Error(), + ) + }) + + t.Run("success", func(t *testing.T) { + s := &verifyPreProcessor{ + fVerify: func(service.RequestVerifyData) error { return nil }, // force requestVerifyFunc to return nil + } + + // ascertain that nil error returns as expected + require.NoError(t, s.preProcess(ctx, new(testVerificationEntity))) + }) + }) +} diff --git a/services/public/session/create.go b/services/public/session/create.go new file mode 100644 index 000000000..85696fbd2 --- /dev/null +++ b/services/public/session/create.go @@ -0,0 +1,53 @@ +package session + +import ( + "context" + "errors" + + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-api-go/session" +) + +var errExpiredSession = errors.New("expired session") + +func (s sessionService) Create(ctx context.Context, req *CreateRequest) (*CreateResponse, error) { + // check lifetime + expired := req.ExpirationEpoch() + if s.epochReceiver.Epoch() > expired { + return nil, errExpiredSession + } + + // generate private token for session + pToken, err := session.NewPrivateToken(expired) + if err != nil { + return nil, err + } + + pkBytes, err := session.PublicSessionToken(pToken) + if err != nil { + return nil, err + } + + // generate token ID + tokenID, err := refs.NewUUID() + if err != nil { + return nil, err + } + + // create private token storage key + pTokenKey := session.PrivateTokenKey{} + pTokenKey.SetOwnerID(req.GetOwnerID()) + pTokenKey.SetTokenID(tokenID) + + // store private token + if err := s.ts.Store(pTokenKey, pToken); err != nil { + return nil, err + } + + // construct response + resp := new(session.CreateResponse) + resp.SetID(tokenID) + resp.SetSessionKey(pkBytes) + + return resp, nil +} diff --git a/services/public/session/service.go b/services/public/session/service.go new file mode 100644 index 000000000..3accd0796 --- /dev/null +++ b/services/public/session/service.go @@ -0,0 +1,66 @@ +package session + +import ( + "github.com/nspcc-dev/neofs-api-go/session" + "github.com/nspcc-dev/neofs-node/modules/grpc" + "go.uber.org/zap" +) + +type ( + sessionService struct { + ts TokenStore + log *zap.Logger + + epochReceiver EpochReceiver + } + + // Service is an interface of the server of Session service. + Service interface { + grpc.Service + session.SessionServer + } + + // EpochReceiver is an interface of the container of epoch number with read access. + EpochReceiver interface { + Epoch() uint64 + } + + // Params groups the parameters of Session service server's constructor. + Params struct { + TokenStore TokenStore + + Logger *zap.Logger + + EpochReceiver EpochReceiver + } + + // TokenStore is a type alias of + // TokenStore from session package of neofs-api-go. 
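+	//
+	// The Create handler stores generated private tokens here under a
+	// PrivateTokenKey built from the request owner ID and the generated
+	// token ID (see create.go):
+	//
+	//	pTokenKey := session.PrivateTokenKey{}
+	//	pTokenKey.SetOwnerID(req.GetOwnerID())
+	//	pTokenKey.SetTokenID(tokenID)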
+ TokenStore = session.PrivateTokenStore + + // CreateRequest is a type alias of + // CreateRequest from session package of neofs-api-go. + CreateRequest = session.CreateRequest + + // CreateResponse is a type alias of + // CreateResponse from session package of neofs-api-go. + CreateResponse = session.CreateResponse +) + +// New is an Session service server's constructor. +func New(p Params) Service { + return &sessionService{ + ts: p.TokenStore, + log: p.Logger, + + epochReceiver: p.EpochReceiver, + } +} + +func (sessionService) Name() string { + return "Session Server" +} + +func (s sessionService) Register(srv *grpc.Server) { + session.RegisterSessionServer(srv, s) +} diff --git a/services/public/session/service_test.go b/services/public/session/service_test.go new file mode 100644 index 000000000..82f85fac1 --- /dev/null +++ b/services/public/session/service_test.go @@ -0,0 +1,3 @@ +package session + +// TODO: write tests diff --git a/services/public/state/service.go b/services/public/state/service.go new file mode 100644 index 000000000..14d19c10f --- /dev/null +++ b/services/public/state/service.go @@ -0,0 +1,324 @@ +package state + +import ( + "context" + "crypto/ecdsa" + "encoding/hex" + "strconv" + + "github.com/nspcc-dev/neofs-api-go/bootstrap" + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-api-go/state" + crypto "github.com/nspcc-dev/neofs-crypto" + "github.com/nspcc-dev/neofs-node/internal" + "github.com/nspcc-dev/neofs-node/lib/core" + "github.com/nspcc-dev/neofs-node/lib/implementations" + "github.com/nspcc-dev/neofs-node/modules/grpc" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/spf13/viper" + "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type ( + // Service is an interface of the server of State service. + Service interface { + state.StatusServer + grpc.Service + Healthy() error + } + + // HealthChecker is an interface of node healthiness checking tool. + HealthChecker interface { + Name() string + Healthy() bool + } + + // Stater is an interface of the node's network state storage with read access. + Stater interface { + NetworkState() *bootstrap.SpreadMap + } + + // Params groups the parameters of State service server's constructor. + Params struct { + Stater Stater + + Logger *zap.Logger + + Viper *viper.Viper + + Checkers []HealthChecker + + PrivateKey *ecdsa.PrivateKey + + MorphNetmapContract *implementations.MorphNetmapContract + } + + stateService struct { + state Stater + config *viper.Viper + checkers []HealthChecker + private *ecdsa.PrivateKey + owners map[refs.OwnerID]struct{} + + stateUpdater *implementations.MorphNetmapContract + } + + // HealthRequest is a type alias of + // HealthRequest from state package of neofs-api-go. + HealthRequest = state.HealthRequest +) + +const ( + errEmptyViper = internal.Error("empty config") + errEmptyLogger = internal.Error("empty logger") + errEmptyStater = internal.Error("empty stater") + errUnknownChangeState = internal.Error("received unknown state") +) + +const msgMissingRequestInitiator = "missing request initiator" + +var requestVerifyFunc = core.VerifyRequestWithSignatures + +// New is an State service server's constructor. 
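+//
+// A minimal construction sketch (the concrete Stater, HealthChecker and key
+// values below are illustrative placeholders):
+//
+//	svc, err := New(Params{
+//		Logger:     zap.L(),
+//		Viper:      viper.New(),
+//		Stater:     myStater,   // any Stater implementation
+//		PrivateKey: key,        // *ecdsa.PrivateKey
+//		Checkers:   []HealthChecker{myChecker},
+//	})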
+func New(p Params) (Service, error) { + switch { + case p.Logger == nil: + return nil, errEmptyLogger + case p.Viper == nil: + return nil, errEmptyViper + case p.Stater == nil: + return nil, errEmptyStater + case p.PrivateKey == nil: + return nil, crypto.ErrEmptyPrivateKey + } + + svc := &stateService{ + config: p.Viper, + state: p.Stater, + private: p.PrivateKey, + owners: fetchOwners(p.Logger, p.Viper), + checkers: make([]HealthChecker, 0, len(p.Checkers)), + + stateUpdater: p.MorphNetmapContract, + } + + for i, checker := range p.Checkers { + if checker == nil { + p.Logger.Debug("ignore empty checker", + zap.Int("index", i)) + continue + } + + p.Logger.Info("register health-checker", + zap.String("name", checker.Name())) + + svc.checkers = append(svc.checkers, checker) + } + + return svc, nil +} + +func fetchOwners(l *zap.Logger, v *viper.Viper) map[refs.OwnerID]struct{} { + // if config.yml used: + items := v.GetStringSlice("node.rpc.owners") + + for i := 0; ; i++ { + item := v.GetString("node.rpc.owners." + strconv.Itoa(i)) + + if item == "" { + l.Info("stat: skip empty owner", zap.Int("idx", i)) + break + } + + items = append(items, item) + } + + result := make(map[refs.OwnerID]struct{}, len(items)) + + for i := range items { + var owner refs.OwnerID + + if data, err := hex.DecodeString(items[i]); err != nil { + l.Warn("stat: skip wrong hex data", + zap.Int("idx", i), + zap.String("key", items[i]), + zap.Error(err)) + + continue + } else if key := crypto.UnmarshalPublicKey(data); key == nil { + l.Warn("stat: skip wrong key", + zap.Int("idx", i), + zap.String("key", items[i])) + continue + } else if owner, err = refs.NewOwnerID(key); err != nil { + l.Warn("stat: skip wrong key", + zap.Int("idx", i), + zap.String("key", items[i]), + zap.Error(err)) + continue + } + + result[owner] = struct{}{} + + l.Info("rpc owner added", zap.Stringer("owner", owner)) + } + + return result +} + +func nonForwarding(ttl uint32) error { + if ttl != service.NonForwardingTTL { + return status.Error(codes.InvalidArgument, service.ErrInvalidTTL.Error()) + } + + return nil +} + +func requestInitiator(req service.SignKeyPairSource) *ecdsa.PublicKey { + if signKeys := req.GetSignKeyPairs(); len(signKeys) > 0 { + return signKeys[0].GetPublicKey() + } + + return nil +} + +// ChangeState allows to change current node state of node. +// To permit access, used server config options. +// The request should be signed. 
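+//
+// Note: only the transition to the Offline state is accepted (any other
+// value is rejected with an InvalidArgument status), and the initiator's
+// key must correspond to one of the owners configured under node.rpc.owners.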
+func (s *stateService) ChangeState(ctx context.Context, in *state.ChangeStateRequest) (*state.ChangeStateResponse, error) {
+	// verify request structure
+	if err := requestVerifyFunc(in); err != nil {
+		return nil, status.Error(codes.InvalidArgument, err.Error())
+	}
+
+	// verify change state permission
+	if key := requestInitiator(in); key == nil {
+		return nil, status.Error(codes.InvalidArgument, msgMissingRequestInitiator)
+	} else if owner, err := refs.NewOwnerID(key); err != nil {
+		return nil, status.Error(codes.InvalidArgument, err.Error())
+	} else if _, ok := s.owners[owner]; !ok {
+		return nil, status.Error(codes.PermissionDenied, service.ErrWrongOwner.Error())
+	}
+
+	// convert State field to NodeState
+	if in.GetState() != state.ChangeStateRequest_Offline {
+		return nil, status.Error(codes.InvalidArgument, errUnknownChangeState.Error())
+	}
+
+	// set update state parameters
+	p := implementations.UpdateStateParams{}
+	p.SetState(implementations.StateOffline)
+	p.SetKey(
+		crypto.MarshalPublicKey(&s.private.PublicKey),
+	)
+
+	if err := s.stateUpdater.UpdateState(p); err != nil {
+		return nil, status.Error(codes.Aborted, err.Error())
+	}
+
+	return new(state.ChangeStateResponse), nil
+}
+
+// DumpConfig dumps the settings of the current node.
+// Access is restricted to the owners listed in the server configuration.
+// The request must be signed.
+func (s *stateService) DumpConfig(_ context.Context, req *state.DumpRequest) (*state.DumpResponse, error) {
+	if err := service.ProcessRequestTTL(req, nonForwarding); err != nil {
+		return nil, err
+	} else if err = requestVerifyFunc(req); err != nil {
+		return nil, status.Error(codes.InvalidArgument, err.Error())
+	} else if key := requestInitiator(req); key == nil {
+		return nil, status.Error(codes.InvalidArgument, msgMissingRequestInitiator)
+	} else if owner, err := refs.NewOwnerID(key); err != nil {
+		return nil, status.Error(codes.InvalidArgument, err.Error())
+	} else if _, ok := s.owners[owner]; !ok {
+		return nil, status.Error(codes.PermissionDenied, service.ErrWrongOwner.Error())
+	}
+
+	return state.EncodeConfig(s.config)
+}
+
+// Netmap returns SpreadMap from Stater (IRState / Place-component).
+func (s *stateService) Netmap(_ context.Context, req *state.NetmapRequest) (*bootstrap.SpreadMap, error) {
+	if err := service.ProcessRequestTTL(req); err != nil {
+		return nil, err
+	} else if err = requestVerifyFunc(req); err != nil {
+		return nil, err
+	}
+
+	if s.state != nil {
+		return s.state.NetworkState(), nil
+	}
+
+	return nil, status.New(codes.Unavailable, "service unavailable").Err()
+}
+
+func (s *stateService) healthy() error {
+	for _, svc := range s.checkers {
+		if !svc.Healthy() {
+			return errors.Errorf("service(%s) unhealthy", svc.Name())
+		}
+	}
+
+	return nil
+}
+
+// Healthy returns the service status as an error; a nil result means the service is healthy.
+func (s *stateService) Healthy() error { return s.healthy() }
+
+// HealthCheck reports whether all registered checkers are healthy.
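+//
+// If any registered checker reports unhealthy, the response carries
+// Healthy: false and the checker's error text in Status, e.g. (with a
+// hypothetical checker named "Foo"):
+//
+//	&state.HealthResponse{Healthy: false, Status: "service(Foo) unhealthy"}
+//
+// Otherwise Healthy is true and Status is "OK".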
+func (s *stateService) HealthCheck(_ context.Context, req *HealthRequest) (*state.HealthResponse, error) { + if err := service.ProcessRequestTTL(req); err != nil { + return nil, err + } else if err = requestVerifyFunc(req); err != nil { + return nil, err + } + + var ( + err = s.healthy() + resp = &state.HealthResponse{Healthy: true, Status: "OK"} + ) + + if err != nil { + resp.Healthy = false + resp.Status = err.Error() + } + + return resp, nil +} + +func (*stateService) Metrics(_ context.Context, req *state.MetricsRequest) (*state.MetricsResponse, error) { + if err := service.ProcessRequestTTL(req); err != nil { + return nil, err + } else if err = requestVerifyFunc(req); err != nil { + return nil, err + } + + return state.EncodeMetrics(prometheus.DefaultGatherer) +} + +func (s *stateService) DumpVars(_ context.Context, req *state.DumpVarsRequest) (*state.DumpVarsResponse, error) { + if err := service.ProcessRequestTTL(req, nonForwarding); err != nil { + return nil, err + } else if err = requestVerifyFunc(req); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } else if key := requestInitiator(req); key == nil { + return nil, status.Error(codes.InvalidArgument, msgMissingRequestInitiator) + } else if owner, err := refs.NewOwnerID(key); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } else if _, ok := s.owners[owner]; !ok { + return nil, status.Error(codes.PermissionDenied, service.ErrWrongOwner.Error()) + } + + return state.EncodeVariables(), nil +} + +// Name of the service. +func (*stateService) Name() string { return "StatusService" } + +// Register service on gRPC server. +func (s *stateService) Register(g *grpc.Server) { state.RegisterStatusServer(g, s) } diff --git a/services/public/state/service_test.go b/services/public/state/service_test.go new file mode 100644 index 000000000..b3a279758 --- /dev/null +++ b/services/public/state/service_test.go @@ -0,0 +1,249 @@ +package state + +import ( + "context" + "crypto/ecdsa" + "encoding/hex" + "encoding/json" + "expvar" + "os" + "strings" + "testing" + + "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-api-go/state" + crypto "github.com/nspcc-dev/neofs-crypto" + "github.com/nspcc-dev/neofs-node/lib/test" + "github.com/spf13/viper" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var requestSignFunc = service.SignRequestData + +func Test_nonForwarding(t *testing.T) { + cases := []struct { + err error + ttl uint32 + name string + }{ + { + name: "ZeroTTL", + ttl: service.ZeroTTL, + err: status.Error(codes.InvalidArgument, service.ErrInvalidTTL.Error()), + }, + { + name: "SingleForwardingTTL", + ttl: service.SingleForwardingTTL, + err: status.Error(codes.InvalidArgument, service.ErrInvalidTTL.Error()), + }, + { + name: "NonForwardingTTL", + ttl: service.NonForwardingTTL, + err: nil, + }, + } + + for i := range cases { + tt := cases[i] + t.Run(tt.name, func(t *testing.T) { + err := nonForwarding(tt.ttl) + switch tt.err { + case nil: + require.NoError(t, err, tt.name) + default: + require.EqualError(t, err, tt.err.Error()) + } + }) + } +} + +func Test_fetchOwners(t *testing.T) { + l := test.NewTestLogger(false) + + t.Run("from config options", func(t *testing.T) { + key0 := test.DecodeKey(0) + require.NotEmpty(t, key0) + + data0 := crypto.MarshalPublicKey(&key0.PublicKey) + hKey0 := hex.EncodeToString(data0) + + owner0, err := 
refs.NewOwnerID(&key0.PublicKey) + require.NoError(t, err) + + v := viper.New() + v.SetDefault("node.rpc.owners", []string{hKey0}) + + owners := fetchOwners(l, v) + require.Len(t, owners, 1) + require.Contains(t, owners, owner0) + }) + + t.Run("from environment and config options", func(t *testing.T) { + key0 := test.DecodeKey(0) + require.NotEmpty(t, key0) + + data0 := crypto.MarshalPublicKey(&key0.PublicKey) + hKey0 := hex.EncodeToString(data0) + + owner0, err := refs.NewOwnerID(&key0.PublicKey) + require.NoError(t, err) + + key1 := test.DecodeKey(1) + require.NotEmpty(t, key1) + + owner1, err := refs.NewOwnerID(&key1.PublicKey) + require.NoError(t, err) + + data1 := crypto.MarshalPublicKey(&key1.PublicKey) + hKey1 := hex.EncodeToString(data1) + + require.NoError(t, os.Setenv("NEOFS_NODE_RPC_OWNERS_0", hKey1)) + + v := viper.New() + v.AutomaticEnv() + v.SetEnvPrefix("NeoFS") + v.SetConfigType("yaml") + v.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + v.SetDefault("node.rpc.owners", []string{hKey0}) + + require.NoError(t, v.ReadConfig(strings.NewReader(""))) + + owners := fetchOwners(l, v) + + require.Len(t, owners, 2) + require.Contains(t, owners, owner0) + require.Contains(t, owners, owner1) + }) +} + +func TestStateService_DumpConfig(t *testing.T) { + cases := []struct { + err error + ttl uint32 + name string + key *ecdsa.PrivateKey + }{ + { + err: nil, + name: "allow", + key: test.DecodeKey(0), + ttl: service.NonForwardingTTL, + }, + { + name: "wrong ttl", + key: test.DecodeKey(0), + ttl: service.SingleForwardingTTL, + err: status.Error(codes.InvalidArgument, service.ErrInvalidTTL.Error()), + }, + } + key := test.DecodeKey(0) + require.NotEmpty(t, key) + + owner, err := refs.NewOwnerID(&key.PublicKey) + require.NoError(t, err) + + owners := map[refs.OwnerID]struct{}{ + owner: {}, + } + + viper.SetDefault("test", true) + + svc := stateService{ + owners: owners, + config: viper.GetViper(), + } + + for i := range cases { + tt := cases[i] + t.Run(tt.name, func(t *testing.T) { + req := new(state.DumpRequest) + + req.SetTTL(tt.ttl) + if tt.key != nil { + require.NoError(t, requestSignFunc(tt.key, req)) + } + + res, err := svc.DumpConfig(context.Background(), req) + switch tt.err { + case nil: + require.NoError(t, err, tt.name) + require.NotEmpty(t, res) + require.NotEmpty(t, res.Config) + default: + require.EqualError(t, err, tt.err.Error()) + require.Empty(t, res) + } + }) + } +} + +func TestStateService_DumpVars(t *testing.T) { + cases := []struct { + err error + ttl uint32 + name string + key *ecdsa.PrivateKey + }{ + { + err: nil, + name: "allow", + key: test.DecodeKey(0), + ttl: service.NonForwardingTTL, + }, + { + name: "wrong ttl", + key: test.DecodeKey(0), + ttl: service.SingleForwardingTTL, + err: status.Error(codes.InvalidArgument, service.ErrInvalidTTL.Error()), + }, + } + key := test.DecodeKey(0) + require.NotEmpty(t, key) + + owner, err := refs.NewOwnerID(&key.PublicKey) + require.NoError(t, err) + + owners := map[refs.OwnerID]struct{}{ + owner: {}, + } + + svc := stateService{owners: owners} + + expvar.NewString("test1").Set("test1") + expvar.NewString("test2").Set("test2") + + for i := range cases { + tt := cases[i] + t.Run(tt.name, func(t *testing.T) { + req := new(state.DumpVarsRequest) + + req.SetTTL(tt.ttl) + if tt.key != nil { + require.NoError(t, requestSignFunc(tt.key, req)) + } + + res, err := svc.DumpVars(nil, req) + switch tt.err { + case nil: + require.NoError(t, err, tt.name) + require.NotEmpty(t, res) + require.NotEmpty(t, res.Variables) + + dump := 
make(map[string]interface{})
+				require.NoError(t, json.Unmarshal(res.Variables, &dump))
+
+				require.Contains(t, dump, "test1")
+				require.Equal(t, "test1", dump["test1"])
+
+				require.Contains(t, dump, "test2")
+				require.Equal(t, "test2", dump["test2"])
+			default:
+				require.EqualError(t, err, tt.err.Error())
+				require.Empty(t, res)
+			}
+		})
+	}
+}
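+
+// Note: DumpVars returns the current expvar state encoded as JSON, so the
+// res.Variables payload checked above unmarshals into a map along the lines
+// of (illustrative): {"test1": "test1", "test2": "test2", ...}.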