forked from TrueCloudLab/frostfs-node
Initial commit
Initial public review release v0.10.0
This commit is contained in:
commit
dadfd90dcd
276 changed files with 43489 additions and 0 deletions
8
.dockerignore
Normal file
8
.dockerignore
Normal file
|
@ -0,0 +1,8 @@
|
|||
.idea
|
||||
.vscode
|
||||
.git
|
||||
docker-compose.yml
|
||||
Dockerfile
|
||||
temp
|
||||
.dockerignore
|
||||
docker
|
1
.gitattributes
vendored
Normal file
1
.gitattributes
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
/**/*.pb.go -diff binary
|
8
.gitignore
vendored
Normal file
8
.gitignore
vendored
Normal file
|
@ -0,0 +1,8 @@
|
|||
bin
|
||||
temp
|
||||
cmd/test
|
||||
/plugins/
|
||||
/vendor/
|
||||
|
||||
testfile
|
||||
.neofs-cli.yml
|
136
.golangci.yml
Normal file
136
.golangci.yml
Normal file
|
@ -0,0 +1,136 @@
|
|||
# https://habr.com/company/roistat/blog/413175/
|
||||
# https://github.com/golangci/golangci-lint
|
||||
linters-settings:
|
||||
govet:
|
||||
check-shadowing: false
|
||||
golint:
|
||||
# minimal confidence for issues, default is 0.8
|
||||
min-confidence: 0.8
|
||||
gofmt:
|
||||
# simplify code: gofmt with `-s` option, true by default
|
||||
simplify: true
|
||||
gocyclo:
|
||||
min-complexity: 30
|
||||
maligned:
|
||||
suggest-new: true
|
||||
dupl:
|
||||
threshold: 100
|
||||
goconst:
|
||||
min-len: 2
|
||||
min-occurrences: 2
|
||||
gosimple:
|
||||
gocritic:
|
||||
# Which checks should be enabled; can't be combined with 'disabled-checks';
|
||||
# See https://go-critic.github.io/overview#checks-overview
|
||||
# To check which checks are enabled run `GL_DEBUG=gocritic golangci-lint run`
|
||||
# By default list of stable checks is used.
|
||||
# enabled-checks:
|
||||
# - rangeValCopy
|
||||
# Which checks should be disabled; can't be combined with 'enabled-checks'; default is empty
|
||||
disabled-checks:
|
||||
- regexpMust
|
||||
# Enable multiple checks by tags, run `GL_DEBUG=gocritic golangci-lint` run to see all tags and checks.
|
||||
# Empty list by default. See https://github.com/go-critic/go-critic#usage -> section "Tags".
|
||||
enabled-tags:
|
||||
- performance
|
||||
|
||||
settings: # settings passed to gocritic
|
||||
captLocal: # must be valid enabled check name
|
||||
paramsOnly: true
|
||||
rangeValCopy:
|
||||
sizeThreshold: 32
|
||||
# depguard:
|
||||
# list-type: blacklist
|
||||
# include-go-root: false
|
||||
# packages:
|
||||
# - github.com/davecgh/go-spew/spew
|
||||
lll:
|
||||
# max line length, lines longer will be reported. Default is 120.
|
||||
# '\t' is counted as 1 character by default, and can be changed with the tab-width option
|
||||
line-length: 120
|
||||
# tab width in spaces. Default to 1.
|
||||
tab-width: 1
|
||||
unused:
|
||||
# treat code as a program (not a library) and report unused exported identifiers; default is false.
|
||||
# XXX: if you enable this setting, unused will report a lot of false-positives in text editors:
|
||||
# if it's called for subdir of a project it can't find funcs usages. All text editor integrations
|
||||
# with golangci-lint call it on a directory with the changed file.
|
||||
check-exported: false
|
||||
unparam:
|
||||
# Inspect exported functions, default is false. Set to true if no external program/library imports your code.
|
||||
# XXX: if you enable this setting, unparam will report a lot of false-positives in text editors:
|
||||
# if it's called for subdir of a project it can't find external interfaces. All text editor integrations
|
||||
# with golangci-lint call it on a directory with the changed file.
|
||||
check-exported: false
|
||||
nakedret:
|
||||
# make an issue if func has more lines of code than this setting and it has naked returns; default is 30
|
||||
max-func-lines: 30
|
||||
|
||||
|
||||
linters:
|
||||
enable-all: true
|
||||
fast: false
|
||||
disable:
|
||||
- gochecknoglobals
|
||||
# - maligned
|
||||
# - prealloc
|
||||
# disable-all: false
|
||||
# presets:
|
||||
# - bugs
|
||||
# - unused
|
||||
|
||||
# options for analysis running
|
||||
run:
|
||||
# default concurrency is a available CPU number
|
||||
# concurrency: 8
|
||||
|
||||
# timeout for analysis, e.g. 30s, 5m, default is 1m
|
||||
# deadline: 1m
|
||||
|
||||
# exit code when at least one issue was found, default is 1
|
||||
# issues-exit-code: 1
|
||||
|
||||
# include test files or not, default is true
|
||||
# tests: true
|
||||
|
||||
# list of build tags, all linters use it. Default is empty list.
|
||||
# build-tags:
|
||||
# - mytag
|
||||
|
||||
# which dirs to skip: they won't be analyzed;
|
||||
# can use regexp here: generated.*, regexp is applied on full path;
|
||||
# default value is empty list, but next dirs are always skipped independently
|
||||
# from this option's value:
|
||||
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
|
||||
# skip-dirs:
|
||||
# - src/external_libs
|
||||
# - autogenerated_by_my_lib
|
||||
|
||||
# which files to skip: they will be analyzed, but issues from them
|
||||
# won't be reported. Default value is empty list, but there is
|
||||
# no need to include all autogenerated files, we confidently recognize
|
||||
# autogenerated files. If it's not please let us know.
|
||||
# skip-files:
|
||||
# - ".*\\.my\\.go$"
|
||||
# - lib/bad.go
|
||||
|
||||
# by default isn't set. If set we pass it to "go list -mod={option}". From "go help modules":
|
||||
# If invoked with -mod=readonly, the go command is disallowed from the implicit
|
||||
# automatic updating of go.mod described above. Instead, it fails when any changes
|
||||
# to go.mod are needed. This setting is most useful to check that go.mod does
|
||||
# not need updates, such as in a continuous integration and testing system.
|
||||
# If invoked with -mod=vendor, the go command assumes that the vendor
|
||||
# directory holds the correct copies of dependencies and ignores
|
||||
# the dependency descriptions in go.mod.
|
||||
# modules-download-mode: readonly|release|vendor
|
||||
|
||||
# output configuration options
|
||||
output:
|
||||
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
|
||||
format: tab
|
||||
|
||||
# print lines of code with issue, default is true
|
||||
print-issued-lines: true
|
||||
|
||||
# print linter name in the end of issue text, default is true
|
||||
print-linter-name: true
|
6
CHANGELOG.md
Normal file
6
CHANGELOG.md
Normal file
|
@ -0,0 +1,6 @@
|
|||
# Changelog
|
||||
Changelog for NeoFS Node
|
||||
|
||||
## [0.10.0] - 2020-07-10
|
||||
|
||||
First public review release.
|
3
CONTRIBUTING.md
Normal file
3
CONTRIBUTING.md
Normal file
|
@ -0,0 +1,3 @@
|
|||
# Contributing
|
||||
|
||||
We do not accept any contributions. As yet.
|
21
Dockerfile
Normal file
21
Dockerfile
Normal file
|
@ -0,0 +1,21 @@
|
|||
FROM golang:1.14-alpine as builder
|
||||
|
||||
ARG BUILD=now
|
||||
ARG VERSION=dev
|
||||
ARG REPO=repository
|
||||
|
||||
WORKDIR /src
|
||||
|
||||
COPY . /src
|
||||
|
||||
RUN apk add --update make bash
|
||||
RUN make bin/neofs-node
|
||||
|
||||
# Executable image
|
||||
FROM scratch AS neofs-node
|
||||
|
||||
WORKDIR /
|
||||
|
||||
COPY --from=builder /src/bin/neofs-node /bin/neofs-node
|
||||
|
||||
CMD ["neofs-node"]
|
674
LICENSE
Normal file
674
LICENSE
Normal file
|
@ -0,0 +1,674 @@
|
|||
GNU GENERAL PUBLIC LICENSE
|
||||
Version 3, 29 June 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The GNU General Public License is a free, copyleft license for
|
||||
software and other kinds of works.
|
||||
|
||||
The licenses for most software and other practical works are designed
|
||||
to take away your freedom to share and change the works. By contrast,
|
||||
the GNU General Public License is intended to guarantee your freedom to
|
||||
share and change all versions of a program--to make sure it remains free
|
||||
software for all its users. We, the Free Software Foundation, use the
|
||||
GNU General Public License for most of our software; it applies also to
|
||||
any other work released this way by its authors. You can apply it to
|
||||
your programs, too.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
them if you wish), that you receive source code or can get it if you
|
||||
want it, that you can change the software or use pieces of it in new
|
||||
free programs, and that you know you can do these things.
|
||||
|
||||
To protect your rights, we need to prevent others from denying you
|
||||
these rights or asking you to surrender the rights. Therefore, you have
|
||||
certain responsibilities if you distribute copies of the software, or if
|
||||
you modify it: responsibilities to respect the freedom of others.
|
||||
|
||||
For example, if you distribute copies of such a program, whether
|
||||
gratis or for a fee, you must pass on to the recipients the same
|
||||
freedoms that you received. You must make sure that they, too, receive
|
||||
or can get the source code. And you must show them these terms so they
|
||||
know their rights.
|
||||
|
||||
Developers that use the GNU GPL protect your rights with two steps:
|
||||
(1) assert copyright on the software, and (2) offer you this License
|
||||
giving you legal permission to copy, distribute and/or modify it.
|
||||
|
||||
For the developers' and authors' protection, the GPL clearly explains
|
||||
that there is no warranty for this free software. For both users' and
|
||||
authors' sake, the GPL requires that modified versions be marked as
|
||||
changed, so that their problems will not be attributed erroneously to
|
||||
authors of previous versions.
|
||||
|
||||
Some devices are designed to deny users access to install or run
|
||||
modified versions of the software inside them, although the manufacturer
|
||||
can do so. This is fundamentally incompatible with the aim of
|
||||
protecting users' freedom to change the software. The systematic
|
||||
pattern of such abuse occurs in the area of products for individuals to
|
||||
use, which is precisely where it is most unacceptable. Therefore, we
|
||||
have designed this version of the GPL to prohibit the practice for those
|
||||
products. If such problems arise substantially in other domains, we
|
||||
stand ready to extend this provision to those domains in future versions
|
||||
of the GPL, as needed to protect the freedom of users.
|
||||
|
||||
Finally, every program is threatened constantly by software patents.
|
||||
States should not allow patents to restrict development and use of
|
||||
software on general-purpose computers, but in those that do, we wish to
|
||||
avoid the special danger that patents applied to a free program could
|
||||
make it effectively proprietary. To prevent this, the GPL assures that
|
||||
patents cannot be used to render the program non-free.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
TERMS AND CONDITIONS
|
||||
|
||||
0. Definitions.
|
||||
|
||||
"This License" refers to version 3 of the GNU General Public License.
|
||||
|
||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||
works, such as semiconductor masks.
|
||||
|
||||
"The Program" refers to any copyrightable work licensed under this
|
||||
License. Each licensee is addressed as "you". "Licensees" and
|
||||
"recipients" may be individuals or organizations.
|
||||
|
||||
To "modify" a work means to copy from or adapt all or part of the work
|
||||
in a fashion requiring copyright permission, other than the making of an
|
||||
exact copy. The resulting work is called a "modified version" of the
|
||||
earlier work or a work "based on" the earlier work.
|
||||
|
||||
A "covered work" means either the unmodified Program or a work based
|
||||
on the Program.
|
||||
|
||||
To "propagate" a work means to do anything with it that, without
|
||||
permission, would make you directly or secondarily liable for
|
||||
infringement under applicable copyright law, except executing it on a
|
||||
computer or modifying a private copy. Propagation includes copying,
|
||||
distribution (with or without modification), making available to the
|
||||
public, and in some countries other activities as well.
|
||||
|
||||
To "convey" a work means any kind of propagation that enables other
|
||||
parties to make or receive copies. Mere interaction with a user through
|
||||
a computer network, with no transfer of a copy, is not conveying.
|
||||
|
||||
An interactive user interface displays "Appropriate Legal Notices"
|
||||
to the extent that it includes a convenient and prominently visible
|
||||
feature that (1) displays an appropriate copyright notice, and (2)
|
||||
tells the user that there is no warranty for the work (except to the
|
||||
extent that warranties are provided), that licensees may convey the
|
||||
work under this License, and how to view a copy of this License. If
|
||||
the interface presents a list of user commands or options, such as a
|
||||
menu, a prominent item in the list meets this criterion.
|
||||
|
||||
1. Source Code.
|
||||
|
||||
The "source code" for a work means the preferred form of the work
|
||||
for making modifications to it. "Object code" means any non-source
|
||||
form of a work.
|
||||
|
||||
A "Standard Interface" means an interface that either is an official
|
||||
standard defined by a recognized standards body, or, in the case of
|
||||
interfaces specified for a particular programming language, one that
|
||||
is widely used among developers working in that language.
|
||||
|
||||
The "System Libraries" of an executable work include anything, other
|
||||
than the work as a whole, that (a) is included in the normal form of
|
||||
packaging a Major Component, but which is not part of that Major
|
||||
Component, and (b) serves only to enable use of the work with that
|
||||
Major Component, or to implement a Standard Interface for which an
|
||||
implementation is available to the public in source code form. A
|
||||
"Major Component", in this context, means a major essential component
|
||||
(kernel, window system, and so on) of the specific operating system
|
||||
(if any) on which the executable work runs, or a compiler used to
|
||||
produce the work, or an object code interpreter used to run it.
|
||||
|
||||
The "Corresponding Source" for a work in object code form means all
|
||||
the source code needed to generate, install, and (for an executable
|
||||
work) run the object code and to modify the work, including scripts to
|
||||
control those activities. However, it does not include the work's
|
||||
System Libraries, or general-purpose tools or generally available free
|
||||
programs which are used unmodified in performing those activities but
|
||||
which are not part of the work. For example, Corresponding Source
|
||||
includes interface definition files associated with source files for
|
||||
the work, and the source code for shared libraries and dynamically
|
||||
linked subprograms that the work is specifically designed to require,
|
||||
such as by intimate data communication or control flow between those
|
||||
subprograms and other parts of the work.
|
||||
|
||||
The Corresponding Source need not include anything that users
|
||||
can regenerate automatically from other parts of the Corresponding
|
||||
Source.
|
||||
|
||||
The Corresponding Source for a work in source code form is that
|
||||
same work.
|
||||
|
||||
2. Basic Permissions.
|
||||
|
||||
All rights granted under this License are granted for the term of
|
||||
copyright on the Program, and are irrevocable provided the stated
|
||||
conditions are met. This License explicitly affirms your unlimited
|
||||
permission to run the unmodified Program. The output from running a
|
||||
covered work is covered by this License only if the output, given its
|
||||
content, constitutes a covered work. This License acknowledges your
|
||||
rights of fair use or other equivalent, as provided by copyright law.
|
||||
|
||||
You may make, run and propagate covered works that you do not
|
||||
convey, without conditions so long as your license otherwise remains
|
||||
in force. You may convey covered works to others for the sole purpose
|
||||
of having them make modifications exclusively for you, or provide you
|
||||
with facilities for running those works, provided that you comply with
|
||||
the terms of this License in conveying all material for which you do
|
||||
not control copyright. Those thus making or running the covered works
|
||||
for you must do so exclusively on your behalf, under your direction
|
||||
and control, on terms that prohibit them from making any copies of
|
||||
your copyrighted material outside their relationship with you.
|
||||
|
||||
Conveying under any other circumstances is permitted solely under
|
||||
the conditions stated below. Sublicensing is not allowed; section 10
|
||||
makes it unnecessary.
|
||||
|
||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||
|
||||
No covered work shall be deemed part of an effective technological
|
||||
measure under any applicable law fulfilling obligations under article
|
||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||
similar laws prohibiting or restricting circumvention of such
|
||||
measures.
|
||||
|
||||
When you convey a covered work, you waive any legal power to forbid
|
||||
circumvention of technological measures to the extent such circumvention
|
||||
is effected by exercising rights under this License with respect to
|
||||
the covered work, and you disclaim any intention to limit operation or
|
||||
modification of the work as a means of enforcing, against the work's
|
||||
users, your or third parties' legal rights to forbid circumvention of
|
||||
technological measures.
|
||||
|
||||
4. Conveying Verbatim Copies.
|
||||
|
||||
You may convey verbatim copies of the Program's source code as you
|
||||
receive it, in any medium, provided that you conspicuously and
|
||||
appropriately publish on each copy an appropriate copyright notice;
|
||||
keep intact all notices stating that this License and any
|
||||
non-permissive terms added in accord with section 7 apply to the code;
|
||||
keep intact all notices of the absence of any warranty; and give all
|
||||
recipients a copy of this License along with the Program.
|
||||
|
||||
You may charge any price or no price for each copy that you convey,
|
||||
and you may offer support or warranty protection for a fee.
|
||||
|
||||
5. Conveying Modified Source Versions.
|
||||
|
||||
You may convey a work based on the Program, or the modifications to
|
||||
produce it from the Program, in the form of source code under the
|
||||
terms of section 4, provided that you also meet all of these conditions:
|
||||
|
||||
a) The work must carry prominent notices stating that you modified
|
||||
it, and giving a relevant date.
|
||||
|
||||
b) The work must carry prominent notices stating that it is
|
||||
released under this License and any conditions added under section
|
||||
7. This requirement modifies the requirement in section 4 to
|
||||
"keep intact all notices".
|
||||
|
||||
c) You must license the entire work, as a whole, under this
|
||||
License to anyone who comes into possession of a copy. This
|
||||
License will therefore apply, along with any applicable section 7
|
||||
additional terms, to the whole of the work, and all its parts,
|
||||
regardless of how they are packaged. This License gives no
|
||||
permission to license the work in any other way, but it does not
|
||||
invalidate such permission if you have separately received it.
|
||||
|
||||
d) If the work has interactive user interfaces, each must display
|
||||
Appropriate Legal Notices; however, if the Program has interactive
|
||||
interfaces that do not display Appropriate Legal Notices, your
|
||||
work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent
|
||||
works, which are not by their nature extensions of the covered work,
|
||||
and which are not combined with it such as to form a larger program,
|
||||
in or on a volume of a storage or distribution medium, is called an
|
||||
"aggregate" if the compilation and its resulting copyright are not
|
||||
used to limit the access or legal rights of the compilation's users
|
||||
beyond what the individual works permit. Inclusion of a covered work
|
||||
in an aggregate does not cause this License to apply to the other
|
||||
parts of the aggregate.
|
||||
|
||||
6. Conveying Non-Source Forms.
|
||||
|
||||
You may convey a covered work in object code form under the terms
|
||||
of sections 4 and 5, provided that you also convey the
|
||||
machine-readable Corresponding Source under the terms of this License,
|
||||
in one of these ways:
|
||||
|
||||
a) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by the
|
||||
Corresponding Source fixed on a durable physical medium
|
||||
customarily used for software interchange.
|
||||
|
||||
b) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by a
|
||||
written offer, valid for at least three years and valid for as
|
||||
long as you offer spare parts or customer support for that product
|
||||
model, to give anyone who possesses the object code either (1) a
|
||||
copy of the Corresponding Source for all the software in the
|
||||
product that is covered by this License, on a durable physical
|
||||
medium customarily used for software interchange, for a price no
|
||||
more than your reasonable cost of physically performing this
|
||||
conveying of source, or (2) access to copy the
|
||||
Corresponding Source from a network server at no charge.
|
||||
|
||||
c) Convey individual copies of the object code with a copy of the
|
||||
written offer to provide the Corresponding Source. This
|
||||
alternative is allowed only occasionally and noncommercially, and
|
||||
only if you received the object code with such an offer, in accord
|
||||
with subsection 6b.
|
||||
|
||||
d) Convey the object code by offering access from a designated
|
||||
place (gratis or for a charge), and offer equivalent access to the
|
||||
Corresponding Source in the same way through the same place at no
|
||||
further charge. You need not require recipients to copy the
|
||||
Corresponding Source along with the object code. If the place to
|
||||
copy the object code is a network server, the Corresponding Source
|
||||
may be on a different server (operated by you or a third party)
|
||||
that supports equivalent copying facilities, provided you maintain
|
||||
clear directions next to the object code saying where to find the
|
||||
Corresponding Source. Regardless of what server hosts the
|
||||
Corresponding Source, you remain obligated to ensure that it is
|
||||
available for as long as needed to satisfy these requirements.
|
||||
|
||||
e) Convey the object code using peer-to-peer transmission, provided
|
||||
you inform other peers where the object code and Corresponding
|
||||
Source of the work are being offered to the general public at no
|
||||
charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded
|
||||
from the Corresponding Source as a System Library, need not be
|
||||
included in conveying the object code work.
|
||||
|
||||
A "User Product" is either (1) a "consumer product", which means any
|
||||
tangible personal property which is normally used for personal, family,
|
||||
or household purposes, or (2) anything designed or sold for incorporation
|
||||
into a dwelling. In determining whether a product is a consumer product,
|
||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||
product received by a particular user, "normally used" refers to a
|
||||
typical or common use of that class of product, regardless of the status
|
||||
of the particular user or of the way in which the particular user
|
||||
actually uses, or expects or is expected to use, the product. A product
|
||||
is a consumer product regardless of whether the product has substantial
|
||||
commercial, industrial or non-consumer uses, unless such uses represent
|
||||
the only significant mode of use of the product.
|
||||
|
||||
"Installation Information" for a User Product means any methods,
|
||||
procedures, authorization keys, or other information required to install
|
||||
and execute modified versions of a covered work in that User Product from
|
||||
a modified version of its Corresponding Source. The information must
|
||||
suffice to ensure that the continued functioning of the modified object
|
||||
code is in no case prevented or interfered with solely because
|
||||
modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or
|
||||
specifically for use in, a User Product, and the conveying occurs as
|
||||
part of a transaction in which the right of possession and use of the
|
||||
User Product is transferred to the recipient in perpetuity or for a
|
||||
fixed term (regardless of how the transaction is characterized), the
|
||||
Corresponding Source conveyed under this section must be accompanied
|
||||
by the Installation Information. But this requirement does not apply
|
||||
if neither you nor any third party retains the ability to install
|
||||
modified object code on the User Product (for example, the work has
|
||||
been installed in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a
|
||||
requirement to continue to provide support service, warranty, or updates
|
||||
for a work that has been modified or installed by the recipient, or for
|
||||
the User Product in which it has been modified or installed. Access to a
|
||||
network may be denied when the modification itself materially and
|
||||
adversely affects the operation of the network or violates the rules and
|
||||
protocols for communication across the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided,
|
||||
in accord with this section must be in a format that is publicly
|
||||
documented (and with an implementation available to the public in
|
||||
source code form), and must require no special password or key for
|
||||
unpacking, reading or copying.
|
||||
|
||||
7. Additional Terms.
|
||||
|
||||
"Additional permissions" are terms that supplement the terms of this
|
||||
License by making exceptions from one or more of its conditions.
|
||||
Additional permissions that are applicable to the entire Program shall
|
||||
be treated as though they were included in this License, to the extent
|
||||
that they are valid under applicable law. If additional permissions
|
||||
apply only to part of the Program, that part may be used separately
|
||||
under those permissions, but the entire Program remains governed by
|
||||
this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option
|
||||
remove any additional permissions from that copy, or from any part of
|
||||
it. (Additional permissions may be written to require their own
|
||||
removal in certain cases when you modify the work.) You may place
|
||||
additional permissions on material, added by you to a covered work,
|
||||
for which you have or can give appropriate copyright permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you
|
||||
add to a covered work, you may (if authorized by the copyright holders of
|
||||
that material) supplement the terms of this License with terms:
|
||||
|
||||
a) Disclaiming warranty or limiting liability differently from the
|
||||
terms of sections 15 and 16 of this License; or
|
||||
|
||||
b) Requiring preservation of specified reasonable legal notices or
|
||||
author attributions in that material or in the Appropriate Legal
|
||||
Notices displayed by works containing it; or
|
||||
|
||||
c) Prohibiting misrepresentation of the origin of that material, or
|
||||
requiring that modified versions of such material be marked in
|
||||
reasonable ways as different from the original version; or
|
||||
|
||||
d) Limiting the use for publicity purposes of names of licensors or
|
||||
authors of the material; or
|
||||
|
||||
e) Declining to grant rights under trademark law for use of some
|
||||
trade names, trademarks, or service marks; or
|
||||
|
||||
f) Requiring indemnification of licensors and authors of that
|
||||
material by anyone who conveys the material (or modified versions of
|
||||
it) with contractual assumptions of liability to the recipient, for
|
||||
any liability that these contractual assumptions directly impose on
|
||||
those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered "further
|
||||
restrictions" within the meaning of section 10. If the Program as you
|
||||
received it, or any part of it, contains a notice stating that it is
|
||||
governed by this License along with a term that is a further
|
||||
restriction, you may remove that term. If a license document contains
|
||||
a further restriction but permits relicensing or conveying under this
|
||||
License, you may add to a covered work material governed by the terms
|
||||
of that license document, provided that the further restriction does
|
||||
not survive such relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you
|
||||
must place, in the relevant source files, a statement of the
|
||||
additional terms that apply to those files, or a notice indicating
|
||||
where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the
|
||||
form of a separately written license, or stated as exceptions;
|
||||
the above requirements apply either way.
|
||||
|
||||
8. Termination.
|
||||
|
||||
You may not propagate or modify a covered work except as expressly
|
||||
provided under this License. Any attempt otherwise to propagate or
|
||||
modify it is void, and will automatically terminate your rights under
|
||||
this License (including any patent licenses granted under the third
|
||||
paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your
|
||||
license from a particular copyright holder is reinstated (a)
|
||||
provisionally, unless and until the copyright holder explicitly and
|
||||
finally terminates your license, and (b) permanently, if the copyright
|
||||
holder fails to notify you of the violation by some reasonable means
|
||||
prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is
|
||||
reinstated permanently if the copyright holder notifies you of the
|
||||
violation by some reasonable means, this is the first time you have
|
||||
received notice of violation of this License (for any work) from that
|
||||
copyright holder, and you cure the violation prior to 30 days after
|
||||
your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the
|
||||
licenses of parties who have received copies or rights from you under
|
||||
this License. If your rights have been terminated and not permanently
|
||||
reinstated, you do not qualify to receive new licenses for the same
|
||||
material under section 10.
|
||||
|
||||
9. Acceptance Not Required for Having Copies.
|
||||
|
||||
You are not required to accept this License in order to receive or
|
||||
run a copy of the Program. Ancillary propagation of a covered work
|
||||
occurring solely as a consequence of using peer-to-peer transmission
|
||||
to receive a copy likewise does not require acceptance. However,
|
||||
nothing other than this License grants you permission to propagate or
|
||||
modify any covered work. These actions infringe copyright if you do
|
||||
not accept this License. Therefore, by modifying or propagating a
|
||||
covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically
|
||||
receives a license from the original licensors, to run, modify and
|
||||
propagate that work, subject to this License. You are not responsible
|
||||
for enforcing compliance by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an
|
||||
organization, or merging organizations. If propagation of a covered
|
||||
work results from an entity transaction, each party to that
|
||||
transaction who receives a copy of the work also receives whatever
|
||||
licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if
|
||||
the predecessor has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the
|
||||
rights granted or affirmed under this License. For example, you may
|
||||
not impose a license fee, royalty, or other charge for exercise of
|
||||
rights granted under this License, and you may not initiate litigation
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||
any patent claim is infringed by making, using, selling, offering for
|
||||
sale, or importing the Program or any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The
|
||||
work thus licensed is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims
|
||||
owned or controlled by the contributor, whether already acquired or
|
||||
hereafter acquired, that would be infringed by some manner, permitted
|
||||
by this License, of making, using, or selling its contributor version,
|
||||
but do not include claims that would be infringed only as a
|
||||
consequence of further modification of the contributor version. For
|
||||
purposes of this definition, "control" includes the right to grant
|
||||
patent sublicenses in a manner consistent with the requirements of
|
||||
this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||
patent license under the contributor's essential patent claims, to
|
||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||
propagate the contents of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent
|
||||
(such as an express permission to practice a patent or covenant not to
|
||||
sue for patent infringement). To "grant" such a patent license to a
|
||||
party means to make such an agreement or commitment not to enforce a
|
||||
patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license,
|
||||
and the Corresponding Source of the work is not available for anyone
|
||||
to copy, free of charge and under the terms of this License, through a
|
||||
publicly available network server or other readily accessible means,
|
||||
then you must either (1) cause the Corresponding Source to be so
|
||||
available, or (2) arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or (3) arrange, in a manner
|
||||
consistent with the requirements of this License, to extend the patent
|
||||
license to downstream recipients. "Knowingly relying" means you have
|
||||
actual knowledge that, but for the patent license, your conveying the
|
||||
covered work in a country, or your recipient's use of the covered work
|
||||
in a country, would infringe one or more identifiable patents in that
|
||||
country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or
|
||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||
covered work, and grant a patent license to some of the parties
|
||||
receiving the covered work authorizing them to use, propagate, modify
|
||||
or convey a specific copy of the covered work, then the patent license
|
||||
you grant is automatically extended to all recipients of the covered
|
||||
work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within
|
||||
the scope of its coverage, prohibits the exercise of, or is
|
||||
conditioned on the non-exercise of one or more of the rights that are
|
||||
specifically granted under this License. You may not convey a covered
|
||||
work if you are a party to an arrangement with a third party that is
|
||||
in the business of distributing software, under which you make payment
|
||||
to the third party based on the extent of your activity of conveying
|
||||
the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory
|
||||
patent license (a) in connection with copies of the covered work
|
||||
conveyed by you (or copies made from those copies), or (b) primarily
|
||||
for and in connection with specific products or compilations that
|
||||
contain the covered work, unless you entered into that arrangement,
|
||||
or that patent license was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting
|
||||
any implied license or other defenses to infringement that may
|
||||
otherwise be available to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot convey a
|
||||
covered work so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you may
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Use with the GNU Affero General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU Affero General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the special requirements of the GNU Affero General Public License,
|
||||
section 13, concerning interaction through a network will apply to the
|
||||
combination as such.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU General Public License from time to time. Such new versions will
|
||||
be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different
|
||||
permissions. However, no additional obligations are imposed on any
|
||||
author or copyright holder as a result of your choosing to follow a
|
||||
later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided
|
||||
above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If the program does terminal interaction, make it output a short
|
||||
notice like this when it starts in an interactive mode:
|
||||
|
||||
<program> Copyright (C) <year> <name of author>
|
||||
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions; type `show c' for details.
|
||||
|
||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||
parts of the General Public License. Of course, your program's commands
|
||||
might be different; for a GUI interface, you would use an "about box".
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU GPL, see
|
||||
<https://www.gnu.org/licenses/>.
|
||||
|
||||
The GNU General Public License does not permit incorporating your program
|
||||
into proprietary programs. If your program is a subroutine library, you
|
||||
may consider it more useful to permit linking proprietary applications with
|
||||
the library. If this is what you want to do, use the GNU Lesser General
|
||||
Public License instead of this License. But first, please read
|
||||
<https://www.gnu.org/licenses/why-not-lgpl.html>.
|
99
Makefile
Normal file
99
Makefile
Normal file
|
@ -0,0 +1,99 @@
|
|||
#!/usr/bin/make -f
SHELL = bash

REPO ?= $(shell go list -m)
VERSION ?= "$(shell git describe --tags --dirty --always)"

HUB_IMAGE ?= nspccdev/neofs
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"

BIN = bin
DIRS= $(BIN)

# List of binaries to build. May be automated.
CMDS = neofs-node
BINS = $(addprefix $(BIN)/, $(CMDS))

# Declare every non-file target phony so stray files with the same
# names (e.g. "version") can never shadow the recipes.
.PHONY: all help dep clean fmt protoc image-storage images version

# To build a specific binary, use its name prefixed with bin/ as a target
# For example `make bin/neofs-node` will build only the Storage node binary
# Just `make` will
# Build all possible binaries
all: $(DIRS) $(BINS)

# NOTE(review): ${BUILD} is interpolated into ldflags but is never defined
# in this Makefile, so misc.Build is set to an empty string unless BUILD
# is exported by the caller — confirm whether a default should be added.
$(BINS): $(DIRS) dep
	@echo "⇒ Build $@"
	GOGC=off \
	CGO_ENABLED=0 \
	go build -v -mod=vendor -trimpath \
	-ldflags "-X ${REPO}/misc.Version=$(VERSION) -X ${REPO}/misc.Build=${BUILD}" \
	-o $@ ./cmd/$(notdir $@)

$(DIRS):
	@echo "⇒ Ensure dir: $@"
	@mkdir -p $@

# Pull go dependencies
dep:
	@printf "⇒ Ensure vendor: "
	@go mod tidy -v && echo OK || (echo fail && exit 2)
	@printf "⇒ Download requirements: "
	@go mod download && echo OK || (echo fail && exit 2)
	@printf "⇒ Store vendor locally: "
	@go mod vendor && echo OK || (echo fail && exit 2)

# Regenerate proto files:
protoc:
	@GOPRIVATE=github.com/nspcc-dev go mod tidy -v
	@GOPRIVATE=github.com/nspcc-dev go mod vendor
	# Install specific version for gogo-proto
	@go list -f '{{.Path}}/...@{{.Version}}' -m github.com/gogo/protobuf | xargs go get -v
	# Install specific version for protobuf lib
	@go list -f '{{.Path}}/...@{{.Version}}' -m github.com/golang/protobuf | xargs go get -v
	# Protoc generate
	@for f in `find . -type f -name '*.proto' -not -path './vendor/*'`; do \
		echo "⇒ Processing $$f "; \
		protoc \
			--proto_path=.:./vendor:./vendor/github.com/nspcc-dev/neofs-api-go:/usr/local/include \
			--gofast_out=plugins=grpc,paths=source_relative:. $$f; \
	done

# Build NeoFS Storage Node docker image
image-storage:
	@echo "⇒ Build NeoFS Storage Node docker image "
	@docker build \
		--build-arg REPO=$(REPO) \
		--build-arg VERSION=$(VERSION) \
		-f Dockerfile \
		-t $(HUB_IMAGE)-storage:$(HUB_TAG) .

# Build all Docker images
images: image-storage

# Reformat code
fmt:
	@[ ! -z `which goimports` ] || (echo "Install goimports" && exit 2)
	@for f in `find . -type f -name '*.go' -not -path './vendor/*' -not -name '*.pb.go' -prune`; do \
		echo "⇒ Processing $$f"; \
		goimports -w $$f; \
	done

# Print version
version:
	@echo $(VERSION)

# Show this help prompt
help:
	@echo ' Usage:'
	@echo ''
	@echo '  make <target>'
	@echo ''
	@echo ' Targets:'
	@echo ''
	@awk '/^#/{ comment = substr($$0,3) } comment && /^[a-zA-Z][a-zA-Z0-9_-]+ ?:/{ print "  ", $$1, comment }' $(MAKEFILE_LIST) | column -t -s ':' | grep -v 'IGNORE' | sort | uniq

clean:
	rm -rf vendor
	rm -rf $(BIN)
|
346
cmd/neofs-node/defaults.go
Normal file
346
cmd/neofs-node/defaults.go
Normal file
|
@ -0,0 +1,346 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/config/netmode"
|
||||
"github.com/nspcc-dev/neofs-node/lib/core"
|
||||
"github.com/nspcc-dev/neofs-node/modules/morph"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
func setDefaults(v *viper.Viper) {
|
||||
// Logger section
|
||||
{
|
||||
v.SetDefault("logger.level", "debug")
|
||||
v.SetDefault("logger.format", "console")
|
||||
v.SetDefault("logger.trace_level", "fatal")
|
||||
v.SetDefault("logger.no_disclaimer", false) // to disable app_name and app_version
|
||||
|
||||
v.SetDefault("logger.sampling.initial", 1000) // todo: add description
|
||||
v.SetDefault("logger.sampling.thereafter", 1000) // todo: add description
|
||||
}
|
||||
|
||||
// Transport section
|
||||
{
|
||||
v.SetDefault("transport.attempts_count", 5)
|
||||
v.SetDefault("transport.attempts_ttl", "30s")
|
||||
}
|
||||
|
||||
// Peers section
|
||||
{
|
||||
v.SetDefault("peers.metrics_timeout", "5s")
|
||||
v.SetDefault("peers.connections_ttl", "30s")
|
||||
v.SetDefault("peers.connections_idle", "30s")
|
||||
v.SetDefault("peers.keep_alive.ttl", "30s")
|
||||
v.SetDefault("peers.keep_alive.ping", "100ms")
|
||||
}
|
||||
|
||||
// Muxer session
|
||||
{
|
||||
v.SetDefault("muxer.http.read_buffer_size", 0)
|
||||
v.SetDefault("muxer.http.write_buffer_size", 0)
|
||||
v.SetDefault("muxer.http.read_timeout", 0)
|
||||
v.SetDefault("muxer.http.write_timeout", 0)
|
||||
}
|
||||
|
||||
// Node section
|
||||
{
|
||||
v.SetDefault("node.proto", "tcp") // tcp or udp
|
||||
v.SetDefault("node.address", ":8080")
|
||||
v.SetDefault("node.shutdown_ttl", "30s")
|
||||
v.SetDefault("node.private_key", "keys/node_00.key")
|
||||
|
||||
v.SetDefault("node.grpc.logging", true)
|
||||
v.SetDefault("node.grpc.metrics", true)
|
||||
v.SetDefault("node.grpc.billing", true)
|
||||
|
||||
// Contains public keys, which can send requests to state.DumpConfig
|
||||
// for now, in the future, should be replaced with ACL or something else.
|
||||
v.SetDefault("node.rpc.owners", []string{
|
||||
// By default we add user.key
|
||||
// TODO should be removed before public release:
|
||||
// or add into default Dockerfile `NEOFS_NODE_RPC_OWNERS_0=`
|
||||
"031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a",
|
||||
})
|
||||
}
|
||||
|
||||
// Storage section
|
||||
{
|
||||
storageTypes := []string{
|
||||
core.BlobStore.String(),
|
||||
core.MetaStore.String(),
|
||||
core.SpaceMetricsStore.String(),
|
||||
}
|
||||
|
||||
for i := range storageTypes {
|
||||
v.SetDefault("storage."+storageTypes[i]+".bucket", "boltdb")
|
||||
v.SetDefault("storage."+storageTypes[i]+".path", "./temp/storage/"+storageTypes[i])
|
||||
v.SetDefault("storage."+storageTypes[i]+".perm", 0777)
|
||||
// v.SetDefault("storage."+storageTypes[i]+".no_grow_sync", false)
|
||||
// v.SetDefault("storage."+storageTypes[i]+".lock_timeout", "30s")
|
||||
}
|
||||
}
|
||||
|
||||
// Object section
|
||||
{
|
||||
v.SetDefault("object.max_processing_size", 100) // size in MB, use 0 to remove restriction
|
||||
v.SetDefault("object.workers_count", 5)
|
||||
v.SetDefault("object.assembly", true)
|
||||
v.SetDefault("object.window_size", 3)
|
||||
|
||||
v.SetDefault("object.transformers.payload_limiter.max_payload_size", 5000) // size in KB
|
||||
|
||||
// algorithm used for salt applying in range hash, for now only xor is available
|
||||
v.SetDefault("object.salitor", "xor")
|
||||
|
||||
// set true to check container ACL rules
|
||||
v.SetDefault("object.check_acl", true)
|
||||
|
||||
v.SetDefault("object.dial_timeout", "500ms")
|
||||
rpcs := []string{"put", "get", "delete", "head", "search", "range", "range_hash"}
|
||||
for i := range rpcs {
|
||||
v.SetDefault("object."+rpcs[i]+".timeout", "5s")
|
||||
v.SetDefault("object."+rpcs[i]+".log_errs", false)
|
||||
}
|
||||
}
|
||||
|
||||
// Replication section
|
||||
{
|
||||
v.SetDefault("replication.manager.pool_size", 100)
|
||||
v.SetDefault("replication.manager.pool_expansion_rate", 0.1)
|
||||
v.SetDefault("replication.manager.read_pool_interval", "500ms")
|
||||
v.SetDefault("replication.manager.push_task_timeout", "1s")
|
||||
v.SetDefault("replication.manager.placement_honorer_enabled", true)
|
||||
v.SetDefault("replication.manager.capacities.replicate", 1)
|
||||
v.SetDefault("replication.manager.capacities.restore", 1)
|
||||
v.SetDefault("replication.manager.capacities.garbage", 1)
|
||||
|
||||
v.SetDefault("replication.placement_honorer.chan_capacity", 1)
|
||||
v.SetDefault("replication.placement_honorer.result_timeout", "1s")
|
||||
v.SetDefault("replication.placement_honorer.timeouts.put", "5s")
|
||||
v.SetDefault("replication.placement_honorer.timeouts.get", "5s")
|
||||
|
||||
v.SetDefault("replication.location_detector.chan_capacity", 1)
|
||||
v.SetDefault("replication.location_detector.result_timeout", "1s")
|
||||
v.SetDefault("replication.location_detector.timeouts.search", "5s")
|
||||
|
||||
v.SetDefault("replication.storage_validator.chan_capacity", 1)
|
||||
v.SetDefault("replication.storage_validator.result_timeout", "1s")
|
||||
v.SetDefault("replication.storage_validator.salt_size", 64) // size in bytes
|
||||
v.SetDefault("replication.storage_validator.max_payload_range_size", 64) // size in bytes
|
||||
v.SetDefault("replication.storage_validator.payload_range_count", 3)
|
||||
v.SetDefault("replication.storage_validator.salitor", "xor")
|
||||
v.SetDefault("replication.storage_validator.timeouts.get", "5s")
|
||||
v.SetDefault("replication.storage_validator.timeouts.head", "5s")
|
||||
v.SetDefault("replication.storage_validator.timeouts.range_hash", "5s")
|
||||
|
||||
v.SetDefault("replication.replicator.chan_capacity", 1)
|
||||
v.SetDefault("replication.replicator.result_timeout", "1s")
|
||||
v.SetDefault("replication.replicator.timeouts.put", "5s")
|
||||
|
||||
v.SetDefault("replication.restorer.chan_capacity", 1)
|
||||
v.SetDefault("replication.restorer.result_timeout", "1s")
|
||||
v.SetDefault("replication.restorer.timeouts.get", "5s")
|
||||
v.SetDefault("replication.restorer.timeouts.head", "5s")
|
||||
}
|
||||
|
||||
// PPROF section
|
||||
{
|
||||
v.SetDefault("pprof.enabled", true)
|
||||
v.SetDefault("pprof.address", ":6060")
|
||||
v.SetDefault("pprof.shutdown_ttl", "10s")
|
||||
// v.SetDefault("pprof.read_timeout", "10s")
|
||||
// v.SetDefault("pprof.read_header_timeout", "10s")
|
||||
// v.SetDefault("pprof.write_timeout", "10s")
|
||||
// v.SetDefault("pprof.idle_timeout", "10s")
|
||||
// v.SetDefault("pprof.max_header_bytes", 1024)
|
||||
}
|
||||
|
||||
// Metrics section
|
||||
{
|
||||
v.SetDefault("metrics.enabled", true)
|
||||
v.SetDefault("metrics.address", ":8090")
|
||||
v.SetDefault("metrics.shutdown_ttl", "10s")
|
||||
// v.SetDefault("metrics.read_header_timeout", "10s")
|
||||
// v.SetDefault("metrics.write_timeout", "10s")
|
||||
// v.SetDefault("metrics.idle_timeout", "10s")
|
||||
// v.SetDefault("metrics.max_header_bytes", 1024)
|
||||
}
|
||||
|
||||
// Workers section
|
||||
{
|
||||
workers := []string{
|
||||
"peers",
|
||||
"boot",
|
||||
"replicator",
|
||||
"metrics",
|
||||
"event_listener",
|
||||
}
|
||||
|
||||
for i := range workers {
|
||||
v.SetDefault("workers."+workers[i]+".immediately", true)
|
||||
v.SetDefault("workers."+workers[i]+".disabled", false)
|
||||
// v.SetDefault("workers."+workers[i]+".timer", "5s") // run worker every 5sec and reset timer after job
|
||||
// v.SetDefault("workers."+workers[i]+".ticker", "5s") // run worker every 5sec
|
||||
}
|
||||
}
|
||||
|
||||
// Morph section
|
||||
{
|
||||
|
||||
// Endpoint
|
||||
v.SetDefault(
|
||||
morph.EndpointOptPath(),
|
||||
"http://morph_chain.localtest.nspcc.ru:30333",
|
||||
)
|
||||
|
||||
// Dial timeout
|
||||
v.SetDefault(
|
||||
morph.DialTimeoutOptPath(),
|
||||
5*time.Second,
|
||||
)
|
||||
|
||||
v.SetDefault(
|
||||
morph.MagicNumberOptPath(),
|
||||
uint32(netmode.PrivNet),
|
||||
)
|
||||
|
||||
{ // Event listener
|
||||
// Endpoint
|
||||
v.SetDefault(
|
||||
morph.ListenerEndpointOptPath(),
|
||||
"ws://morph_chain.localtest.nspcc.ru:30333/ws",
|
||||
)
|
||||
|
||||
// Dial timeout
|
||||
v.SetDefault(
|
||||
morph.ListenerDialTimeoutOptPath(),
|
||||
5*time.Second,
|
||||
)
|
||||
}
|
||||
|
||||
{ // Common parameters
|
||||
for _, name := range morph.ContractNames {
|
||||
// Script hash
|
||||
v.SetDefault(
|
||||
morph.ScriptHashOptPath(name),
|
||||
"c77ecae9773ad0c619ad59f7f2dd6f585ddc2e70", // LE
|
||||
)
|
||||
|
||||
// Invocation fee
|
||||
v.SetDefault(
|
||||
morph.InvocationFeeOptPath(name),
|
||||
0,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
{ // Container
|
||||
// Set EACL method name
|
||||
v.SetDefault(
|
||||
morph.ContainerContractSetEACLOptPath(),
|
||||
"SetEACL",
|
||||
)
|
||||
|
||||
// Get EACL method name
|
||||
v.SetDefault(
|
||||
morph.ContainerContractEACLOptPath(),
|
||||
"EACL",
|
||||
)
|
||||
|
||||
// Put method name
|
||||
v.SetDefault(
|
||||
morph.ContainerContractPutOptPath(),
|
||||
"Put",
|
||||
)
|
||||
|
||||
// Get method name
|
||||
v.SetDefault(
|
||||
morph.ContainerContractGetOptPath(),
|
||||
"Get",
|
||||
)
|
||||
|
||||
// Delete method name
|
||||
v.SetDefault(
|
||||
morph.ContainerContractDelOptPath(),
|
||||
"Delete",
|
||||
)
|
||||
|
||||
// List method name
|
||||
v.SetDefault(
|
||||
morph.ContainerContractListOptPath(),
|
||||
"List",
|
||||
)
|
||||
}
|
||||
|
||||
{ // Reputation
|
||||
// Put method name
|
||||
v.SetDefault(
|
||||
morph.ReputationContractPutOptPath(),
|
||||
"Put",
|
||||
)
|
||||
|
||||
// List method name
|
||||
v.SetDefault(
|
||||
morph.ReputationContractListOptPath(),
|
||||
"List",
|
||||
)
|
||||
}
|
||||
|
||||
{ // Netmap
|
||||
// AddPeer method name
|
||||
v.SetDefault(
|
||||
morph.NetmapContractAddPeerOptPath(),
|
||||
"AddPeer",
|
||||
)
|
||||
|
||||
// New epoch method name
|
||||
v.SetDefault(
|
||||
morph.NetmapContractNewEpochOptPath(),
|
||||
"NewEpoch",
|
||||
)
|
||||
|
||||
// Netmap method name
|
||||
v.SetDefault(
|
||||
morph.NetmapContractNetmapOptPath(),
|
||||
"Netmap",
|
||||
)
|
||||
|
||||
// Update state method name
|
||||
v.SetDefault(
|
||||
morph.NetmapContractUpdateStateOptPath(),
|
||||
"UpdateState",
|
||||
)
|
||||
|
||||
// IR list method name
|
||||
v.SetDefault(
|
||||
morph.NetmapContractIRListOptPath(),
|
||||
"InnerRingList",
|
||||
)
|
||||
|
||||
// New epoch event type
|
||||
v.SetDefault(
|
||||
morph.ContractEventOptPath(
|
||||
morph.NetmapContractName,
|
||||
morph.NewEpochEventType,
|
||||
),
|
||||
"NewEpoch",
|
||||
)
|
||||
}
|
||||
|
||||
{ // Balance
|
||||
// balanceOf method name
|
||||
v.SetDefault(
|
||||
morph.BalanceContractBalanceOfOptPath(),
|
||||
"balanceOf",
|
||||
)
|
||||
|
||||
// decimals method name
|
||||
v.SetDefault(
|
||||
morph.BalanceContractDecimalsOfOptPath(),
|
||||
"decimals",
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
146
cmd/neofs-node/main.go
Normal file
146
cmd/neofs-node/main.go
Normal file
|
@ -0,0 +1,146 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"flag"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/nspcc-dev/neofs-api-go/service"
|
||||
state2 "github.com/nspcc-dev/neofs-api-go/state"
|
||||
crypto "github.com/nspcc-dev/neofs-crypto"
|
||||
"github.com/nspcc-dev/neofs-node/lib/fix"
|
||||
"github.com/nspcc-dev/neofs-node/lib/fix/config"
|
||||
"github.com/nspcc-dev/neofs-node/lib/fix/web"
|
||||
"github.com/nspcc-dev/neofs-node/lib/fix/worker"
|
||||
"github.com/nspcc-dev/neofs-node/lib/muxer"
|
||||
"github.com/nspcc-dev/neofs-node/misc"
|
||||
"github.com/nspcc-dev/neofs-node/modules/node"
|
||||
"github.com/nspcc-dev/neofs-node/services/public/state"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/viper"
|
||||
"go.uber.org/dig"
|
||||
"go.uber.org/zap"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
type params struct {
|
||||
dig.In
|
||||
|
||||
Debug web.Profiler `optional:"true"`
|
||||
Metric web.Metrics `optional:"true"`
|
||||
Worker worker.Workers `optional:"true"`
|
||||
Muxer muxer.Mux
|
||||
Logger *zap.Logger
|
||||
}
|
||||
|
||||
var (
|
||||
healthCheck bool
|
||||
configFile string
|
||||
)
|
||||
|
||||
func runner(ctx context.Context, p params) error {
|
||||
// create combined service, that would start/stop all
|
||||
svc := fix.NewServices(p.Debug, p.Metric, p.Muxer, p.Worker)
|
||||
|
||||
p.Logger.Info("start services")
|
||||
svc.Start(ctx)
|
||||
|
||||
<-ctx.Done()
|
||||
|
||||
p.Logger.Info("stop services")
|
||||
svc.Stop()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func check(err error) {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// FIXME: this is a copypaste from node settings constructor
|
||||
func keyFromCfg(v *viper.Viper) (*ecdsa.PrivateKey, error) {
|
||||
switch key := v.GetString("node.private_key"); key {
|
||||
case "":
|
||||
return nil, errors.New("`node.private_key` could not be empty")
|
||||
case "generated":
|
||||
return ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
default:
|
||||
return crypto.LoadPrivateKey(key)
|
||||
}
|
||||
}
|
||||
|
||||
func runHealthCheck() {
|
||||
if !healthCheck {
|
||||
return
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
cfg, err := config.NewConfig(config.Params{
|
||||
File: configFile,
|
||||
Prefix: misc.Prefix,
|
||||
Name: misc.NodeName,
|
||||
Version: misc.Version,
|
||||
|
||||
AppDefaults: setDefaults,
|
||||
})
|
||||
check(err)
|
||||
|
||||
addr := cfg.GetString("node.address")
|
||||
|
||||
key, err := keyFromCfg(cfg)
|
||||
if err != nil {
|
||||
check(err)
|
||||
}
|
||||
|
||||
con, err := grpc.DialContext(ctx, addr,
|
||||
// TODO: we must provide grpc.WithInsecure() or set credentials
|
||||
grpc.WithInsecure())
|
||||
check(err)
|
||||
|
||||
req := new(state.HealthRequest)
|
||||
req.SetTTL(service.NonForwardingTTL)
|
||||
if err := service.SignRequestData(key, req); err != nil {
|
||||
check(err)
|
||||
}
|
||||
|
||||
res, err := state2.NewStatusClient(con).
|
||||
HealthCheck(ctx, req)
|
||||
check(errors.Wrapf(err, "address: %q", addr))
|
||||
|
||||
var exitCode int
|
||||
|
||||
if !res.Healthy {
|
||||
exitCode = 2
|
||||
}
|
||||
_, _ = os.Stdout.Write([]byte(res.Status + "\n"))
|
||||
os.Exit(exitCode)
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.BoolVar(&healthCheck, "health", healthCheck, "run health-check")
|
||||
|
||||
// todo: if configFile is empty, we can check './config.yml' manually
|
||||
flag.StringVar(&configFile, "config", configFile, "use config.yml file")
|
||||
flag.Parse()
|
||||
|
||||
runHealthCheck()
|
||||
|
||||
fix.New(&fix.Settings{
|
||||
File: configFile,
|
||||
Name: misc.NodeName,
|
||||
Prefix: misc.Prefix,
|
||||
Runner: runner,
|
||||
Build: misc.Build,
|
||||
Version: misc.Version,
|
||||
|
||||
AppDefaults: setDefaults,
|
||||
}, node.Module).RunAndCatch()
|
||||
}
|
48
go.mod
Normal file
48
go.mod
Normal file
|
@ -0,0 +1,48 @@
|
|||
module github.com/nspcc-dev/neofs-node
|
||||
|
||||
go 1.14
|
||||
|
||||
require (
|
||||
bou.ke/monkey v1.0.2
|
||||
github.com/cenk/backoff v2.2.1+incompatible // indirect
|
||||
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a // indirect
|
||||
github.com/fasthttp/router v1.0.2
|
||||
github.com/gogo/protobuf v1.3.1
|
||||
github.com/golang/protobuf v1.4.2
|
||||
github.com/google/uuid v1.1.1
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.2.0
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
|
||||
github.com/mr-tron/base58 v1.1.3
|
||||
github.com/multiformats/go-multiaddr v0.2.0
|
||||
github.com/multiformats/go-multiaddr-net v0.1.2 // v0.1.1 => v0.1.2
|
||||
github.com/multiformats/go-multihash v0.0.13
|
||||
github.com/nspcc-dev/hrw v1.0.9
|
||||
github.com/nspcc-dev/neo-go v0.90.0-pre.0.20200708064050-cf1e5243b90b
|
||||
github.com/nspcc-dev/neofs-api-go v1.2.0
|
||||
github.com/nspcc-dev/neofs-crypto v0.3.0
|
||||
github.com/nspcc-dev/netmap v1.7.0
|
||||
github.com/panjf2000/ants/v2 v2.3.0
|
||||
github.com/peterbourgon/g2s v0.0.0-20170223122336-d4e7ad98afea // indirect
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/client_golang v1.6.0
|
||||
github.com/rubyist/circuitbreaker v2.2.1+incompatible
|
||||
github.com/soheilhy/cmux v0.1.4
|
||||
github.com/spaolacci/murmur3 v1.1.0
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/spf13/viper v1.7.0
|
||||
github.com/stretchr/testify v1.5.1
|
||||
github.com/valyala/fasthttp v1.9.0
|
||||
go.etcd.io/bbolt v1.3.4
|
||||
go.uber.org/atomic v1.5.1
|
||||
go.uber.org/dig v1.8.0
|
||||
go.uber.org/multierr v1.4.0 // indirect
|
||||
go.uber.org/zap v1.13.0
|
||||
golang.org/x/crypto v0.0.0-20200117160349-530e935923ad // indirect
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f // indirect
|
||||
golang.org/x/tools v0.0.0-20200123022218-593de606220b // indirect
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a
|
||||
google.golang.org/grpc v1.29.1
|
||||
)
|
||||
|
||||
// Used for debug reasons
|
||||
// replace github.com/nspcc-dev/neofs-api-go => ../neofs-api-go
|
BIN
go.sum
Normal file
BIN
go.sum
Normal file
Binary file not shown.
7
internal/error.go
Normal file
7
internal/error.go
Normal file
|
@ -0,0 +1,7 @@
|
|||
package internal
|
||||
|
||||
// Error is a string-based error type, convenient for declaring
// error values as untyped constants.
type Error string

// Error returns the error message, satisfying the built-in error interface.
func (e Error) Error() string {
	return string(e)
}
|
0
lib/.gitkeep
Normal file
0
lib/.gitkeep
Normal file
94
lib/acl/action.go
Normal file
94
lib/acl/action.go
Normal file
|
@ -0,0 +1,94 @@
|
|||
package acl
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/nspcc-dev/neofs-api-go/acl"
|
||||
)
|
||||
|
||||
// RequestInfo is an interface of request information needed for extended ACL check.
|
||||
type RequestInfo interface {
|
||||
TypedHeaderSource
|
||||
|
||||
// Must return the binary representation of request initiator's key.
|
||||
Key() []byte
|
||||
|
||||
// Must return true if request corresponds to operation type.
|
||||
TypeOf(acl.OperationType) bool
|
||||
|
||||
// Must return true if request has passed target.
|
||||
TargetOf(acl.Target) bool
|
||||
}
|
||||
|
||||
// ExtendedACLChecker is an interface of extended ACL checking tool.
|
||||
type ExtendedACLChecker interface {
|
||||
// Must return an action according to the results of applying the ACL table rules to request.
|
||||
//
|
||||
// Must return ActionUndefined if it is unable to explicitly calculate the action.
|
||||
Action(acl.ExtendedACLTable, RequestInfo) acl.ExtendedACLAction
|
||||
}
|
||||
|
||||
type extendedACLChecker struct{}
|
||||
|
||||
// NewExtendedACLChecker creates a new extended ACL checking tool and returns ExtendedACLChecker interface.
|
||||
func NewExtendedACLChecker() ExtendedACLChecker {
|
||||
return new(extendedACLChecker)
|
||||
}
|
||||
|
||||
// Action returns an action for passed request based on information about it and ACL table.
|
||||
//
|
||||
// Returns action of the first suitable table record, or ActionUndefined in the absence thereof.
|
||||
//
|
||||
// If passed ExtendedACLTable is nil, ActionUndefined returns.
|
||||
// If passed RequestInfo is nil, ActionUndefined returns.
|
||||
func (s extendedACLChecker) Action(table acl.ExtendedACLTable, req RequestInfo) acl.ExtendedACLAction {
|
||||
if table == nil {
|
||||
return acl.ActionUndefined
|
||||
} else if req == nil {
|
||||
return acl.ActionUndefined
|
||||
}
|
||||
|
||||
for _, record := range table.Records() {
|
||||
// check type of operation
|
||||
if !req.TypeOf(record.OperationType()) {
|
||||
continue
|
||||
}
|
||||
|
||||
// check target
|
||||
if !targetMatches(req, record.TargetList()) {
|
||||
continue
|
||||
}
|
||||
|
||||
// check headers
|
||||
switch MatchFilters(req, record.HeaderFilters()) {
|
||||
case mResUndefined:
|
||||
// headers of some type could not be composed => allow
|
||||
return acl.ActionAllow
|
||||
case mResMatch:
|
||||
return record.Action()
|
||||
}
|
||||
}
|
||||
|
||||
return acl.ActionAllow
|
||||
}
|
||||
|
||||
// returns true if one of ExtendedACLTarget has suitable target OR suitable public key.
|
||||
func targetMatches(req RequestInfo, list []acl.ExtendedACLTarget) bool {
|
||||
rKey := req.Key()
|
||||
|
||||
for _, target := range list {
|
||||
// check public key match
|
||||
for _, key := range target.KeyList() {
|
||||
if bytes.Equal(key, rKey) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// check target group match
|
||||
if req.TargetOf(target.Target()) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
163
lib/acl/action_test.go
Normal file
163
lib/acl/action_test.go
Normal file
|
@ -0,0 +1,163 @@
|
|||
package acl
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/nspcc-dev/neofs-api-go/acl"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type testExtendedACLTable struct {
|
||||
records []acl.ExtendedACLRecord
|
||||
}
|
||||
|
||||
type testRequestInfo struct {
|
||||
headers []acl.TypedHeader
|
||||
key []byte
|
||||
opType acl.OperationType
|
||||
target acl.Target
|
||||
}
|
||||
|
||||
type testEACLRecord struct {
|
||||
opType acl.OperationType
|
||||
filters []acl.HeaderFilter
|
||||
targets []acl.ExtendedACLTarget
|
||||
action acl.ExtendedACLAction
|
||||
}
|
||||
|
||||
type testEACLTarget struct {
|
||||
target acl.Target
|
||||
keys [][]byte
|
||||
}
|
||||
|
||||
func (s testEACLTarget) Target() acl.Target {
|
||||
return s.target
|
||||
}
|
||||
|
||||
func (s testEACLTarget) KeyList() [][]byte {
|
||||
return s.keys
|
||||
}
|
||||
|
||||
func (s testEACLRecord) OperationType() acl.OperationType {
|
||||
return s.opType
|
||||
}
|
||||
|
||||
func (s testEACLRecord) HeaderFilters() []acl.HeaderFilter {
|
||||
return s.filters
|
||||
}
|
||||
|
||||
func (s testEACLRecord) TargetList() []acl.ExtendedACLTarget {
|
||||
return s.targets
|
||||
}
|
||||
|
||||
func (s testEACLRecord) Action() acl.ExtendedACLAction {
|
||||
return s.action
|
||||
}
|
||||
|
||||
func (s testRequestInfo) HeadersOfType(typ acl.HeaderType) ([]acl.Header, bool) {
|
||||
res := make([]acl.Header, 0, len(s.headers))
|
||||
|
||||
for i := range s.headers {
|
||||
if s.headers[i].HeaderType() == typ {
|
||||
res = append(res, s.headers[i])
|
||||
}
|
||||
}
|
||||
|
||||
return res, true
|
||||
}
|
||||
|
||||
func (s testRequestInfo) Key() []byte {
|
||||
return s.key
|
||||
}
|
||||
|
||||
func (s testRequestInfo) TypeOf(t acl.OperationType) bool {
|
||||
return s.opType == t
|
||||
}
|
||||
|
||||
func (s testRequestInfo) TargetOf(t acl.Target) bool {
|
||||
return s.target == t
|
||||
}
|
||||
|
||||
func (s testExtendedACLTable) Records() []acl.ExtendedACLRecord {
|
||||
return s.records
|
||||
}
|
||||
|
||||
func TestExtendedACLChecker_Action(t *testing.T) {
|
||||
s := NewExtendedACLChecker()
|
||||
|
||||
// nil ExtendedACLTable
|
||||
require.Equal(t, acl.ActionUndefined, s.Action(nil, nil))
|
||||
|
||||
// create test ExtendedACLTable
|
||||
table := new(testExtendedACLTable)
|
||||
|
||||
// nil RequestInfo
|
||||
require.Equal(t, acl.ActionUndefined, s.Action(table, nil))
|
||||
|
||||
// create test RequestInfo
|
||||
req := new(testRequestInfo)
|
||||
|
||||
// create test ExtendedACLRecord
|
||||
record := new(testEACLRecord)
|
||||
table.records = append(table.records, record)
|
||||
|
||||
// set different OperationType
|
||||
record.opType = acl.OperationType(3)
|
||||
req.opType = record.opType + 1
|
||||
|
||||
require.Equal(t, acl.ActionAllow, s.Action(table, req))
|
||||
|
||||
// set equal OperationType
|
||||
req.opType = record.opType
|
||||
|
||||
// create test ExtendedACLTarget through group
|
||||
target := new(testEACLTarget)
|
||||
record.targets = append(record.targets, target)
|
||||
|
||||
// set not matching ExtendedACLTarget
|
||||
target.target = acl.Target(5)
|
||||
req.target = target.target + 1
|
||||
|
||||
require.Equal(t, acl.ActionAllow, s.Action(table, req))
|
||||
|
||||
// set matching ExtendedACLTarget
|
||||
req.target = target.target
|
||||
|
||||
// create test HeaderFilter
|
||||
fHeader := new(testTypedHeader)
|
||||
hFilter := &testHeaderFilter{
|
||||
TypedHeader: fHeader,
|
||||
}
|
||||
record.filters = append(record.filters, hFilter)
|
||||
|
||||
// create test TypedHeader
|
||||
header := new(testTypedHeader)
|
||||
req.headers = append(req.headers, header)
|
||||
|
||||
// set not matching values
|
||||
header.t = hFilter.HeaderType() + 1
|
||||
|
||||
require.Equal(t, acl.ActionAllow, s.Action(table, req))
|
||||
|
||||
// set matching values
|
||||
header.k = "key"
|
||||
header.v = "value"
|
||||
|
||||
fHeader.t = header.HeaderType()
|
||||
fHeader.k = header.Name()
|
||||
fHeader.v = header.Value()
|
||||
|
||||
hFilter.t = acl.StringEqual
|
||||
|
||||
// set ExtendedACLAction
|
||||
record.action = acl.ExtendedACLAction(7)
|
||||
|
||||
require.Equal(t, record.action, s.Action(table, req))
|
||||
|
||||
// set matching ExtendedACLTarget through key
|
||||
target.target = req.target + 1
|
||||
req.key = []byte{1, 2, 3}
|
||||
target.keys = append(target.keys, req.key)
|
||||
|
||||
require.Equal(t, record.action, s.Action(table, req))
|
||||
}
|
179
lib/acl/basic.go
Normal file
179
lib/acl/basic.go
Normal file
|
@ -0,0 +1,179 @@
|
|||
package acl
|
||||
|
||||
import (
|
||||
"github.com/nspcc-dev/neofs-api-go/acl"
|
||||
"github.com/nspcc-dev/neofs-api-go/object"
|
||||
"github.com/nspcc-dev/neofs-node/internal"
|
||||
)
|
||||
|
||||
type (
|
||||
// BasicChecker is an interface of the basic ACL control tool.
|
||||
BasicChecker interface {
|
||||
// Action returns true if request is allowed for this target.
|
||||
Action(uint32, object.RequestType, acl.Target) (bool, error)
|
||||
|
||||
// Bearer returns true if bearer token is allowed for this request.
|
||||
Bearer(uint32, object.RequestType) (bool, error)
|
||||
|
||||
// Extended returns true if extended ACL is allowed for this.
|
||||
Extended(uint32) bool
|
||||
|
||||
// Sticky returns true if sticky bit is set.
|
||||
Sticky(uint32) bool
|
||||
}
|
||||
|
||||
// BasicACLChecker performs basic ACL check.
|
||||
BasicACLChecker struct{}
|
||||
|
||||
// MaskedBasicACLChecker performs all basic ACL checks, but applying
|
||||
// mask on ACL first. It is useful, when some bits must be always
|
||||
// set or unset.
|
||||
MaskedBasicACLChecker struct {
|
||||
BasicACLChecker
|
||||
|
||||
andMask uint32
|
||||
orMask uint32
|
||||
}
|
||||
|
||||
nibble struct {
|
||||
value uint32
|
||||
}
|
||||
)
|
||||
|
||||
const (
|
||||
errUnknownRequest = internal.Error("unknown request type")
|
||||
errUnknownTarget = internal.Error("unknown target type")
|
||||
)
|
||||
|
||||
const (
|
||||
aclFinalBit = 0x10000000 // 29th bit
|
||||
aclStickyBit = 0x20000000 // 30th bit
|
||||
|
||||
nibbleBBit = 0x1
|
||||
nibbleOBit = 0x2
|
||||
nibbleSBit = 0x4
|
||||
nibbleUBit = 0x8
|
||||
|
||||
// DefaultAndFilter is a default AND mask of basic ACL value of container.
|
||||
DefaultAndFilter = 0xFFFFFFFF
|
||||
)
|
||||
|
||||
var (
|
||||
nibbleOffset = map[object.RequestType]uint32{
|
||||
object.RequestGet: 0,
|
||||
object.RequestHead: 1 * 4,
|
||||
object.RequestPut: 2 * 4,
|
||||
object.RequestDelete: 3 * 4,
|
||||
object.RequestSearch: 4 * 4,
|
||||
object.RequestRange: 5 * 4,
|
||||
object.RequestRangeHash: 6 * 4,
|
||||
}
|
||||
)
|
||||
|
||||
// Action returns true if request is allowed for target.
|
||||
func (c *BasicACLChecker) Action(rule uint32, req object.RequestType, t acl.Target) (bool, error) {
|
||||
n, err := fetchNibble(rule, req)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
switch t {
|
||||
case acl.Target_User:
|
||||
return n.U(), nil
|
||||
case acl.Target_System:
|
||||
return n.S(), nil
|
||||
case acl.Target_Others:
|
||||
return n.O(), nil
|
||||
default:
|
||||
return false, errUnknownTarget
|
||||
}
|
||||
}
|
||||
|
||||
// Bearer returns true if bearer token is allowed to use for this request
|
||||
// as source of extended ACL.
|
||||
func (c *BasicACLChecker) Bearer(rule uint32, req object.RequestType) (bool, error) {
|
||||
n, err := fetchNibble(rule, req)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return n.B(), nil
|
||||
}
|
||||
|
||||
// Extended returns true if extended ACL stored in the container are allowed
|
||||
// to use.
|
||||
func (c *BasicACLChecker) Extended(rule uint32) bool {
|
||||
return rule&aclFinalBit != aclFinalBit
|
||||
}
|
||||
|
||||
// Sticky returns true if container is not allowed to store objects with
|
||||
// owners different from request owner.
|
||||
func (c *BasicACLChecker) Sticky(rule uint32) bool {
|
||||
return rule&aclStickyBit == aclStickyBit
|
||||
}
|
||||
|
||||
func fetchNibble(rule uint32, req object.RequestType) (*nibble, error) {
|
||||
offset, ok := nibbleOffset[req]
|
||||
if !ok {
|
||||
return nil, errUnknownRequest
|
||||
}
|
||||
|
||||
return &nibble{value: (rule >> offset) & 0xf}, nil
|
||||
}
|
||||
|
||||
// B returns true if `Bearer` bit set in the nibble.
|
||||
func (n *nibble) B() bool { return n.value&nibbleBBit == nibbleBBit }
|
||||
|
||||
// O returns true if `Others` bit set in the nibble.
|
||||
func (n *nibble) O() bool { return n.value&nibbleOBit == nibbleOBit }
|
||||
|
||||
// S returns true if `System` bit set in the nibble.
|
||||
func (n *nibble) S() bool { return n.value&nibbleSBit == nibbleSBit }
|
||||
|
||||
// U returns true if `User` bit set in the nibble.
|
||||
func (n *nibble) U() bool { return n.value&nibbleUBit == nibbleUBit }
|
||||
|
||||
// NewMaskedBasicACLChecker returns BasicChecker that applies predefined
|
||||
// bit mask on basic ACL value.
|
||||
func NewMaskedBasicACLChecker(or, and uint32) BasicChecker {
|
||||
return MaskedBasicACLChecker{
|
||||
BasicACLChecker: BasicACLChecker{},
|
||||
andMask: and,
|
||||
orMask: or,
|
||||
}
|
||||
}
|
||||
|
||||
// Action returns true if request is allowed for target.
|
||||
func (c MaskedBasicACLChecker) Action(rule uint32, req object.RequestType, t acl.Target) (bool, error) {
|
||||
rule |= c.orMask
|
||||
rule &= c.andMask
|
||||
|
||||
return c.BasicACLChecker.Action(rule, req, t)
|
||||
}
|
||||
|
||||
// Bearer returns true if bearer token is allowed to use for this request
|
||||
// as source of extended ACL.
|
||||
func (c MaskedBasicACLChecker) Bearer(rule uint32, req object.RequestType) (bool, error) {
|
||||
rule |= c.orMask
|
||||
rule &= c.andMask
|
||||
|
||||
return c.BasicACLChecker.Bearer(rule, req)
|
||||
}
|
||||
|
||||
// Extended returns true if extended ACL stored in the container are allowed
|
||||
// to use.
|
||||
func (c MaskedBasicACLChecker) Extended(rule uint32) bool {
|
||||
rule |= c.orMask
|
||||
rule &= c.andMask
|
||||
|
||||
return c.BasicACLChecker.Extended(rule)
|
||||
}
|
||||
|
||||
// Sticky returns true if container is not allowed to store objects with
|
||||
// owners different from request owner.
|
||||
func (c MaskedBasicACLChecker) Sticky(rule uint32) bool {
|
||||
rule |= c.orMask
|
||||
rule &= c.andMask
|
||||
|
||||
return c.BasicACLChecker.Sticky(rule)
|
||||
}
|
116
lib/acl/basic_test.go
Normal file
116
lib/acl/basic_test.go
Normal file
|
@ -0,0 +1,116 @@
|
|||
package acl
|
||||
|
||||
import (
|
||||
"math/bits"
|
||||
"testing"
|
||||
|
||||
"github.com/nspcc-dev/neofs-api-go/acl"
|
||||
"github.com/nspcc-dev/neofs-api-go/object"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestBasicACLChecker(t *testing.T) {
|
||||
reqs := []object.RequestType{
|
||||
object.RequestGet,
|
||||
object.RequestHead,
|
||||
object.RequestPut,
|
||||
object.RequestDelete,
|
||||
object.RequestSearch,
|
||||
object.RequestRange,
|
||||
object.RequestRangeHash,
|
||||
}
|
||||
|
||||
targets := []acl.Target{
|
||||
acl.Target_Others,
|
||||
acl.Target_System,
|
||||
acl.Target_User,
|
||||
}
|
||||
|
||||
checker := new(BasicACLChecker)
|
||||
|
||||
t.Run("verb permissions", func(t *testing.T) {
|
||||
mask := uint32(1)
|
||||
|
||||
for i := range reqs {
|
||||
res, err := checker.Bearer(mask, reqs[i])
|
||||
require.NoError(t, err)
|
||||
require.True(t, res)
|
||||
|
||||
mask = bits.Reverse32(mask)
|
||||
res, err = checker.Bearer(mask, reqs[i])
|
||||
require.NoError(t, err)
|
||||
require.False(t, res)
|
||||
|
||||
mask = bits.Reverse32(mask)
|
||||
|
||||
for j := range targets {
|
||||
mask <<= 1
|
||||
res, err = checker.Action(mask, reqs[i], targets[j])
|
||||
require.NoError(t, err)
|
||||
require.True(t, res)
|
||||
|
||||
mask = bits.Reverse32(mask)
|
||||
res, err = checker.Action(mask, reqs[i], targets[j])
|
||||
require.NoError(t, err)
|
||||
require.False(t, res)
|
||||
|
||||
mask = bits.Reverse32(mask)
|
||||
}
|
||||
mask <<= 1
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("unknown verb", func(t *testing.T) {
|
||||
mask := uint32(1)
|
||||
_, err := checker.Bearer(mask, -1)
|
||||
require.Error(t, err)
|
||||
|
||||
mask = 2
|
||||
_, err = checker.Action(mask, -1, acl.Target_Others)
|
||||
require.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("unknown action", func(t *testing.T) {
|
||||
mask := uint32(2)
|
||||
_, err := checker.Action(mask, object.RequestGet, -1)
|
||||
require.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("extended acl permission", func(t *testing.T) {
|
||||
// set F-bit
|
||||
mask := uint32(0) | aclFinalBit
|
||||
require.False(t, checker.Extended(mask))
|
||||
|
||||
// unset F-bit
|
||||
mask = bits.Reverse32(mask)
|
||||
require.True(t, checker.Extended(mask))
|
||||
})
|
||||
|
||||
t.Run("sticky bit permission", func(t *testing.T) {
|
||||
mask := uint32(0x20000000)
|
||||
require.True(t, checker.Sticky(mask))
|
||||
|
||||
mask = bits.Reverse32(mask)
|
||||
require.False(t, checker.Sticky(mask))
|
||||
})
|
||||
}
|
||||
|
||||
// todo: add tests like in basic acl checker
|
||||
func TestNeoFSMaskedBasicACLChecker(t *testing.T) {
|
||||
const orFilter = 0x04040444 // this OR filter will be used in neofs-node
|
||||
checker := NewMaskedBasicACLChecker(orFilter, DefaultAndFilter)
|
||||
|
||||
reqs := []object.RequestType{
|
||||
object.RequestGet,
|
||||
object.RequestHead,
|
||||
object.RequestPut,
|
||||
object.RequestSearch,
|
||||
object.RequestRangeHash,
|
||||
}
|
||||
|
||||
for i := range reqs {
|
||||
res, err := checker.Action(0, reqs[i], acl.Target_System)
|
||||
require.NoError(t, err)
|
||||
require.True(t, res)
|
||||
}
|
||||
}
|
129
lib/acl/binary.go
Normal file
129
lib/acl/binary.go
Normal file
|
@ -0,0 +1,129 @@
|
|||
package acl
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
|
||||
"github.com/nspcc-dev/neofs-api-go/refs"
|
||||
"github.com/nspcc-dev/neofs-node/internal"
|
||||
)
|
||||
|
||||
// BinaryEACLKey is a binary EACL storage key.
|
||||
type BinaryEACLKey struct {
|
||||
cid refs.CID
|
||||
}
|
||||
|
||||
// BinaryEACLValue is a binary EACL storage value.
|
||||
type BinaryEACLValue struct {
|
||||
eacl []byte
|
||||
|
||||
sig []byte
|
||||
}
|
||||
|
||||
// BinaryExtendedACLSource is an interface of storage of binary extended ACL tables with read access.
|
||||
type BinaryExtendedACLSource interface {
|
||||
// Must return binary extended ACL table by key.
|
||||
GetBinaryEACL(context.Context, BinaryEACLKey) (BinaryEACLValue, error)
|
||||
}
|
||||
|
||||
// BinaryExtendedACLStore is an interface of storage of binary extended ACL tables.
|
||||
type BinaryExtendedACLStore interface {
|
||||
BinaryExtendedACLSource
|
||||
|
||||
// Must store binary extended ACL table for key.
|
||||
PutBinaryEACL(context.Context, BinaryEACLKey, BinaryEACLValue) error
|
||||
}
|
||||
|
||||
// ErrNilBinaryExtendedACLStore is returned by function that expect a non-nil
|
||||
// BinaryExtendedACLStore, but received nil.
|
||||
const ErrNilBinaryExtendedACLStore = internal.Error("binary extended ACL store is nil")
|
||||
|
||||
const sliceLenSize = 4
|
||||
|
||||
var eaclEndianness = binary.BigEndian
|
||||
|
||||
// CID is a container ID getter.
|
||||
func (s BinaryEACLKey) CID() refs.CID {
|
||||
return s.cid
|
||||
}
|
||||
|
||||
// SetCID is a container ID setter.
|
||||
func (s *BinaryEACLKey) SetCID(v refs.CID) {
|
||||
s.cid = v
|
||||
}
|
||||
|
||||
// EACL is a binary extended ACL table getter.
|
||||
func (s BinaryEACLValue) EACL() []byte {
|
||||
return s.eacl
|
||||
}
|
||||
|
||||
// SetEACL is a binary extended ACL table setter.
|
||||
func (s *BinaryEACLValue) SetEACL(v []byte) {
|
||||
s.eacl = v
|
||||
}
|
||||
|
||||
// Signature is an EACL signature getter.
|
||||
func (s BinaryEACLValue) Signature() []byte {
|
||||
return s.sig
|
||||
}
|
||||
|
||||
// SetSignature is an EACL signature setter.
|
||||
func (s *BinaryEACLValue) SetSignature(v []byte) {
|
||||
s.sig = v
|
||||
}
|
||||
|
||||
// MarshalBinary returns a binary representation of BinaryEACLValue.
|
||||
func (s BinaryEACLValue) MarshalBinary() ([]byte, error) {
|
||||
data := make([]byte, sliceLenSize+len(s.eacl)+sliceLenSize+len(s.sig))
|
||||
|
||||
off := 0
|
||||
|
||||
eaclEndianness.PutUint32(data[off:], uint32(len(s.eacl)))
|
||||
off += sliceLenSize
|
||||
|
||||
off += copy(data[off:], s.eacl)
|
||||
|
||||
eaclEndianness.PutUint32(data[off:], uint32(len(s.sig)))
|
||||
off += sliceLenSize
|
||||
|
||||
copy(data[off:], s.sig)
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// UnmarshalBinary unmarshals BinaryEACLValue from bytes.
|
||||
func (s *BinaryEACLValue) UnmarshalBinary(data []byte) (err error) {
|
||||
err = io.ErrUnexpectedEOF
|
||||
off := 0
|
||||
|
||||
if len(data[off:]) < sliceLenSize {
|
||||
return
|
||||
}
|
||||
|
||||
aclLn := eaclEndianness.Uint32(data[off:])
|
||||
off += 4
|
||||
|
||||
if uint32(len(data[off:])) < aclLn {
|
||||
return
|
||||
}
|
||||
|
||||
s.eacl = make([]byte, aclLn)
|
||||
off += copy(s.eacl, data[off:])
|
||||
|
||||
if len(data[off:]) < sliceLenSize {
|
||||
return
|
||||
}
|
||||
|
||||
sigLn := eaclEndianness.Uint32(data[off:])
|
||||
off += 4
|
||||
|
||||
if uint32(len(data[off:])) < sigLn {
|
||||
return
|
||||
}
|
||||
|
||||
s.sig = make([]byte, sigLn)
|
||||
copy(s.sig, data[off:])
|
||||
|
||||
return nil
|
||||
}
|
27
lib/acl/binary_test.go
Normal file
27
lib/acl/binary_test.go
Normal file
|
@ -0,0 +1,27 @@
|
|||
package acl
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestBinaryEACLValue checks the getters/setters and the
// MarshalBinary/UnmarshalBinary round trip of BinaryEACLValue.
func TestBinaryEACLValue(t *testing.T) {
	s := BinaryEACLValue{}

	eacl := []byte{1, 2, 3}
	s.SetEACL(eacl)
	require.Equal(t, eacl, s.EACL())

	sig := []byte{4, 5, 6}
	s.SetSignature(sig)
	require.Equal(t, sig, s.Signature())

	data, err := s.MarshalBinary()
	require.NoError(t, err)

	s2 := BinaryEACLValue{}
	require.NoError(t, s2.UnmarshalBinary(data))

	// round-tripped value must be deeply equal to the source
	require.Equal(t, s, s2)
}
|
29
lib/acl/extended.go
Normal file
29
lib/acl/extended.go
Normal file
|
@ -0,0 +1,29 @@
|
|||
package acl
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/nspcc-dev/neofs-api-go/acl"
|
||||
"github.com/nspcc-dev/neofs-api-go/refs"
|
||||
)
|
||||
|
||||
// TypedHeaderSource is an interface of a source of headers of various types.
type TypedHeaderSource interface {
	// Must return list of Header of particular type.
	// Must return false if there is no ability to compose header list.
	HeadersOfType(acl.HeaderType) ([]acl.Header, bool)
}
|
||||
|
||||
// ExtendedACLSource is an interface of storage of extended ACL tables with read access.
type ExtendedACLSource interface {
	// Must return extended ACL table by container ID key.
	// The error is non-nil if the table could not be retrieved.
	GetExtendedACLTable(context.Context, refs.CID) (acl.ExtendedACLTable, error)
}
|
||||
|
||||
// ExtendedACLStore is an interface of storage of extended ACL tables
// with both read and write access.
type ExtendedACLStore interface {
	ExtendedACLSource

	// Must store extended ACL table for container ID key.
	PutExtendedACLTable(context.Context, refs.CID, acl.ExtendedACLTable) error
}
|
234
lib/acl/header.go
Normal file
234
lib/acl/header.go
Normal file
|
@ -0,0 +1,234 @@
|
|||
package acl
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/nspcc-dev/neofs-api-go/acl"
|
||||
"github.com/nspcc-dev/neofs-api-go/object"
|
||||
"github.com/nspcc-dev/neofs-api-go/service"
|
||||
)
|
||||
|
||||
// objectHeaderSource adapts an object to the TypedHeaderSource interface.
type objectHeaderSource struct {
	obj *object.Object
}

// typedHeader is a plain name/value pair tagged with a header type.
type typedHeader struct {
	n string // header name
	v string // header value
	t acl.HeaderType // header type tag
}

// extendedHeadersWrapper adapts a request extended-headers source to the
// TypedHeaderSource interface.
type extendedHeadersWrapper struct {
	hdrSrc service.ExtendedHeadersSource
}

// typedExtendedHeader wraps a request extended header as an acl header
// of request type.
type typedExtendedHeader struct {
	hdr service.ExtendedHeader
}
|
||||
|
||||
// newTypedObjSysHdr constructs a typedHeader of object system type
// with the passed name and value.
func newTypedObjSysHdr(name, value string) acl.TypedHeader {
	return &typedHeader{
		n: name,
		v: value,
		t: acl.HdrTypeObjSys,
	}
}
|
||||
|
||||
// Name is a name field getter.
func (s typedHeader) Name() string {
	return s.n
}

// Value is a value field getter.
func (s typedHeader) Value() string {
	return s.v
}

// HeaderType is a type field getter.
func (s typedHeader) HeaderType() acl.HeaderType {
	return s.t
}
|
||||
|
||||
// TypedHeaderSourceFromObject wraps passed object and returns TypedHeaderSource interface.
//
// A nil object is allowed; the resulting source then yields no headers.
func TypedHeaderSourceFromObject(obj *object.Object) TypedHeaderSource {
	return &objectHeaderSource{
		obj: obj,
	}
}
|
||||
|
||||
// HeadersOfType gathers object headers of passed type and returns Header list.
//
// If value of some header can not be calculated (e.g. nil extended header), it does not appear in list.
//
// Always returns true. For a nil wrapped object or an unknown type the
// list is nil/empty.
func (s objectHeaderSource) HeadersOfType(typ acl.HeaderType) ([]acl.Header, bool) {
	if s.obj == nil {
		return nil, true
	}

	var res []acl.Header

	switch typ {
	case acl.HdrTypeObjUsr:
		objHeaders := s.obj.GetHeaders()

		// one slot per extended header; nil-valued headers are skipped below
		res = make([]acl.Header, 0, len(objHeaders))

		for i := range objHeaders {
			if h := newTypedObjectExtendedHeader(objHeaders[i]); h != nil {
				res = append(res, h)
			}
		}
	case acl.HdrTypeObjSys:
		// 7 system header fields
		res = make([]acl.Header, 0, 7)

		sysHdr := s.obj.GetSystemHeader()

		created := sysHdr.GetCreatedAt()

		res = append(res,
			// ID
			newTypedObjSysHdr(
				acl.HdrObjSysNameID,
				sysHdr.ID.String(),
			),

			// CID
			newTypedObjSysHdr(
				acl.HdrObjSysNameCID,
				sysHdr.CID.String(),
			),

			// OwnerID
			newTypedObjSysHdr(
				acl.HdrObjSysNameOwnerID,
				sysHdr.OwnerID.String(),
			),

			// Version
			newTypedObjSysHdr(
				acl.HdrObjSysNameVersion,
				strconv.FormatUint(sysHdr.GetVersion(), 10),
			),

			// PayloadLength
			newTypedObjSysHdr(
				acl.HdrObjSysNamePayloadLength,
				strconv.FormatUint(sysHdr.GetPayloadLength(), 10),
			),

			// CreatedAt.UnitTime
			newTypedObjSysHdr(
				acl.HdrObjSysNameCreatedUnix,
				strconv.FormatUint(uint64(created.GetUnixTime()), 10),
			),

			// CreatedAt.Epoch
			newTypedObjSysHdr(
				acl.HdrObjSysNameCreatedEpoch,
				strconv.FormatUint(created.GetEpoch(), 10),
			),
		)
	}

	return res, true
}
|
||||
|
||||
// newTypedObjectExtendedHeader converts an object extended header into
// an acl.TypedHeader.
//
// Returns nil for headers with nil value, nil user/link sub-headers,
// and unknown header or link kinds.
func newTypedObjectExtendedHeader(h object.Header) acl.TypedHeader {
	val := h.GetValue()
	if val == nil {
		return nil
	}

	res := new(typedHeader)
	// default to system type; overridden for user headers below
	res.t = acl.HdrTypeObjSys

	switch hdr := val.(type) {
	case *object.Header_UserHeader:
		if hdr.UserHeader == nil {
			return nil
		}

		res.t = acl.HdrTypeObjUsr
		res.n = hdr.UserHeader.GetKey()
		res.v = hdr.UserHeader.GetValue()
	case *object.Header_Link:
		if hdr.Link == nil {
			return nil
		}

		// map the link type to its system header name
		switch hdr.Link.GetType() {
		case object.Link_Previous:
			res.n = acl.HdrObjSysLinkPrev
		case object.Link_Next:
			res.n = acl.HdrObjSysLinkNext
		case object.Link_Child:
			res.n = acl.HdrObjSysLinkChild
		case object.Link_Parent:
			res.n = acl.HdrObjSysLinkPar
		case object.Link_StorageGroup:
			res.n = acl.HdrObjSysLinkSG
		default:
			return nil
		}

		res.v = hdr.Link.ID.String()
	default:
		return nil
	}

	return res
}
|
||||
|
||||
// TypedHeaderSourceFromExtendedHeaders wraps passed ExtendedHeadersSource and returns TypedHeaderSource interface.
//
// A nil source is allowed; the resulting source then yields no headers.
func TypedHeaderSourceFromExtendedHeaders(hdrSrc service.ExtendedHeadersSource) TypedHeaderSource {
	return &extendedHeadersWrapper{
		hdrSrc: hdrSrc,
	}
}
|
||||
|
||||
// Name returns the result of Key method of the wrapped header.
func (s typedExtendedHeader) Name() string {
	return s.hdr.Key()
}

// Value returns the result of Value method of the wrapped header.
func (s typedExtendedHeader) Value() string {
	return s.hdr.Value()
}

// HeaderType always returns HdrTypeRequest.
func (s typedExtendedHeader) HeaderType() acl.HeaderType {
	return acl.HdrTypeRequest
}
|
||||
|
||||
// HeadersOfType gathers extended request headers and returns TypedHeader list.
//
// Nil headers are ignored.
//
// Always returns true. For a nil wrapped source or a non-request type
// the list is nil/empty.
func (s extendedHeadersWrapper) HeadersOfType(typ acl.HeaderType) ([]acl.Header, bool) {
	if s.hdrSrc == nil {
		return nil, true
	}

	var res []acl.Header

	// only request-type headers can be composed from this source
	if typ == acl.HdrTypeRequest {
		hs := s.hdrSrc.ExtendedHeaders()

		res = make([]acl.Header, 0, len(hs))

		for i := range hs {
			if hs[i] == nil {
				continue
			}

			res = append(res, &typedExtendedHeader{
				hdr: hs[i],
			})
		}
	}

	return res, true
}
|
60
lib/acl/headers_test.go
Normal file
60
lib/acl/headers_test.go
Normal file
|
@ -0,0 +1,60 @@
|
|||
package acl
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/nspcc-dev/neofs-api-go/acl"
|
||||
"github.com/nspcc-dev/neofs-api-go/object"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestNewTypedObjectExtendedHeader checks conversion of user and link
// object headers into typed ACL headers.
func TestNewTypedObjectExtendedHeader(t *testing.T) {
	var res acl.TypedHeader

	hdr := object.Header{}

	// nil value
	require.Nil(t, newTypedObjectExtendedHeader(hdr))

	// UserHeader
	{
		key := "key"
		val := "val"
		hdr.Value = &object.Header_UserHeader{
			UserHeader: &object.UserHeader{
				Key:   key,
				Value: val,
			},
		}

		res = newTypedObjectExtendedHeader(hdr)
		require.Equal(t, acl.HdrTypeObjUsr, res.HeaderType())
		require.Equal(t, key, res.Name())
		require.Equal(t, val, res.Value())
	}

	{ // Link
		link := new(object.Link)
		link.ID = object.ID{1, 2, 3}

		hdr.Value = &object.Header_Link{
			Link: link,
		}

		// check asserts the system header name produced for a link type
		check := func(lt object.Link_Type, name string) {
			link.Type = lt

			res = newTypedObjectExtendedHeader(hdr)

			require.Equal(t, acl.HdrTypeObjSys, res.HeaderType())
			require.Equal(t, name, res.Name())
			require.Equal(t, link.ID.String(), res.Value())
		}

		check(object.Link_Previous, acl.HdrObjSysLinkPrev)
		check(object.Link_Next, acl.HdrObjSysLinkNext)
		check(object.Link_Parent, acl.HdrObjSysLinkPar)
		check(object.Link_Child, acl.HdrObjSysLinkChild)
		check(object.Link_StorageGroup, acl.HdrObjSysLinkSG)
	}
}
|
94
lib/acl/match.go
Normal file
94
lib/acl/match.go
Normal file
|
@ -0,0 +1,94 @@
|
|||
package acl
|
||||
|
||||
import (
|
||||
"github.com/nspcc-dev/neofs-api-go/acl"
|
||||
)
|
||||
|
||||
// Maps MatchType to corresponding function.
// 1st argument of function - header value, 2nd - header filter.
// Filters with a MatchType missing from this map never match.
var mMatchFns = map[acl.MatchType]func(acl.Header, acl.Header) bool{
	acl.StringEqual: stringEqual,

	acl.StringNotEqual: stringNotEqual,
}
|
||||
|
||||
// Results of MatchFilters.
const (
	mResUndefined = iota // headers of some type could not be composed
	mResMatch            // every filter was satisfied
	mResMismatch         // at least one filter was not satisfied
)
|
||||
|
||||
// MatchFilters checks if passed source carry at least one header that satisfies passed filters.
//
// Nil header does not satisfy any filter. Any header does not satisfy nil filter.
//
// Returns mResMismatch if passed TypedHeaderSource is nil.
// Returns mResMatch if passed filters are empty.
//
// If headers for some of the HeaderType could not be composed, mResUndefined returns.
//
// Each filter must be satisfied by at least one header for the overall
// result to be mResMatch. Note that a nil filter is skipped but still
// counted in len(filters), so its presence forces mResMismatch.
func MatchFilters(src TypedHeaderSource, filters []acl.HeaderFilter) int {
	if src == nil {
		return mResMismatch
	} else if len(filters) == 0 {
		return mResMatch
	}

	matched := 0

	for _, filter := range filters {
		// prevent NPE
		if filter == nil {
			continue
		}

		headers, ok := src.HeadersOfType(filter.HeaderType())
		if !ok {
			return mResUndefined
		}

		// get headers of filtering type
		for _, header := range headers {
			// prevent NPE
			if header == nil {
				continue
			}

			// check header name
			if header.Name() != filter.Name() {
				continue
			}

			// get match function
			matchFn, ok := mMatchFns[filter.MatchType()]
			if !ok {
				continue
			}

			// check match
			if !matchFn(header, filter) {
				continue
			}

			// increment match counter
			matched++

			// one satisfying header per filter is enough
			break
		}
	}

	res := mResMismatch

	if matched >= len(filters) {
		res = mResMatch
	}

	return res
}
|
||||
|
||||
// stringEqual reports whether header and filter values are equal.
func stringEqual(header, filter acl.Header) bool {
	return header.Value() == filter.Value()
}

// stringNotEqual reports whether header and filter values differ.
func stringNotEqual(header, filter acl.Header) bool {
	return header.Value() != filter.Value()
}
|
192
lib/acl/match_test.go
Normal file
192
lib/acl/match_test.go
Normal file
|
@ -0,0 +1,192 @@
|
|||
package acl
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/nspcc-dev/neofs-api-go/acl"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// testTypedHeader is a fixed name/value/type acl.TypedHeader stub.
type testTypedHeader struct {
	t acl.HeaderType
	k string
	v string
}

// testHeaderSrc is a TypedHeaderSource stub over a fixed header list.
type testHeaderSrc struct {
	hs []acl.TypedHeader
}

// testHeaderFilter is an acl.HeaderFilter stub combining a typed header
// with a match type.
type testHeaderFilter struct {
	acl.TypedHeader
	t acl.MatchType
}

func (s testHeaderFilter) MatchType() acl.MatchType {
	return s.t
}

// HeadersOfType returns the stubbed headers of the requested type.
// Always reports true.
func (s testHeaderSrc) HeadersOfType(typ acl.HeaderType) ([]acl.Header, bool) {
	res := make([]acl.Header, 0, len(s.hs))

	for i := range s.hs {
		if s.hs[i].HeaderType() == typ {
			res = append(res, s.hs[i])
		}
	}

	return res, true
}

func (s testTypedHeader) Name() string {
	return s.k
}

func (s testTypedHeader) Value() string {
	return s.v
}

func (s testTypedHeader) HeaderType() acl.HeaderType {
	return s.t
}
|
||||
|
||||
// TestMatchFilters drives MatchFilters through a table of
// header/filter combinations and checks the match result.
func TestMatchFilters(t *testing.T) {
	// nil TypedHeaderSource
	require.Equal(t, mResMismatch, MatchFilters(nil, nil))

	// empty HeaderFilter list
	require.Equal(t, mResMatch, MatchFilters(new(testHeaderSrc), nil))

	k := "key"
	v := "value"
	ht := acl.HeaderType(1)

	items := []struct {
		// list of Key-Value-HeaderType for headers construction
		hs []interface{}
		// list of Key-Value-HeaderType-MatchType for filters construction
		fs  []interface{}
		exp int // expected MatchFilters result
	}{
		{ // different HeaderType
			hs: []interface{}{
				k, v, ht,
			},
			fs: []interface{}{
				k, v, ht + 1, acl.StringEqual,
			},
			exp: mResMismatch,
		},
		{ // different keys
			hs: []interface{}{
				k, v, ht,
			},
			fs: []interface{}{
				k + "1", v, ht, acl.StringEqual,
			},
			exp: mResMismatch,
		},
		{ // equal values, StringEqual
			hs: []interface{}{
				k, v, ht,
			},
			fs: []interface{}{
				k, v, ht, acl.StringEqual,
			},
			exp: mResMatch,
		},
		{ // equal values, StringNotEqual
			hs: []interface{}{
				k, v, ht,
			},
			fs: []interface{}{
				k, v, ht, acl.StringNotEqual,
			},
			exp: mResMismatch,
		},
		{ // not equal values, StringEqual
			hs: []interface{}{
				k, v, ht,
			},
			fs: []interface{}{
				k, v + "1", ht, acl.StringEqual,
			},
			exp: mResMismatch,
		},
		{ // not equal values, StringNotEqual
			hs: []interface{}{
				k, v, ht,
			},
			fs: []interface{}{
				k, v + "1", ht, acl.StringNotEqual,
			},
			exp: mResMatch,
		},
		{ // one header, two filters
			hs: []interface{}{
				k, v, ht,
			},
			fs: []interface{}{
				k, v + "1", ht, acl.StringNotEqual,
				k, v, ht, acl.StringEqual,
			},
			exp: mResMatch,
		},
		{ // two headers, one filter
			hs: []interface{}{
				k, v + "1", ht,
				k, v, ht,
			},
			fs: []interface{}{
				k, v, ht, acl.StringEqual,
			},
			exp: mResMatch,
		},
		{
			hs: []interface{}{
				k, v + "1", acl.HdrTypeRequest,
				k, v, acl.HdrTypeObjUsr,
			},
			fs: []interface{}{
				k, v, acl.HdrTypeRequest, acl.StringNotEqual,
				k, v, acl.HdrTypeObjUsr, acl.StringEqual,
			},
			exp: mResMatch,
		},
	}

	for _, item := range items {
		// build headers from K-V-T triples
		headers := make([]acl.TypedHeader, 0)

		for i := 0; i < len(item.hs); i += 3 {
			headers = append(headers, &testTypedHeader{
				t: item.hs[i+2].(acl.HeaderType),
				k: item.hs[i].(string),
				v: item.hs[i+1].(string),
			})
		}

		// build filters from K-V-T-M quadruples
		filters := make([]acl.HeaderFilter, 0)

		for i := 0; i < len(item.fs); i += 4 {
			filters = append(filters, &testHeaderFilter{
				TypedHeader: &testTypedHeader{
					t: item.fs[i+2].(acl.HeaderType),
					k: item.fs[i].(string),
					v: item.fs[i+1].(string),
				},
				t: item.fs[i+3].(acl.MatchType),
			})
		}

		require.Equal(t,
			item.exp,
			MatchFilters(
				&testHeaderSrc{
					hs: headers,
				},
				filters,
			),
		)
	}
}
|
31
lib/blockchain/event/event.go
Normal file
31
lib/blockchain/event/event.go
Normal file
|
@ -0,0 +1,31 @@
|
|||
package event
|
||||
|
||||
// Type is a notification event enumeration type.
type Type string

// Event is an interface that is
// provided by Neo:Morph event structures.
type Event interface {
	MorphEvent()
}

// Equal compares two Type values and
// returns true if they are equal.
func (t Type) Equal(t2 Type) bool {
	// Type has a string underlying type, so direct comparison is
	// equivalent to (and cheaper than) comparing converted strings.
	return t == t2
}

// String returns Type cast to string.
func (t Type) String() string {
	return string(t)
}

// TypeFromBytes converts a byte slice to Type.
func TypeFromBytes(data []byte) Type {
	return Type(data)
}

// TypeFromString converts a string to Type.
func TypeFromString(str string) Type {
	return Type(str)
}
|
22
lib/blockchain/event/handler.go
Normal file
22
lib/blockchain/event/handler.go
Normal file
|
@ -0,0 +1,22 @@
|
|||
package event
|
||||
|
||||
// Handler is an Event processing function.
type Handler func(Event)

// HandlerInfo is a structure that groups
// the parameters of the handler of particular
// contract event.
type HandlerInfo struct {
	scriptHashWithType // (contract script hash, event type) key

	h Handler
}

// SetHandler is an event handler setter.
func (s *HandlerInfo) SetHandler(v Handler) {
	s.h = v
}

// handler returns the stored event handler.
func (s HandlerInfo) handler() Handler {
	return s.h
}
|
309
lib/blockchain/event/listener.go
Normal file
309
lib/blockchain/event/listener.go
Normal file
|
@ -0,0 +1,309 @@
|
|||
package event
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpc/response/result"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"github.com/nspcc-dev/neofs-node/internal"
|
||||
"github.com/nspcc-dev/neofs-node/lib/blockchain/goclient"
|
||||
"github.com/nspcc-dev/neofs-node/lib/blockchain/subscriber"
|
||||
"github.com/pkg/errors"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// Listener is an interface of smart contract notification event listener.
type Listener interface {
	// Must start the event listener.
	//
	// Must listen to events with the parser installed.
	//
	// Must log an error if event listening could not be started
	// (the method itself has no error return value).
	Listen(context.Context)

	// Must set the parser of particular contract event.
	//
	// Parser of each event must be set once. All parsers must be set before Listen call.
	//
	// Must ignore nil parsers and all calls after listener has been started.
	SetParser(ParserInfo)

	// Must register the event handler for particular notification event of contract.
	//
	// The specified handler must be called after each capture and parsing of the event.
	//
	// Must ignore nil handlers.
	RegisterHandler(HandlerInfo)
}
|
||||
|
||||
// ListenerParams is a group of parameters
// for Listener constructor.
type ListenerParams struct {
	Logger *zap.Logger // required

	Subscriber subscriber.Subscriber // required
}
|
||||
|
||||
// listener is the default Listener implementation.
type listener struct {
	mtx *sync.RWMutex // guards started, parsers and handlers

	once *sync.Once // ensures the Listen body runs at most once

	started bool // set once Listen has begun; freezes parser registration

	parsers map[scriptHashWithType]Parser

	handlers map[scriptHashWithType][]Handler

	log *zap.Logger

	subscriber subscriber.Subscriber
}
|
||||
|
||||
const (
	// newListenerFailMsg prefixes NewListener construction errors.
	newListenerFailMsg = "could not instantiate Listener"

	// errNilLogger is returned when ListenerParams.Logger is not set.
	errNilLogger = internal.Error("nil logger")

	// errNilSubscriber is returned when ListenerParams.Subscriber is not set.
	errNilSubscriber = internal.Error("nil event subscriber")
)
|
||||
|
||||
// Listen starts the listening for events with registered handlers.
|
||||
//
|
||||
// Executes once, all subsequent calls do nothing.
|
||||
//
|
||||
// Returns an error if listener was already started.
|
||||
func (s listener) Listen(ctx context.Context) {
|
||||
s.once.Do(func() {
|
||||
if err := s.listen(ctx); err != nil {
|
||||
s.log.Error("could not start listen to events",
|
||||
zap.String("error", err.Error()),
|
||||
)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (s listener) listen(ctx context.Context) error {
|
||||
// create the list of listening contract hashes
|
||||
hashes := make([]util.Uint160, 0)
|
||||
|
||||
// fill the list with the contracts with set event parsers.
|
||||
s.mtx.RLock()
|
||||
for hashType := range s.parsers {
|
||||
scHash := hashType.scriptHash()
|
||||
|
||||
// prevent repetitions
|
||||
for _, hash := range hashes {
|
||||
if hash.Equals(scHash) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
hashes = append(hashes, hashType.scriptHash())
|
||||
}
|
||||
|
||||
// mark listener as started
|
||||
s.started = true
|
||||
|
||||
s.mtx.RUnlock()
|
||||
|
||||
chEvent, err := s.subscriber.SubscribeForNotification(hashes...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.listenLoop(ctx, chEvent)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s listener) listenLoop(ctx context.Context, chEvent <-chan *result.NotificationEvent) {
|
||||
loop:
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
s.log.Warn("stop event listener by context",
|
||||
zap.String("error", ctx.Err().Error()),
|
||||
)
|
||||
break loop
|
||||
case notifyEvent, ok := <-chEvent:
|
||||
if !ok {
|
||||
s.log.Warn("stop event listener by channel")
|
||||
break loop
|
||||
} else if notifyEvent == nil {
|
||||
s.log.Warn("nil notification event was caught")
|
||||
continue loop
|
||||
}
|
||||
|
||||
s.parseAndHandle(notifyEvent)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// parseAndHandle validates a raw notification event, resolves its
// parser by (contract hash, event type), parses the payload and feeds
// the result to every registered handler. Malformed or unknown events
// are logged and dropped.
func (s listener) parseAndHandle(notifyEvent *result.NotificationEvent) {
	log := s.log.With(
		zap.String("script hash LE", notifyEvent.Contract.StringLE()),
	)

	// stack item must be an array of items
	arr, err := goclient.ArrayFromStackParameter(notifyEvent.Item)
	if err != nil {
		log.Warn("stack item is not an array type",
			zap.String("error", err.Error()),
		)

		return
	} else if len(arr) == 0 {
		log.Warn("stack item array is empty")
		return
	}

	// first item must be a byte array
	typBytes, err := goclient.BytesFromStackParameter(arr[0])
	if err != nil {
		log.Warn("first array item is not a byte array",
			zap.String("error", err.Error()),
		)

		return
	}

	// calculate event type from bytes
	typEvent := TypeFromBytes(typBytes)

	log = log.With(
		zap.Stringer("event type", typEvent),
	)

	// get the event parser
	keyEvent := scriptHashWithType{}
	keyEvent.SetScriptHash(notifyEvent.Contract)
	keyEvent.SetType(typEvent)

	s.mtx.RLock()
	parser, ok := s.parsers[keyEvent]
	s.mtx.RUnlock()

	if !ok {
		log.Warn("event parser not set")

		return
	}

	// parse the notification event (the type item is consumed above)
	event, err := parser(arr[1:])
	if err != nil {
		log.Warn("could not parse notification event",
			zap.String("error", err.Error()),
		)

		return
	}

	// handle the event
	s.mtx.RLock()
	handlers := s.handlers[keyEvent]
	s.mtx.RUnlock()

	if len(handlers) == 0 {
		log.Info("handlers for parsed notification event were not registered",
			zap.Any("event", event),
		)

		return
	}

	// handlers run synchronously on the listen goroutine
	for _, handler := range handlers {
		handler(event)
	}
}
|
||||
|
||||
// SetParser sets the parser of particular contract event.
|
||||
//
|
||||
// Ignores nil and already set parsers.
|
||||
// Ignores the parser if listener is started.
|
||||
func (s listener) SetParser(p ParserInfo) {
|
||||
log := s.log.With(
|
||||
zap.String("script hash LE", p.scriptHash().StringLE()),
|
||||
zap.Stringer("event type", p.getType()),
|
||||
)
|
||||
|
||||
parser := p.parser()
|
||||
if parser == nil {
|
||||
log.Info("ignore nil event parser")
|
||||
return
|
||||
}
|
||||
|
||||
s.mtx.Lock()
|
||||
defer s.mtx.Unlock()
|
||||
|
||||
// check if the listener was started
|
||||
if s.started {
|
||||
log.Warn("listener has been already started, ignore parser")
|
||||
return
|
||||
}
|
||||
|
||||
// add event parser
|
||||
if _, ok := s.parsers[p.scriptHashWithType]; !ok {
|
||||
s.parsers[p.scriptHashWithType] = p.parser()
|
||||
}
|
||||
|
||||
log.Info("registered new event parser")
|
||||
}
|
||||
|
||||
// RegisterHandler registers the handler for particular notification event of contract.
//
// Ignores nil handlers.
// Ignores handlers of event without parser.
func (s listener) RegisterHandler(p HandlerInfo) {
	log := s.log.With(
		zap.String("script hash LE", p.scriptHash().StringLE()),
		zap.Stringer("event type", p.getType()),
	)

	handler := p.handler()
	if handler == nil {
		log.Warn("ignore nil event handler")
		return
	}

	// check if parser was set
	s.mtx.RLock()
	_, ok := s.parsers[p.scriptHashWithType]
	s.mtx.RUnlock()

	if !ok {
		log.Warn("ignore handler of event w/o parser")
		return
	}

	// add event handler
	// NOTE(review): the parser check and the append below are two
	// separate critical sections — confirm no SetParser/reset can
	// interleave in a way that matters.
	s.mtx.Lock()
	s.handlers[p.scriptHashWithType] = append(
		s.handlers[p.scriptHashWithType],
		p.handler(),
	)
	s.mtx.Unlock()

	log.Info("registered new event handler")
}
|
||||
|
||||
// NewListener create the notification event listener instance and returns Listener interface.
|
||||
func NewListener(p ListenerParams) (Listener, error) {
|
||||
switch {
|
||||
case p.Logger == nil:
|
||||
return nil, errors.Wrap(errNilLogger, newListenerFailMsg)
|
||||
case p.Subscriber == nil:
|
||||
return nil, errors.Wrap(errNilSubscriber, newListenerFailMsg)
|
||||
}
|
||||
|
||||
return &listener{
|
||||
mtx: new(sync.RWMutex),
|
||||
once: new(sync.Once),
|
||||
parsers: make(map[scriptHashWithType]Parser),
|
||||
handlers: make(map[scriptHashWithType][]Handler),
|
||||
log: p.Logger,
|
||||
subscriber: p.Subscriber,
|
||||
}, nil
|
||||
}
|
39
lib/blockchain/event/netmap/epoch.go
Normal file
39
lib/blockchain/event/netmap/epoch.go
Normal file
|
@ -0,0 +1,39 @@
|
|||
package netmap
|
||||
|
||||
import (
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract"
|
||||
"github.com/nspcc-dev/neofs-node/lib/blockchain/event"
|
||||
"github.com/nspcc-dev/neofs-node/lib/blockchain/goclient"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// NewEpoch is a new epoch Neo:Morph event.
type NewEpoch struct {
	num uint64 // the new epoch number
}

// MorphEvent implements Neo:Morph Event interface.
func (NewEpoch) MorphEvent() {}

// EpochNumber returns new epoch number.
func (s NewEpoch) EpochNumber() uint64 {
	return s.num
}
|
||||
|
||||
// ParseNewEpoch is a parser of new epoch notification event.
//
// Result is type of NewEpoch.
//
// Expects exactly one stack parameter: the integer epoch number.
func ParseNewEpoch(prms []smartcontract.Parameter) (event.Event, error) {
	if ln := len(prms); ln != 1 {
		return nil, event.WrongNumberOfParameters(1, ln)
	}

	prmEpochNum, err := goclient.IntFromStackParameter(prms[0])
	if err != nil {
		return nil, errors.Wrap(err, "could not get integer epoch number")
	}

	return NewEpoch{
		num: uint64(prmEpochNum),
	}, nil
}
|
47
lib/blockchain/event/netmap/epoch_test.go
Normal file
47
lib/blockchain/event/netmap/epoch_test.go
Normal file
|
@ -0,0 +1,47 @@
|
|||
package netmap
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract"
|
||||
"github.com/nspcc-dev/neofs-node/lib/blockchain/event"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestParseNewEpoch covers parameter-count and type validation as well
// as the successful parse of a NewEpoch event.
func TestParseNewEpoch(t *testing.T) {
	t.Run("wrong number of parameters", func(t *testing.T) {
		prms := []smartcontract.Parameter{
			{},
			{},
		}

		_, err := ParseNewEpoch(prms)
		require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error())
	})

	t.Run("wrong first parameter type", func(t *testing.T) {
		_, err := ParseNewEpoch([]smartcontract.Parameter{
			{
				Type: smartcontract.ByteArrayType,
			},
		})

		require.Error(t, err)
	})

	t.Run("correct behavior", func(t *testing.T) {
		epochNum := uint64(100)

		ev, err := ParseNewEpoch([]smartcontract.Parameter{
			{
				Type:  smartcontract.IntegerType,
				Value: int64(epochNum),
			},
		})

		require.NoError(t, err)
		require.Equal(t, NewEpoch{
			num: epochNum,
		}, ev)
	})
}
|
53
lib/blockchain/event/parser.go
Normal file
53
lib/blockchain/event/parser.go
Normal file
|
@ -0,0 +1,53 @@
|
|||
package event
|
||||
|
||||
import (
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Parser is a function that constructs Event
// from the StackItem list.
type Parser func([]smartcontract.Parameter) (Event, error)

// ParserInfo is a structure that groups
// the parameters of particular contract
// notification event parser.
type ParserInfo struct {
	scriptHashWithType // (contract script hash, event type) key

	p Parser
}
|
||||
|
||||
// wrongPrmNumber is an error about an unexpected number of smart
// contract parameters.
type wrongPrmNumber struct {
	exp, act int // expected and actual parameter counts
}

// WrongNumberOfParameters returns an error about wrong number of smart contract parameters.
func WrongNumberOfParameters(exp, act int) error {
	return &wrongPrmNumber{
		exp: exp,
		act: act,
	}
}

// Error implements the error interface.
//
// NOTE(review): errors.Errorf(...).Error() builds an error value only
// to extract its message; fmt.Sprintf would suffice — confirm before
// changing.
func (s wrongPrmNumber) Error() string {
	return errors.Errorf("wrong parameter count: expected %d, has %d", s.exp, s.act).Error()
}
|
||||
|
||||
// SetParser is an event parser setter.
func (s *ParserInfo) SetParser(v Parser) {
	s.p = v
}

// parser returns the stored event parser function.
func (s ParserInfo) parser() Parser {
	return s.p
}

// SetType is an event type setter.
//
// NOTE(review): duplicates the promoted typeValue.SetType of the
// embedded scriptHashWithType; apparently kept for explicitness.
func (s *ParserInfo) SetType(v Type) {
	s.typ = v
}

// getType returns the stored event type.
func (s ParserInfo) getType() Type {
	return s.typ
}
|
34
lib/blockchain/event/utils.go
Normal file
34
lib/blockchain/event/utils.go
Normal file
|
@ -0,0 +1,34 @@
|
|||
package event
|
||||
|
||||
import "github.com/nspcc-dev/neo-go/pkg/util"
|
||||
|
||||
// scriptHashValue is a wrapper over a contract script hash.
type scriptHashValue struct {
	hash util.Uint160
}

// typeValue is a wrapper over a notification event type.
type typeValue struct {
	typ Type
}

// scriptHashWithType is a (script hash, event type) pair used as the
// key of parser and handler maps.
type scriptHashWithType struct {
	scriptHashValue
	typeValue
}

// SetScriptHash is a script hash setter.
func (s *scriptHashValue) SetScriptHash(v util.Uint160) {
	s.hash = v
}

// scriptHash returns the stored contract script hash.
func (s scriptHashValue) scriptHash() util.Uint160 {
	return s.hash
}

// SetType is an event type setter.
func (s *typeValue) SetType(v Type) {
	s.typ = v
}

// getType returns the stored event type.
func (s typeValue) getType() Type {
	return s.typ
}
|
190
lib/blockchain/goclient/client.go
Normal file
190
lib/blockchain/goclient/client.go
Normal file
|
@ -0,0 +1,190 @@
|
|||
package goclient
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"encoding/hex"
|
||||
"time"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/config/netmode"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpc/client"
|
||||
sc "github.com/nspcc-dev/neo-go/pkg/smartcontract"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"github.com/nspcc-dev/neo-go/pkg/wallet"
|
||||
crypto "github.com/nspcc-dev/neofs-crypto"
|
||||
"github.com/nspcc-dev/neofs-node/internal"
|
||||
"github.com/pkg/errors"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type (
|
||||
// Params is a group of Client's constructor parameters.
|
||||
Params struct {
|
||||
Log *zap.Logger
|
||||
Key *ecdsa.PrivateKey
|
||||
Endpoint string
|
||||
Magic netmode.Magic
|
||||
DialTimeout time.Duration
|
||||
}
|
||||
|
||||
// Client is a neo-go wrapper that provides smart-contract invocation interface.
|
||||
Client struct {
|
||||
log *zap.Logger
|
||||
cli *client.Client
|
||||
acc *wallet.Account
|
||||
}
|
||||
)
|
||||
|
||||
// ErrNilClient is returned by functions that expect
|
||||
// a non-nil Client, but received nil.
|
||||
const ErrNilClient = internal.Error("go client is nil")
|
||||
|
||||
// HaltState returned if TestInvoke function processed without panic.
|
||||
const HaltState = "HALT"
|
||||
|
||||
// ErrMissingFee is returned by functions that expect
|
||||
// a positive invocation fee, but received non-positive.
|
||||
const ErrMissingFee = internal.Error("invocation fee must be positive")
|
||||
|
||||
var (
|
||||
errNilParams = errors.New("chain/client: config was not provided to the constructor")
|
||||
|
||||
errNilLogger = errors.New("chain/client: logger was not provided to the constructor")
|
||||
|
||||
errNilKey = errors.New("chain/client: private key was not provided to the constructor")
|
||||
)
|
||||
|
||||
// Invoke invokes contract method by sending transaction into blockchain.
|
||||
// Supported args types: int64, string, util.Uint160, []byte and bool.
|
||||
//
|
||||
// If passed fee is non-positive, ErrMissingFee returns.
|
||||
func (c *Client) Invoke(contract util.Uint160, fee util.Fixed8, method string, args ...interface{}) error {
|
||||
var params []sc.Parameter
|
||||
for i := range args {
|
||||
param, err := toStackParameter(args[i])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
params = append(params, param)
|
||||
}
|
||||
|
||||
cosigner := []transaction.Cosigner{
|
||||
{
|
||||
Account: c.acc.PrivateKey().PublicKey().GetScriptHash(),
|
||||
Scopes: transaction.Global,
|
||||
},
|
||||
}
|
||||
|
||||
resp, err := c.cli.InvokeFunction(contract, method, params, cosigner)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(resp.Script) == 0 {
|
||||
return errors.New("chain/client: got empty invocation script from neo node")
|
||||
}
|
||||
|
||||
script, err := hex.DecodeString(resp.Script)
|
||||
if err != nil {
|
||||
return errors.New("chain/client: can't decode invocation script from neo node")
|
||||
}
|
||||
|
||||
txHash, err := c.cli.SignAndPushInvocationTx(script, c.acc, 0, fee, cosigner)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.log.Debug("neo client invoke",
|
||||
zap.String("method", method),
|
||||
zap.Stringer("tx_hash", txHash))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// TestInvoke invokes contract method locally in neo-go node. This method should
|
||||
// be used to read data from smart-contract.
|
||||
func (c *Client) TestInvoke(contract util.Uint160, method string, args ...interface{}) ([]sc.Parameter, error) {
|
||||
var params = make([]sc.Parameter, 0, len(args))
|
||||
|
||||
for i := range args {
|
||||
p, err := toStackParameter(args[i])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
params = append(params, p)
|
||||
}
|
||||
|
||||
cosigner := []transaction.Cosigner{
|
||||
{
|
||||
Account: c.acc.PrivateKey().PublicKey().GetScriptHash(),
|
||||
Scopes: transaction.Global,
|
||||
},
|
||||
}
|
||||
|
||||
val, err := c.cli.InvokeFunction(contract, method, params, cosigner)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if val.State != HaltState {
|
||||
return nil, errors.Errorf("chain/client: contract execution finished with state %s", val.State)
|
||||
}
|
||||
|
||||
return val.Stack, nil
|
||||
}
|
||||
|
||||
// New is a Client constructor.
|
||||
func New(ctx context.Context, p *Params) (*Client, error) {
|
||||
switch {
|
||||
case p == nil:
|
||||
return nil, errNilParams
|
||||
case p.Log == nil:
|
||||
return nil, errNilLogger
|
||||
case p.Key == nil:
|
||||
return nil, errNilKey
|
||||
}
|
||||
|
||||
privKeyBytes := crypto.MarshalPrivateKey(p.Key)
|
||||
|
||||
wif, err := keys.WIFEncode(privKeyBytes, keys.WIFVersion, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
account, err := wallet.NewAccountFromWIF(wif)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cli, err := client.New(ctx, p.Endpoint, client.Options{
|
||||
DialTimeout: p.DialTimeout,
|
||||
Network: p.Magic,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Client{log: p.Log, cli: cli, acc: account}, nil
|
||||
}
|
||||
|
||||
func toStackParameter(value interface{}) (sc.Parameter, error) {
|
||||
var result = sc.Parameter{
|
||||
Value: value,
|
||||
}
|
||||
|
||||
// todo: add more types
|
||||
switch value.(type) {
|
||||
case []byte:
|
||||
result.Type = sc.ByteArrayType
|
||||
case int64: // TODO: add other numerical types
|
||||
result.Type = sc.IntegerType
|
||||
default:
|
||||
return result, errors.Errorf("chain/client: unsupported parameter %v", value)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
33
lib/blockchain/goclient/client_test.go
Normal file
33
lib/blockchain/goclient/client_test.go
Normal file
|
@ -0,0 +1,33 @@
|
|||
package goclient
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
sc "github.com/nspcc-dev/neo-go/pkg/smartcontract"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestToStackParameter(t *testing.T) {
|
||||
items := []struct {
|
||||
value interface{}
|
||||
expType sc.ParamType
|
||||
}{
|
||||
{
|
||||
value: []byte{1, 2, 3},
|
||||
expType: sc.ByteArrayType,
|
||||
},
|
||||
{
|
||||
value: int64(100),
|
||||
expType: sc.IntegerType,
|
||||
},
|
||||
}
|
||||
|
||||
for _, item := range items {
|
||||
t.Run(item.expType.String()+" to stack parameter", func(t *testing.T) {
|
||||
res, err := toStackParameter(item.value)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, item.expType, res.Type)
|
||||
require.Equal(t, item.value, res.Value)
|
||||
})
|
||||
}
|
||||
}
|
131
lib/blockchain/goclient/util.go
Normal file
131
lib/blockchain/goclient/util.go
Normal file
|
@ -0,0 +1,131 @@
|
|||
package goclient
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
|
||||
sc "github.com/nspcc-dev/neo-go/pkg/smartcontract"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
/*
|
||||
Use these function to parse stack parameters obtained from `TestInvoke`
|
||||
function to native go types. You should know upfront return types of invoked
|
||||
method.
|
||||
*/
|
||||
|
||||
// BoolFromStackParameter receives boolean value from the value of a smart contract parameter.
|
||||
func BoolFromStackParameter(param sc.Parameter) (bool, error) {
|
||||
switch param.Type {
|
||||
case sc.BoolType:
|
||||
val, ok := param.Value.(bool)
|
||||
if !ok {
|
||||
return false, errors.Errorf("chain/client: can't convert %T to boolean", param.Value)
|
||||
}
|
||||
|
||||
return val, nil
|
||||
case sc.IntegerType:
|
||||
val, ok := param.Value.(int64)
|
||||
if !ok {
|
||||
return false, errors.Errorf("chain/client: can't convert %T to boolean", param.Value)
|
||||
}
|
||||
|
||||
return val > 0, nil
|
||||
case sc.ByteArrayType:
|
||||
val, ok := param.Value.([]byte)
|
||||
if !ok {
|
||||
return false, errors.Errorf("chain/client: can't convert %T to boolean", param.Value)
|
||||
}
|
||||
|
||||
return len(val) != 0, nil
|
||||
default:
|
||||
return false, errors.Errorf("chain/client: %s is not a bool type", param.Type)
|
||||
}
|
||||
}
|
||||
|
||||
// IntFromStackParameter receives numerical value from the value of a smart contract parameter.
|
||||
func IntFromStackParameter(param sc.Parameter) (int64, error) {
|
||||
switch param.Type {
|
||||
case sc.IntegerType:
|
||||
val, ok := param.Value.(int64)
|
||||
if !ok {
|
||||
return 0, errors.Errorf("chain/client: can't convert %T to integer", param.Value)
|
||||
}
|
||||
|
||||
return val, nil
|
||||
case sc.ByteArrayType:
|
||||
val, ok := param.Value.([]byte)
|
||||
if !ok || len(val) > 8 {
|
||||
return 0, errors.Errorf("chain/client: can't convert %T to integer", param.Value)
|
||||
}
|
||||
|
||||
res := make([]byte, 8)
|
||||
copy(res[:len(val)], val)
|
||||
|
||||
return int64(binary.LittleEndian.Uint64(res)), nil
|
||||
default:
|
||||
return 0, errors.Errorf("chain/client: %s is not an integer type", param.Type)
|
||||
}
|
||||
}
|
||||
|
||||
// BytesFromStackParameter receives binary value from the value of a smart contract parameter.
|
||||
func BytesFromStackParameter(param sc.Parameter) ([]byte, error) {
|
||||
if param.Type != sc.ByteArrayType {
|
||||
return nil, errors.Errorf("chain/client: %s is not a byte array type", param.Type)
|
||||
}
|
||||
|
||||
val, ok := param.Value.([]byte)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("chain/client: can't convert %T to byte slice", param.Value)
|
||||
}
|
||||
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// ArrayFromStackParameter returns the slice contract parameters from passed parameter.
|
||||
//
|
||||
// If passed parameter carries boolean false value, (nil, nil) returns.
|
||||
func ArrayFromStackParameter(param sc.Parameter) ([]sc.Parameter, error) {
|
||||
if param.Type == sc.BoolType && !param.Value.(bool) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if param.Type != sc.ArrayType {
|
||||
return nil, errors.Errorf("chain/client: %s is not an array type", param.Type)
|
||||
}
|
||||
|
||||
val, ok := param.Value.([]sc.Parameter)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("chain/client: can't convert %T to parameter slice", param.Value)
|
||||
}
|
||||
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// StringFromStackParameter receives string value from the value of a smart contract parameter.
|
||||
func StringFromStackParameter(param sc.Parameter) (string, error) {
|
||||
switch param.Type {
|
||||
case sc.StringType:
|
||||
val, ok := param.Value.(string)
|
||||
if !ok {
|
||||
return "", errors.Errorf("chain/client: can't convert %T to string", param.Value)
|
||||
}
|
||||
|
||||
return val, nil
|
||||
case sc.ByteArrayType:
|
||||
val, ok := param.Value.([]byte)
|
||||
if !ok {
|
||||
return "", errors.Errorf("chain/client: can't convert %T to string", param.Value)
|
||||
}
|
||||
|
||||
return string(val), nil
|
||||
default:
|
||||
return "", errors.Errorf("chain/client: %s is not a string type", param.Type)
|
||||
}
|
||||
}
|
||||
|
||||
// ReadStorage of the contract directly. Use it for debug, try to obtain
|
||||
// smart-contract data from contract method with TestInvoke function.
|
||||
func ReadStorage(c *Client, contract util.Uint160, key []byte) ([]byte, error) {
|
||||
return c.cli.GetStorageByHash(contract, key)
|
||||
}
|
145
lib/blockchain/goclient/util_test.go
Normal file
145
lib/blockchain/goclient/util_test.go
Normal file
|
@ -0,0 +1,145 @@
|
|||
package goclient
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
sc "github.com/nspcc-dev/neo-go/pkg/smartcontract"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var (
|
||||
stringParam = sc.Parameter{
|
||||
Type: sc.StringType,
|
||||
Value: "Hello World",
|
||||
}
|
||||
|
||||
intParam = sc.Parameter{
|
||||
Type: sc.IntegerType,
|
||||
Value: int64(1),
|
||||
}
|
||||
|
||||
byteWithIntParam = sc.Parameter{
|
||||
Type: sc.ByteArrayType,
|
||||
Value: []byte{0x0a},
|
||||
}
|
||||
|
||||
byteArrayParam = sc.Parameter{
|
||||
Type: sc.ByteArrayType,
|
||||
Value: []byte("Hello World"),
|
||||
}
|
||||
|
||||
emptyByteArrayParam = sc.Parameter{
|
||||
Type: sc.ByteArrayType,
|
||||
Value: []byte{},
|
||||
}
|
||||
|
||||
trueBoolParam = sc.Parameter{
|
||||
Type: sc.BoolType,
|
||||
Value: true,
|
||||
}
|
||||
|
||||
falseBoolParam = sc.Parameter{
|
||||
Type: sc.BoolType,
|
||||
Value: false,
|
||||
}
|
||||
|
||||
arrayParam = sc.Parameter{
|
||||
Type: sc.ArrayType,
|
||||
Value: []sc.Parameter{intParam, byteArrayParam},
|
||||
}
|
||||
)
|
||||
|
||||
func TestBoolFromStackParameter(t *testing.T) {
|
||||
t.Run("true assert", func(t *testing.T) {
|
||||
val, err := BoolFromStackParameter(trueBoolParam)
|
||||
require.NoError(t, err)
|
||||
require.True(t, val)
|
||||
|
||||
val, err = BoolFromStackParameter(intParam)
|
||||
require.NoError(t, err)
|
||||
require.True(t, val)
|
||||
})
|
||||
|
||||
t.Run("false assert", func(t *testing.T) {
|
||||
val, err := BoolFromStackParameter(falseBoolParam)
|
||||
require.NoError(t, err)
|
||||
require.False(t, val)
|
||||
|
||||
val, err = BoolFromStackParameter(emptyByteArrayParam)
|
||||
require.NoError(t, err)
|
||||
require.False(t, val)
|
||||
})
|
||||
|
||||
t.Run("incorrect assert", func(t *testing.T) {
|
||||
_, err := BoolFromStackParameter(stringParam)
|
||||
require.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestArrayFromStackParameter(t *testing.T) {
|
||||
t.Run("correct assert", func(t *testing.T) {
|
||||
val, err := ArrayFromStackParameter(arrayParam)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, val, len(arrayParam.Value.([]sc.Parameter)))
|
||||
})
|
||||
t.Run("incorrect assert", func(t *testing.T) {
|
||||
_, err := ArrayFromStackParameter(byteArrayParam)
|
||||
require.Error(t, err)
|
||||
})
|
||||
t.Run("boolean false case", func(t *testing.T) {
|
||||
val, err := ArrayFromStackParameter(falseBoolParam)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, val)
|
||||
})
|
||||
}
|
||||
|
||||
func TestBytesFromStackParameter(t *testing.T) {
|
||||
t.Run("correct assert", func(t *testing.T) {
|
||||
val, err := BytesFromStackParameter(byteArrayParam)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, byteArrayParam.Value.([]byte), val)
|
||||
})
|
||||
|
||||
t.Run("incorrect assert", func(t *testing.T) {
|
||||
_, err := BytesFromStackParameter(stringParam)
|
||||
require.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestIntFromStackParameter(t *testing.T) {
|
||||
t.Run("correct assert", func(t *testing.T) {
|
||||
val, err := IntFromStackParameter(intParam)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, intParam.Value.(int64), val)
|
||||
|
||||
val, err = IntFromStackParameter(byteWithIntParam)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(0x0a), val)
|
||||
|
||||
val, err = IntFromStackParameter(emptyByteArrayParam)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(0), val)
|
||||
})
|
||||
|
||||
t.Run("incorrect assert", func(t *testing.T) {
|
||||
_, err := IntFromStackParameter(byteArrayParam)
|
||||
require.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestStringFromStackParameter(t *testing.T) {
|
||||
t.Run("correct assert", func(t *testing.T) {
|
||||
val, err := StringFromStackParameter(stringParam)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, stringParam.Value.(string), val)
|
||||
|
||||
val, err = StringFromStackParameter(byteArrayParam)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, string(byteArrayParam.Value.([]byte)), val)
|
||||
})
|
||||
|
||||
t.Run("incorrect assert", func(t *testing.T) {
|
||||
_, err := StringFromStackParameter(intParam)
|
||||
require.Error(t, err)
|
||||
})
|
||||
}
|
151
lib/blockchain/subscriber/subscriber.go
Normal file
151
lib/blockchain/subscriber/subscriber.go
Normal file
|
@ -0,0 +1,151 @@
|
|||
package subscriber
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpc/client"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpc/response"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpc/response/result"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type (
|
||||
// Subscriber is an interface of the NotificationEvent listener.
|
||||
Subscriber interface {
|
||||
SubscribeForNotification(...util.Uint160) (<-chan *result.NotificationEvent, error)
|
||||
UnsubscribeForNotification()
|
||||
}
|
||||
|
||||
subscriber struct {
|
||||
*sync.RWMutex
|
||||
log *zap.Logger
|
||||
client *client.WSClient
|
||||
|
||||
notify chan *result.NotificationEvent
|
||||
notifyIDs map[util.Uint160]string
|
||||
}
|
||||
|
||||
// Params is a group of Subscriber constructor parameters.
|
||||
Params struct {
|
||||
Log *zap.Logger
|
||||
Endpoint string
|
||||
DialTimeout time.Duration
|
||||
}
|
||||
)
|
||||
|
||||
var (
|
||||
errNilParams = errors.New("chain/subscriber: config was not provided to the constructor")
|
||||
|
||||
errNilLogger = errors.New("chain/subscriber: logger was not provided to the constructor")
|
||||
)
|
||||
|
||||
func (s *subscriber) SubscribeForNotification(contracts ...util.Uint160) (<-chan *result.NotificationEvent, error) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
notifyIDs := make(map[util.Uint160]string, len(contracts))
|
||||
|
||||
for i := range contracts {
|
||||
// do not subscribe to already subscribed contracts
|
||||
if _, ok := s.notifyIDs[contracts[i]]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
// subscribe to contract notifications
|
||||
id, err := s.client.SubscribeForExecutionNotifications(&contracts[i])
|
||||
if err != nil {
|
||||
// if there is some error, undo all subscriptions and return error
|
||||
for _, id := range notifyIDs {
|
||||
_ = s.client.Unsubscribe(id)
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// save notification id
|
||||
notifyIDs[contracts[i]] = id
|
||||
}
|
||||
|
||||
// update global map of subscribed contracts
|
||||
for contract, id := range notifyIDs {
|
||||
s.notifyIDs[contract] = id
|
||||
}
|
||||
|
||||
return s.notify, nil
|
||||
}
|
||||
|
||||
func (s *subscriber) UnsubscribeForNotification() {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
for i := range s.notifyIDs {
|
||||
err := s.client.Unsubscribe(s.notifyIDs[i])
|
||||
if err != nil {
|
||||
s.log.Error("unsubscribe for notification",
|
||||
zap.String("event", s.notifyIDs[i]),
|
||||
zap.Error(err))
|
||||
}
|
||||
|
||||
delete(s.notifyIDs, i)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *subscriber) routeNotifications(ctx context.Context) {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case notification := <-s.client.Notifications:
|
||||
switch notification.Type {
|
||||
case response.NotificationEventID:
|
||||
notification, ok := notification.Value.(*result.NotificationEvent)
|
||||
if !ok {
|
||||
s.log.Error("can't cast notify event to the notify struct")
|
||||
continue
|
||||
}
|
||||
|
||||
s.notify <- notification
|
||||
default:
|
||||
s.log.Debug("unsupported notification from the chain",
|
||||
zap.Uint8("type", uint8(notification.Type)),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// New is a constructs Neo:Morph event listener and returns Subscriber interface.
|
||||
func New(ctx context.Context, p *Params) (Subscriber, error) {
|
||||
switch {
|
||||
case p == nil:
|
||||
return nil, errNilParams
|
||||
case p.Log == nil:
|
||||
return nil, errNilLogger
|
||||
}
|
||||
|
||||
wsClient, err := client.NewWS(ctx, p.Endpoint, client.Options{
|
||||
DialTimeout: p.DialTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sub := &subscriber{
|
||||
RWMutex: new(sync.RWMutex),
|
||||
log: p.Log,
|
||||
client: wsClient,
|
||||
notify: make(chan *result.NotificationEvent),
|
||||
notifyIDs: make(map[util.Uint160]string),
|
||||
}
|
||||
|
||||
// Worker listens all events from neo-go websocket and puts them
|
||||
// into corresponding channel. It may be notifications, transactions,
|
||||
// new blocks. For now only notifications.
|
||||
go sub.routeNotifications(ctx)
|
||||
|
||||
return sub, nil
|
||||
}
|
24
lib/boot/bootstrap_test.go
Normal file
24
lib/boot/bootstrap_test.go
Normal file
|
@ -0,0 +1,24 @@
|
|||
package boot
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/nspcc-dev/neofs-api-go/bootstrap"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestBootstrapPeerParams(t *testing.T) {
|
||||
s := BootstrapPeerParams{}
|
||||
|
||||
nodeInfo := &bootstrap.NodeInfo{
|
||||
Address: "address",
|
||||
PubKey: []byte{1, 2, 3},
|
||||
Options: []string{
|
||||
"opt1",
|
||||
"opt2",
|
||||
},
|
||||
}
|
||||
s.SetNodeInfo(nodeInfo)
|
||||
|
||||
require.Equal(t, nodeInfo, s.NodeInfo())
|
||||
}
|
31
lib/boot/bootstrapper.go
Normal file
31
lib/boot/bootstrapper.go
Normal file
|
@ -0,0 +1,31 @@
|
|||
package boot
|
||||
|
||||
import (
|
||||
"github.com/nspcc-dev/neofs-api-go/bootstrap"
|
||||
"github.com/nspcc-dev/neofs-node/internal"
|
||||
)
|
||||
|
||||
// BootstrapPeerParams is a group of parameters
|
||||
// for storage node bootstrap.
|
||||
type BootstrapPeerParams struct {
|
||||
info *bootstrap.NodeInfo
|
||||
}
|
||||
|
||||
// PeerBootstrapper is an interface of the NeoFS node bootstrap tool.
|
||||
type PeerBootstrapper interface {
|
||||
AddPeer(BootstrapPeerParams) error
|
||||
}
|
||||
|
||||
// ErrNilPeerBootstrapper is returned by functions that expect
|
||||
// a non-nil PeerBootstrapper, but received nil.
|
||||
const ErrNilPeerBootstrapper = internal.Error("peer bootstrapper is nil")
|
||||
|
||||
// SetNodeInfo is a node info setter.
|
||||
func (s *BootstrapPeerParams) SetNodeInfo(v *bootstrap.NodeInfo) {
|
||||
s.info = v
|
||||
}
|
||||
|
||||
// NodeInfo is a node info getter.
|
||||
func (s BootstrapPeerParams) NodeInfo() *bootstrap.NodeInfo {
|
||||
return s.info
|
||||
}
|
46
lib/boot/storage.go
Normal file
46
lib/boot/storage.go
Normal file
|
@ -0,0 +1,46 @@
|
|||
package boot
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// StorageBootParams is a group of parameters
|
||||
// for storage node bootstrap operation.
|
||||
type StorageBootParams struct {
|
||||
BootstrapPeerParams
|
||||
}
|
||||
|
||||
// StorageBootController is an entity that performs
|
||||
// registration of a storage node in NeoFS network.
|
||||
type StorageBootController struct {
|
||||
peerBoot PeerBootstrapper
|
||||
|
||||
bootPrm StorageBootParams
|
||||
|
||||
log *zap.Logger
|
||||
}
|
||||
|
||||
// SetPeerBootstrapper is a PeerBootstrapper setter.
|
||||
func (s *StorageBootController) SetPeerBootstrapper(v PeerBootstrapper) {
|
||||
s.peerBoot = v
|
||||
}
|
||||
|
||||
// SetBootParams is a storage node bootstrap parameters setter.
|
||||
func (s *StorageBootController) SetBootParams(v StorageBootParams) {
|
||||
s.bootPrm = v
|
||||
}
|
||||
|
||||
// SetLogger is a logging component setter.
|
||||
func (s *StorageBootController) SetLogger(v *zap.Logger) {
|
||||
s.log = v
|
||||
}
|
||||
|
||||
// Bootstrap registers storage node in NeoFS system.
|
||||
func (s StorageBootController) Bootstrap(context.Context) {
|
||||
// register peer in NeoFS network
|
||||
if err := s.peerBoot.AddPeer(s.bootPrm.BootstrapPeerParams); err != nil && s.log != nil {
|
||||
s.log.Error("could not register storage node in network")
|
||||
}
|
||||
}
|
109
lib/buckets/boltdb/boltdb.go
Normal file
109
lib/buckets/boltdb/boltdb.go
Normal file
|
@ -0,0 +1,109 @@
|
|||
package boltdb
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/nspcc-dev/neofs-node/internal"
|
||||
"github.com/nspcc-dev/neofs-node/lib/core"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/viper"
|
||||
"go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
type (
|
||||
bucket struct {
|
||||
db *bbolt.DB
|
||||
name []byte
|
||||
}
|
||||
|
||||
// Options groups the BoltDB bucket's options.
|
||||
Options struct {
|
||||
bbolt.Options
|
||||
Name []byte
|
||||
Path string
|
||||
Perm os.FileMode
|
||||
}
|
||||
)
|
||||
|
||||
const (
|
||||
defaultFilePermission = 0777
|
||||
|
||||
errEmptyPath = internal.Error("database empty path")
|
||||
)
|
||||
|
||||
var _ core.Bucket = (*bucket)(nil)
|
||||
|
||||
// makeCopy returns a detached copy of val so callers can't mutate
// data that is still referenced by the database transaction.
func makeCopy(val []byte) []byte {
	dst := make([]byte, len(val))
	copy(dst, val)

	return dst
}
|
||||
|
||||
// NewOptions prepares options for badger instance.
|
||||
func NewOptions(name core.BucketType, v *viper.Viper) (opts Options, err error) {
|
||||
key := string(name)
|
||||
opts = Options{
|
||||
Options: bbolt.Options{
|
||||
// set defaults:
|
||||
Timeout: bbolt.DefaultOptions.Timeout,
|
||||
FreelistType: bbolt.DefaultOptions.FreelistType,
|
||||
|
||||
// set config options:
|
||||
NoSync: v.GetBool(key + ".no_sync"),
|
||||
ReadOnly: v.GetBool(key + ".read_only"),
|
||||
NoGrowSync: v.GetBool(key + ".no_grow_sync"),
|
||||
NoFreelistSync: v.GetBool(key + ".no_freelist_sync"),
|
||||
|
||||
PageSize: v.GetInt(key + ".page_size"),
|
||||
MmapFlags: v.GetInt(key + ".mmap_flags"),
|
||||
InitialMmapSize: v.GetInt(key + ".initial_mmap_size"),
|
||||
},
|
||||
|
||||
Name: []byte(name),
|
||||
Perm: defaultFilePermission,
|
||||
Path: v.GetString(key + ".path"),
|
||||
}
|
||||
|
||||
if opts.Path == "" {
|
||||
return opts, errEmptyPath
|
||||
}
|
||||
|
||||
if tmp := v.GetDuration(key + ".lock_timeout"); tmp > 0 {
|
||||
opts.Timeout = tmp
|
||||
}
|
||||
|
||||
if perm := v.GetUint32(key + ".perm"); perm != 0 {
|
||||
opts.Perm = os.FileMode(perm)
|
||||
}
|
||||
|
||||
base := path.Dir(opts.Path)
|
||||
if err := os.MkdirAll(base, opts.Perm); err != nil {
|
||||
return opts, errors.Wrapf(err, "could not use `%s` dir", base)
|
||||
}
|
||||
|
||||
return opts, nil
|
||||
}
|
||||
|
||||
// NewBucket creates badger-bucket instance.
|
||||
func NewBucket(opts *Options) (core.Bucket, error) {
|
||||
log.SetOutput(ioutil.Discard) // disable default logger
|
||||
|
||||
db, err := bbolt.Open(opts.Path, opts.Perm, &opts.Options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = db.Update(func(tx *bbolt.Tx) error {
|
||||
_, err := tx.CreateBucketIfNotExists(opts.Name)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &bucket{db: db, name: opts.Name}, nil
|
||||
}
|
94
lib/buckets/boltdb/methods.go
Normal file
94
lib/buckets/boltdb/methods.go
Normal file
|
@ -0,0 +1,94 @@
|
|||
package boltdb
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/mr-tron/base58"
|
||||
"github.com/nspcc-dev/neofs-node/lib/core"
|
||||
"github.com/pkg/errors"
|
||||
"go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
// Get value by key or return error.
|
||||
func (b *bucket) Get(key []byte) (data []byte, err error) {
|
||||
err = b.db.View(func(txn *bbolt.Tx) error {
|
||||
txn.Bucket(b.name).Cursor().Seek(key)
|
||||
val := txn.Bucket(b.name).Get(key)
|
||||
if val == nil {
|
||||
return errors.Wrapf(core.ErrNotFound, "key=%s", base58.Encode(key))
|
||||
}
|
||||
|
||||
data = makeCopy(val)
|
||||
return nil
|
||||
})
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Set value for key.
|
||||
func (b *bucket) Set(key, value []byte) error {
|
||||
return b.db.Update(func(txn *bbolt.Tx) error {
|
||||
k, v := makeCopy(key), makeCopy(value)
|
||||
return txn.Bucket(b.name).Put(k, v)
|
||||
})
|
||||
}
|
||||
|
||||
// Del removes item from bucket by key.
|
||||
func (b *bucket) Del(key []byte) error {
|
||||
return b.db.Update(func(txn *bbolt.Tx) error {
|
||||
return txn.Bucket(b.name).Delete(key)
|
||||
})
|
||||
}
|
||||
|
||||
// Has checks key exists.
|
||||
func (b *bucket) Has(key []byte) bool {
|
||||
_, err := b.Get(key)
|
||||
return !errors.Is(errors.Cause(err), core.ErrNotFound)
|
||||
}
|
||||
|
||||
// Size returns size of database.
|
||||
func (b *bucket) Size() int64 {
|
||||
info, err := os.Stat(b.db.Path())
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
return info.Size()
|
||||
}
|
||||
|
||||
// List all items in bucket.
|
||||
func (b *bucket) List() ([][]byte, error) {
|
||||
var items [][]byte
|
||||
|
||||
if err := b.db.View(func(txn *bbolt.Tx) error {
|
||||
return txn.Bucket(b.name).ForEach(func(k, _ []byte) error {
|
||||
items = append(items, makeCopy(k))
|
||||
return nil
|
||||
})
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return items, nil
|
||||
}
|
||||
|
||||
// Filter elements by filter closure.
|
||||
func (b *bucket) Iterate(handler core.FilterHandler) error {
|
||||
if handler == nil {
|
||||
return core.ErrNilFilterHandler
|
||||
}
|
||||
|
||||
return b.db.View(func(txn *bbolt.Tx) error {
|
||||
return txn.Bucket(b.name).ForEach(func(k, v []byte) error {
|
||||
if !handler(makeCopy(k), makeCopy(v)) {
|
||||
return core.ErrIteratingAborted
|
||||
}
|
||||
return nil
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// Close bucket database.
|
||||
func (b *bucket) Close() error {
|
||||
return b.db.Close()
|
||||
}
|
95
lib/buckets/boltdb/methods_test.go
Normal file
95
lib/buckets/boltdb/methods_test.go
Normal file
|
@ -0,0 +1,95 @@
|
|||
package boltdb
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/nspcc-dev/neofs-node/lib/core"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var config = strings.NewReader(`
|
||||
storage:
|
||||
test_bucket:
|
||||
bucket: boltdb
|
||||
path: ./temp/storage/test_bucket
|
||||
perm: 0777
|
||||
`)
|
||||
|
||||
func TestBucket(t *testing.T) {
|
||||
file, err := ioutil.TempFile("", "test_bolt_db")
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, file.Close())
|
||||
|
||||
v := viper.New()
|
||||
require.NoError(t, v.ReadConfig(config))
|
||||
|
||||
// -- //
|
||||
_, err = NewOptions("storage.test_bucket", v)
|
||||
require.EqualError(t, err, errEmptyPath.Error())
|
||||
|
||||
v.SetDefault("storage.test_bucket.path", file.Name())
|
||||
v.SetDefault("storage.test_bucket.timeout", time.Millisecond*100)
|
||||
// -- //
|
||||
|
||||
opts, err := NewOptions("storage.test_bucket", v)
|
||||
require.NoError(t, err)
|
||||
|
||||
db, err := NewBucket(&opts)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NotPanics(t, func() { db.Size() })
|
||||
|
||||
var (
|
||||
count = uint64(10)
|
||||
expected = []byte("test")
|
||||
)
|
||||
|
||||
for i := uint64(0); i < count; i++ {
|
||||
key := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(key, i)
|
||||
|
||||
require.False(t, db.Has(key))
|
||||
|
||||
val, err := db.Get(key)
|
||||
require.EqualError(t, errors.Cause(err), core.ErrNotFound.Error())
|
||||
require.Empty(t, val)
|
||||
|
||||
require.NoError(t, db.Set(key, expected))
|
||||
|
||||
require.True(t, db.Has(key))
|
||||
|
||||
val, err = db.Get(key)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, val)
|
||||
|
||||
keys, err := db.List()
|
||||
require.NoError(t, err)
|
||||
require.Len(t, keys, 1)
|
||||
require.Equal(t, key, keys[0])
|
||||
|
||||
require.EqualError(t, db.Iterate(nil), core.ErrNilFilterHandler.Error())
|
||||
|
||||
items, err := core.ListBucketItems(db, func(_, _ []byte) bool { return true })
|
||||
require.NoError(t, err)
|
||||
require.Len(t, items, 1)
|
||||
require.Equal(t, key, items[0].Key)
|
||||
require.Equal(t, val, items[0].Val)
|
||||
|
||||
require.NoError(t, db.Del(key))
|
||||
require.False(t, db.Has(key))
|
||||
|
||||
val, err = db.Get(key)
|
||||
require.EqualError(t, errors.Cause(err), core.ErrNotFound.Error())
|
||||
require.Empty(t, val)
|
||||
}
|
||||
|
||||
require.NoError(t, db.Close())
|
||||
require.NoError(t, os.RemoveAll(file.Name()))
|
||||
}
|
25
lib/buckets/boltdb/plugin/main.go
Normal file
25
lib/buckets/boltdb/plugin/main.go
Normal file
|
@ -0,0 +1,25 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/nspcc-dev/neofs-node/lib/buckets/boltdb"
|
||||
"github.com/nspcc-dev/neofs-node/lib/core"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
var _ = PrepareBucket
|
||||
|
||||
// PrepareBucket is interface method for bucket.
|
||||
func PrepareBucket(name core.BucketType, v *viper.Viper) (db core.Bucket, err error) {
|
||||
var opts boltdb.Options
|
||||
|
||||
if opts, err = boltdb.NewOptions("storage."+name, v); err != nil {
|
||||
err = errors.Wrapf(err, "%q: could not prepare options", name)
|
||||
return
|
||||
} else if db, err = boltdb.NewBucket(&opts); err != nil {
|
||||
err = errors.Wrapf(err, "%q: could not prepare bucket", name)
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
101
lib/buckets/fsbucket/bucket.go
Normal file
101
lib/buckets/fsbucket/bucket.go
Normal file
|
@ -0,0 +1,101 @@
|
|||
package fsbucket
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/mr-tron/base58"
|
||||
"github.com/nspcc-dev/neofs-node/internal"
|
||||
"github.com/nspcc-dev/neofs-node/lib/core"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/viper"
|
||||
"go.uber.org/atomic"
|
||||
)
|
||||
|
||||
type (
	// bucket is a flat file system bucket: every key is stored as a
	// single base58-named file directly under dir.
	bucket struct {
		dir  string      // root directory of the bucket
		perm os.FileMode // mode for created files and directories
	}

	// treeBucket shards stored files into nested prefix directories
	// (depth levels of prefixLength hex characters each) to avoid one
	// huge flat directory.
	treeBucket struct {
		dir  string
		perm os.FileMode

		depth        int           // number of nested prefix-directory levels
		prefixLength int           // hex characters consumed per level
		sz           *atomic.Int64 // cached total size of stored values, bytes
	}
)

// Defaults used by NewBucket when the corresponding viper keys are unset.
const (
	defaultDirectory   = "fsbucket"
	defaultPermissions = 0755
	defaultDepth       = 2
	defaultPrefixLen   = 2
)

// errShortKey is returned when a key's hex form is too short to be split
// into depth*prefixLength directory components plus a file name.
const errShortKey = internal.Error("key is too short for tree fs bucket")

var _ core.Bucket = (*bucket)(nil)
|
||||
|
||||
// stringifyKey encodes a raw key into base58; the result is used as the
// on-disk file name in the flat bucket layout.
func stringifyKey(key []byte) string {
	return base58.Encode(key)
}
|
||||
|
||||
func decodeKey(key string) []byte {
|
||||
k, err := base58.Decode(key)
|
||||
if err != nil {
|
||||
panic(err) // it can fail only for not base58 strings
|
||||
}
|
||||
|
||||
return k
|
||||
}
|
||||
|
||||
// NewBucket creates new in-memory bucket instance.
|
||||
func NewBucket(name core.BucketType, v *viper.Viper) (core.Bucket, error) {
|
||||
var (
|
||||
key = "storage." + string(name)
|
||||
dir string
|
||||
perm os.FileMode
|
||||
|
||||
prefixLen int
|
||||
depth int
|
||||
)
|
||||
|
||||
if dir = v.GetString(key + ".directory"); dir == "" {
|
||||
dir = defaultDirectory
|
||||
}
|
||||
|
||||
if perm = os.FileMode(v.GetInt(key + ".permissions")); perm == 0 {
|
||||
perm = defaultPermissions
|
||||
}
|
||||
|
||||
if depth = v.GetInt(key + ".depth"); depth <= 0 {
|
||||
depth = defaultDepth
|
||||
}
|
||||
|
||||
if prefixLen = v.GetInt(key + ".prefix_len"); prefixLen <= 0 {
|
||||
prefixLen = defaultPrefixLen
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(dir, perm); err != nil {
|
||||
return nil, errors.Wrapf(err, "could not create bucket %s", string(name))
|
||||
}
|
||||
|
||||
if v.GetBool(key + ".tree_enabled") {
|
||||
b := &treeBucket{
|
||||
dir: dir,
|
||||
perm: perm,
|
||||
depth: depth,
|
||||
prefixLength: prefixLen,
|
||||
}
|
||||
b.sz = atomic.NewInt64(b.size())
|
||||
|
||||
return b, nil
|
||||
}
|
||||
|
||||
return &bucket{
|
||||
dir: dir,
|
||||
perm: perm,
|
||||
}, nil
|
||||
}
|
107
lib/buckets/fsbucket/methods.go
Normal file
107
lib/buckets/fsbucket/methods.go
Normal file
|
@ -0,0 +1,107 @@
|
|||
package fsbucket
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/nspcc-dev/neofs-node/lib/core"
|
||||
)
|
||||
|
||||
// Get value by key.
|
||||
func (b *bucket) Get(key []byte) ([]byte, error) {
|
||||
p := path.Join(b.dir, stringifyKey(key))
|
||||
if _, err := os.Stat(p); os.IsNotExist(err) {
|
||||
return nil, core.ErrNotFound
|
||||
}
|
||||
|
||||
return ioutil.ReadFile(p)
|
||||
}
|
||||
|
||||
// Set value by key.
|
||||
func (b *bucket) Set(key, value []byte) error {
|
||||
p := path.Join(b.dir, stringifyKey(key))
|
||||
|
||||
return ioutil.WriteFile(p, value, b.perm)
|
||||
}
|
||||
|
||||
// Del value by key.
|
||||
func (b *bucket) Del(key []byte) error {
|
||||
p := path.Join(b.dir, stringifyKey(key))
|
||||
if _, err := os.Stat(p); os.IsNotExist(err) {
|
||||
return core.ErrNotFound
|
||||
}
|
||||
|
||||
return os.Remove(p)
|
||||
}
|
||||
|
||||
// Has checks key exists.
|
||||
func (b *bucket) Has(key []byte) bool {
|
||||
p := path.Join(b.dir, stringifyKey(key))
|
||||
_, err := os.Stat(p)
|
||||
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// listing walks root and invokes fn for every regular file it finds.
// Directories are skipped; a nil fn turns the walk into a no-op scan.
func listing(root string, fn func(path string, info os.FileInfo) error) error {
	return filepath.Walk(root, func(p string, info os.FileInfo, walkErr error) error {
		switch {
		case walkErr != nil:
			return walkErr
		case info.IsDir(), fn == nil:
			return nil
		default:
			return fn(p, info)
		}
	})
}
|
||||
|
||||
// Size of bucket.
|
||||
func (b *bucket) Size() (size int64) {
|
||||
err := listing(b.dir, func(_ string, info os.FileInfo) error {
|
||||
size += info.Size()
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
size = 0
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// List all bucket items.
|
||||
func (b *bucket) List() ([][]byte, error) {
|
||||
buckets := make([][]byte, 0)
|
||||
|
||||
err := listing(b.dir, func(p string, info os.FileInfo) error {
|
||||
buckets = append(buckets, decodeKey(info.Name()))
|
||||
return nil
|
||||
})
|
||||
|
||||
return buckets, err
|
||||
}
|
||||
|
||||
// Filter bucket items by closure.
|
||||
func (b *bucket) Iterate(handler core.FilterHandler) error {
|
||||
return listing(b.dir, func(p string, info os.FileInfo) error {
|
||||
key := decodeKey(info.Name())
|
||||
val, err := ioutil.ReadFile(p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !handler(key, val) {
|
||||
return core.ErrIteratingAborted
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Close bucket (just empty).
//
// NOTE(review): despite the comment, Close is destructive — it removes
// the bucket's root directory and all stored values with it.
func (b *bucket) Close() error {
	return os.RemoveAll(b.dir)
}
|
44
lib/buckets/fsbucket/queue.go
Normal file
44
lib/buckets/fsbucket/queue.go
Normal file
|
@ -0,0 +1,44 @@
|
|||
package fsbucket
|
||||
|
||||
import "sync"
|
||||
|
||||
type (
	// queue is a mutex-guarded FIFO of directory entries pending
	// traversal in treeBucket.listing's breadth-first walk.
	queue struct {
		*sync.RWMutex
		buf []elem
	}

	// elem is a single traversal item: a filesystem path together with
	// the depth and accumulated key prefix at which it was discovered.
	elem struct {
		depth  int
		prefix string
		path   string
	}
)
|
||||
|
||||
func newQueue(n int) *queue {
|
||||
return &queue{
|
||||
RWMutex: new(sync.RWMutex),
|
||||
buf: make([]elem, 0, n),
|
||||
}
|
||||
}
|
||||
|
||||
func (q *queue) Len() int {
|
||||
return len(q.buf)
|
||||
}
|
||||
|
||||
func (q *queue) Push(s elem) {
|
||||
q.Lock()
|
||||
q.buf = append(q.buf, s)
|
||||
q.Unlock()
|
||||
}
|
||||
|
||||
func (q *queue) Pop() (s elem) {
|
||||
q.Lock()
|
||||
if len(q.buf) > 0 {
|
||||
s = q.buf[0]
|
||||
q.buf = q.buf[1:]
|
||||
}
|
||||
q.Unlock()
|
||||
|
||||
return
|
||||
}
|
261
lib/buckets/fsbucket/treemethods.go
Normal file
261
lib/buckets/fsbucket/treemethods.go
Normal file
|
@ -0,0 +1,261 @@
|
|||
package fsbucket
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/nspcc-dev/neofs-node/lib/core"
|
||||
)
|
||||
|
||||
const queueCap = 1000
|
||||
|
||||
// stringifyHexKey encodes a raw key into lowercase hex; the result is
// used as the on-disk file name in the tree layout.
func stringifyHexKey(key []byte) string {
	return hex.EncodeToString(key)
}
|
||||
|
||||
// decodeHexKey reverses stringifyHexKey, turning a hex file name back
// into the raw key bytes. The previous body wrapped hex.DecodeString in
// a redundant error branch that returned exactly the same values.
func decodeHexKey(key string) ([]byte, error) {
	return hex.DecodeString(key)
}
|
||||
|
||||
// treePath returns slice of the dir names that contain the path
|
||||
// and filename, e.g. 0xabcdef => []string{"ab", "cd"}, "abcdef".
|
||||
// In case of errors - return nil slice.
|
||||
func (b *treeBucket) treePath(key []byte) ([]string, string) {
|
||||
filename := stringifyHexKey(key)
|
||||
if len(filename) <= b.prefixLength*b.depth {
|
||||
return nil, filename
|
||||
}
|
||||
|
||||
filepath := filename
|
||||
dirs := make([]string, 0, b.depth)
|
||||
|
||||
for i := 0; i < b.depth; i++ {
|
||||
dirs = append(dirs, filepath[:b.prefixLength])
|
||||
filepath = filepath[b.prefixLength:]
|
||||
}
|
||||
|
||||
return dirs, filename
|
||||
}
|
||||
|
||||
// Get value by key.
|
||||
func (b *treeBucket) Get(key []byte) ([]byte, error) {
|
||||
dirPaths, filename := b.treePath(key)
|
||||
if dirPaths == nil {
|
||||
return nil, errShortKey
|
||||
}
|
||||
|
||||
p := path.Join(b.dir, path.Join(dirPaths...), filename)
|
||||
|
||||
if _, err := os.Stat(p); os.IsNotExist(err) {
|
||||
return nil, core.ErrNotFound
|
||||
}
|
||||
|
||||
return ioutil.ReadFile(p)
|
||||
}
|
||||
|
||||
// Set value by key.
|
||||
func (b *treeBucket) Set(key, value []byte) error {
|
||||
dirPaths, filename := b.treePath(key)
|
||||
if dirPaths == nil {
|
||||
return errShortKey
|
||||
}
|
||||
|
||||
var (
|
||||
dirPath = path.Join(dirPaths...)
|
||||
p = path.Join(b.dir, dirPath, filename)
|
||||
)
|
||||
|
||||
if err := os.MkdirAll(path.Join(b.dir, dirPath), b.perm); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err := ioutil.WriteFile(p, value, b.perm)
|
||||
if err == nil {
|
||||
b.sz.Add(int64(len(value)))
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Del value by key.
|
||||
func (b *treeBucket) Del(key []byte) error {
|
||||
dirPaths, filename := b.treePath(key)
|
||||
if dirPaths == nil {
|
||||
return errShortKey
|
||||
}
|
||||
|
||||
var (
|
||||
err error
|
||||
fi os.FileInfo
|
||||
p = path.Join(b.dir, path.Join(dirPaths...), filename)
|
||||
)
|
||||
|
||||
if fi, err = os.Stat(p); os.IsNotExist(err) {
|
||||
return core.ErrNotFound
|
||||
} else if err = os.Remove(p); err == nil {
|
||||
b.sz.Sub(fi.Size())
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Has checks if key exists.
|
||||
func (b *treeBucket) Has(key []byte) bool {
|
||||
dirPaths, filename := b.treePath(key)
|
||||
if dirPaths == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
p := path.Join(b.dir, path.Join(dirPaths...), filename)
|
||||
|
||||
_, err := os.Stat(p)
|
||||
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// There might be two implementation of listing method: simple with `filepath.Walk()`
// or more complex implementation with path checks, BFS etc. `filepath.Walk()` might
// be slow in large dirs due to sorting operations and non controllable depth.
//
// listing performs a breadth-first traversal rooted at root and calls fn
// for every file that sits at exactly depth+1 levels below root and whose
// name starts with the prefix accumulated from its parent directories.
// Any Lstat/readdir/fn error aborts the whole traversal.
func (b *treeBucket) listing(root string, fn func(path string, info os.FileInfo) error) error {
	// todo: DFS might be better since it won't store many files in queue.
	// todo: queue length can be specified as a parameter
	q := newQueue(queueCap)
	q.Push(elem{path: root})

	for q.Len() > 0 {
		e := q.Pop()

		s, err := os.Lstat(e.path)
		if err != nil {
			// might be better to log and ignore
			return err
		}

		// check if it is correct file
		if !s.IsDir() {
			// we accept files that located in excepted depth and have correct prefix
			// e.g. file 'abcdef0123' => /ab/cd/abcdef0123
			if e.depth == b.depth+1 && strings.HasPrefix(s.Name(), e.prefix) {
				err = fn(e.path, s)
				if err != nil {
					// might be better to log and ignore
					return err
				}
			}

			continue
		}

		// ignore dirs with inappropriate length or depth
		if e.depth > b.depth || (e.depth > 0 && len(s.Name()) > b.prefixLength) {
			continue
		}

		files, err := readDirNames(e.path)
		if err != nil {
			// might be better to log and ignore
			return err
		}

		for i := range files {
			// add prefix of all dirs in path except root dir
			var prefix string
			if e.depth > 0 {
				prefix = e.prefix + s.Name()
			}

			q.Push(elem{
				depth:  e.depth + 1,
				prefix: prefix,
				path:   path.Join(e.path, files[i]),
			})
		}
	}

	return nil
}
|
||||
|
||||
// Size returns the size of the bucket in bytes.
//
// The value comes from the cached counter maintained by Set/Del rather
// than from re-walking the directory tree.
func (b *treeBucket) Size() int64 {
	return b.sz.Load()
}
|
||||
|
||||
func (b *treeBucket) size() (size int64) {
|
||||
err := b.listing(b.dir, func(_ string, info os.FileInfo) error {
|
||||
size += info.Size()
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
size = 0
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// List all bucket items.
|
||||
func (b *treeBucket) List() ([][]byte, error) {
|
||||
buckets := make([][]byte, 0)
|
||||
|
||||
err := b.listing(b.dir, func(p string, info os.FileInfo) error {
|
||||
key, err := decodeHexKey(info.Name())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
buckets = append(buckets, key)
|
||||
return nil
|
||||
})
|
||||
|
||||
return buckets, err
|
||||
}
|
||||
|
||||
// Filter bucket items by closure.
|
||||
func (b *treeBucket) Iterate(handler core.FilterHandler) error {
|
||||
return b.listing(b.dir, func(p string, info os.FileInfo) error {
|
||||
val, err := ioutil.ReadFile(path.Join(b.dir, p))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
key, err := decodeHexKey(info.Name())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !handler(key, val) {
|
||||
return core.ErrIteratingAborted
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Close bucket (remove all available data).
//
// NOTE(review): Close is destructive — it deletes the bucket's root
// directory and everything stored beneath it.
func (b *treeBucket) Close() error {
	return os.RemoveAll(b.dir)
}
|
||||
|
||||
// readDirNames copies `filepath.readDirNames()` without sorting the output.
|
||||
func readDirNames(dirname string) ([]string, error) {
|
||||
f, err := os.Open(dirname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
names, err := f.Readdirnames(-1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f.Close()
|
||||
|
||||
return names, nil
|
||||
}
|
324
lib/buckets/fsbucket/treemethods_test.go
Normal file
324
lib/buckets/fsbucket/treemethods_test.go
Normal file
|
@ -0,0 +1,324 @@
|
|||
package fsbucket
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/atomic"
|
||||
|
||||
"github.com/nspcc-dev/neofs-node/lib/core"
|
||||
)
|
||||
|
||||
// prepareTree builds a temporary directory tree shaped like a treeBucket
// store: fixed hex-named prefix dirs at several depths, each holding two
// files whose names start with the concatenated dir prefixes. When
// badFiles is true it also drops one file per dir whose "fff" prefix
// does not match its location, so listings can be tested for filtering.
// It returns the root path; the caller is responsible for removing it.
func prepareTree(badFiles bool) (string, error) {
	name := make([]byte, 32)
	root, err := ioutil.TempDir("", "treeBucket_test")
	if err != nil {
		return "", err
	}

	// paths must contain strings with hex ascii symbols
	paths := [][]string{
		{root, "abcd"},
		{root, "abcd", "cdef"},
		{root, "abcd", "cd01"},
		{root, "0123", "2345"},
		{root, "0123", "2345", "4567"},
	}

	dirs := make([]string, len(paths))

	for i := range paths {
		dirs[i] = path.Join(paths[i]...)

		err = os.MkdirAll(dirs[i], 0700)
		if err != nil {
			return "", err
		}

		// create couple correct files
		for j := 0; j < 2; j++ {
			_, err := rand.Read(name)
			if err != nil {
				return "", err
			}

			// file name = concatenated dir prefixes + random hex tail,
			// matching what treeBucket.listing expects at this depth
			filePrefix := new(strings.Builder)
			for k := 1; k < len(paths[i]); k++ {
				filePrefix.WriteString(paths[i][k])
			}
			filePrefix.WriteString(hex.EncodeToString(name))

			file, err := os.OpenFile(path.Join(dirs[i], filePrefix.String()), os.O_CREATE, 0700)
			if err != nil {
				return "", err
			}
			file.Close()
		}

		if !badFiles {
			continue
		}

		// create one bad file
		_, err := rand.Read(name)
		if err != nil {
			return "", err
		}

		file, err := os.OpenFile(path.Join(dirs[i], "fff"+hex.EncodeToString(name)), os.O_CREATE, 0700)
		if err != nil {
			return "", err
		}
		file.Close()
	}

	return root, nil
}
|
||||
|
||||
// TestTreebucket_List verifies that List only returns files sitting at
// exactly depth+1 levels with a matching prefix, across several depths
// of the fixture tree built by prepareTree.
func TestTreebucket_List(t *testing.T) {
	root, err := prepareTree(true)
	require.NoError(t, err)
	defer os.RemoveAll(root)

	b := treeBucket{
		dir:          root,
		perm:         0700,
		depth:        1,
		prefixLength: 4,
	}
	results, err := b.List()
	require.NoError(t, err)
	require.Len(t, results, 2)

	b.depth = 2
	results, err = b.List()
	require.NoError(t, err)
	require.Len(t, results, 6)

	b.depth = 3
	results, err = b.List()
	require.NoError(t, err)
	require.Len(t, results, 2)

	b.depth = 4
	results, err = b.List()
	require.NoError(t, err)
	require.Len(t, results, 0)
}
|
||||
|
||||
// TestTreebucket exercises Get/Has/Set/Del of treeBucket against the
// prepareTree fixture, including the short-key error path of Set and
// the on-disk layout (<root>/<p1>/<p2>/<hexkey>) after Set/Del.
func TestTreebucket(t *testing.T) {
	root, err := prepareTree(true)
	require.NoError(t, err)
	defer os.RemoveAll(root)

	b := treeBucket{
		dir:          root,
		perm:         0700,
		depth:        2,
		prefixLength: 4,
		sz:           atomic.NewInt64(0),
	}

	results, err := b.List()
	require.NoError(t, err)
	require.Len(t, results, 6)

	t.Run("Get", func(t *testing.T) {
		for i := range results {
			_, err = b.Get(results[i])
			require.NoError(t, err)
		}
		_, err = b.Get([]byte("Hello world!"))
		require.Error(t, err)
	})

	t.Run("Has", func(t *testing.T) {
		for i := range results {
			require.True(t, b.Has(results[i]))
		}
		require.False(t, b.Has([]byte("Unknown key")))
	})

	t.Run("Set", func(t *testing.T) {
		keyHash := sha256.Sum256([]byte("Set this key"))
		key := keyHash[:]
		value := make([]byte, 32)
		rand.Read(value)

		// set sha256 key
		err := b.Set(key, value)
		require.NoError(t, err)

		require.True(t, b.Has(key))
		data, err := b.Get(key)
		require.NoError(t, err)
		require.Equal(t, data, value)

		// the file must land at <root>/<4-char>/<4-char>/<hexkey>
		filename := hex.EncodeToString(key)
		_, err = os.Lstat(path.Join(root, filename[:4], filename[4:8], filename))
		require.NoError(t, err)

		// set key that cannot be placed in the required dir depth
		key, err = hex.DecodeString("abcdef")
		require.NoError(t, err)

		err = b.Set(key, value)
		require.Error(t, err)
	})

	t.Run("Delete", func(t *testing.T) {
		keyHash := sha256.Sum256([]byte("Delete this key"))
		key := keyHash[:]
		value := make([]byte, 32)
		rand.Read(value)

		err := b.Set(key, value)
		require.NoError(t, err)

		// delete sha256 key
		err = b.Del(key)
		require.NoError(t, err)

		_, err = b.Get(key)
		require.Error(t, err)
		filename := hex.EncodeToString(key)
		_, err = os.Lstat(path.Join(root, filename[:4], filename[4:8], filename))
		require.Error(t, err)
	})
}
|
||||
|
||||
// TestTreebucket_Close verifies that Close removes the bucket's root
// directory entirely.
func TestTreebucket_Close(t *testing.T) {
	root, err := prepareTree(true)
	require.NoError(t, err)
	defer os.RemoveAll(root)

	b := treeBucket{
		dir:          root,
		perm:         0700,
		depth:        2,
		prefixLength: 4,
	}
	err = b.Close()
	require.NoError(t, err)

	// the root must be gone after Close
	_, err = os.Lstat(root)
	require.Error(t, err)
}
|
||||
|
||||
// TestTreebucket_Size verifies that the cached size counter reflects
// the number of bytes written through Set.
func TestTreebucket_Size(t *testing.T) {
	root, err := prepareTree(true)
	require.NoError(t, err)
	defer os.RemoveAll(root)

	var size int64 = 1024
	key := []byte("Set this key")
	value := make([]byte, size)
	rand.Read(value)

	b := treeBucket{
		dir:          root,
		perm:         0700,
		depth:        2,
		prefixLength: 4,
		sz:           atomic.NewInt64(0),
	}

	err = b.Set(key, value)
	require.NoError(t, err)
	require.Equal(t, size, b.Size())
}
|
||||
|
||||
// BenchmarkTreebucket_List measures the BFS-based List implementation
// against the prepareTree fixture (no bad files).
func BenchmarkTreebucket_List(b *testing.B) {
	root, err := prepareTree(false)
	defer os.RemoveAll(root)
	if err != nil {
		b.Error(err)
	}

	treeFSBucket := &treeBucket{
		dir:          root,
		perm:         0755,
		depth:        2,
		prefixLength: 4,
	}

	b.ReportAllocs()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		_, err := treeFSBucket.List()
		if err != nil {
			b.Error(err)
		}
	}
}
|
||||
|
||||
// BenchmarkFilewalkBucket_List is the filepath.Walk-based baseline for
// BenchmarkTreebucket_List: it collects every file as a BucketItem by
// walking the whole tree.
func BenchmarkFilewalkBucket_List(b *testing.B) {
	root, err := prepareTree(false)
	defer os.RemoveAll(root)
	if err != nil {
		b.Error(err)
	}

	b.ReportAllocs()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		buckets := make([]core.BucketItem, 0)

		filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
			if err != nil || info.IsDir() {
				return nil
			}

			val, err := ioutil.ReadFile(path)
			if err != nil {
				return err
			}

			key, err := decodeHexKey(info.Name())
			if err != nil {
				return err
			}

			buckets = append(buckets, core.BucketItem{
				Key: key,
				Val: val,
			})

			return nil
		})
	}
}
|
||||
|
||||
// BenchmarkTreeBucket_Size measures Size(), which only loads the cached
// atomic counter; the counter is seeded once with a full tree walk.
func BenchmarkTreeBucket_Size(b *testing.B) {
	root, err := prepareTree(false)
	defer os.RemoveAll(root)
	if err != nil {
		b.Error(err)
	}

	treeFSBucket := &treeBucket{
		dir:          root,
		perm:         0755,
		depth:        2,
		prefixLength: 4,
	}

	treeFSBucket.sz = atomic.NewInt64(treeFSBucket.size())

	b.ReportAllocs()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		_ = treeFSBucket.Size()
	}
}
|
64
lib/buckets/init.go
Normal file
64
lib/buckets/init.go
Normal file
|
@ -0,0 +1,64 @@
|
|||
package buckets
|
||||
|
||||
import (
|
||||
"plugin"
|
||||
"strings"
|
||||
|
||||
"github.com/nspcc-dev/neofs-node/lib/buckets/boltdb"
|
||||
"github.com/nspcc-dev/neofs-node/lib/buckets/fsbucket"
|
||||
"github.com/nspcc-dev/neofs-node/lib/buckets/inmemory"
|
||||
"github.com/nspcc-dev/neofs-node/lib/core"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/viper"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const (
|
||||
// BoltDBBucket is a name of BoltDB bucket.
|
||||
BoltDBBucket = "boltdb"
|
||||
|
||||
// InMemoryBucket is a name RAM bucket.
|
||||
InMemoryBucket = "in-memory"
|
||||
|
||||
// FileSystemBucket is a name of file system bucket.
|
||||
FileSystemBucket = "fsbucket"
|
||||
|
||||
bucketSymbol = "PrepareBucket"
|
||||
)
|
||||
|
||||
// NewBucket is a bucket's constructor.
|
||||
func NewBucket(name core.BucketType, l *zap.Logger, v *viper.Viper) (core.Bucket, error) {
|
||||
bucket := v.GetString("storage." + string(name) + ".bucket")
|
||||
|
||||
l.Info("initialize bucket",
|
||||
zap.String("name", string(name)),
|
||||
zap.String("bucket", bucket))
|
||||
|
||||
switch strings.ToLower(bucket) {
|
||||
case FileSystemBucket:
|
||||
return fsbucket.NewBucket(name, v)
|
||||
|
||||
case InMemoryBucket:
|
||||
return inmemory.NewBucket(name, v), nil
|
||||
|
||||
case BoltDBBucket:
|
||||
opts, err := boltdb.NewOptions("storage."+name, v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return boltdb.NewBucket(&opts)
|
||||
default:
|
||||
instance, err := plugin.Open(bucket)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not load bucket: `%s`", bucket)
|
||||
}
|
||||
|
||||
sym, err := instance.Lookup(bucketSymbol)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not find bucket signature: `%s`", bucket)
|
||||
}
|
||||
|
||||
return sym.(func(core.BucketType, *viper.Viper) (core.Bucket, error))(name, v)
|
||||
}
|
||||
}
|
60
lib/buckets/inmemory/bucket.go
Normal file
60
lib/buckets/inmemory/bucket.go
Normal file
|
@ -0,0 +1,60 @@
|
|||
package inmemory
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/mr-tron/base58"
|
||||
"github.com/nspcc-dev/neofs-node/lib/core"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
type (
	// bucket is a RAM-backed Bucket implementation: values live in a
	// plain map guarded by an RWMutex; keys are base58-encoded.
	bucket struct {
		*sync.RWMutex
		items map[string][]byte
	}
)

const (
	// defaultCapacity is the initial map capacity used when the
	// configured capacity is absent or non-positive.
	defaultCapacity = 100
)

var (
	_ core.Bucket = (*bucket)(nil)

	// for in usage
	_ = NewBucket
)
|
||||
|
||||
// stringifyKey encodes a raw key into base58, the form used as map key.
func stringifyKey(key []byte) string {
	return base58.Encode(key)
}
|
||||
|
||||
func decodeKey(key string) []byte {
|
||||
k, err := base58.Decode(key)
|
||||
if err != nil {
|
||||
panic(err) // it can fail only for not base58 strings
|
||||
}
|
||||
|
||||
return k
|
||||
}
|
||||
|
||||
// makeCopy returns a detached copy of val so callers can never mutate
// the bucket's internal storage through a shared slice.
func makeCopy(val []byte) []byte {
	dup := make([]byte, len(val))
	copy(dup, val)

	return dup
}
|
||||
|
||||
// NewBucket creates new in-memory bucket instance.
|
||||
func NewBucket(name core.BucketType, v *viper.Viper) core.Bucket {
|
||||
var capacity int
|
||||
if capacity = v.GetInt("storage." + string(name) + ".capacity"); capacity <= 0 {
|
||||
capacity = defaultCapacity
|
||||
}
|
||||
|
||||
return &bucket{
|
||||
RWMutex: new(sync.RWMutex),
|
||||
items: make(map[string][]byte, capacity),
|
||||
}
|
||||
}
|
107
lib/buckets/inmemory/methods.go
Normal file
107
lib/buckets/inmemory/methods.go
Normal file
|
@ -0,0 +1,107 @@
|
|||
package inmemory
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
|
||||
"github.com/nspcc-dev/neofs-node/lib/core"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Get value by key.
|
||||
func (b *bucket) Get(key []byte) ([]byte, error) {
|
||||
k := stringifyKey(key)
|
||||
|
||||
b.RLock()
|
||||
val, ok := b.items[k]
|
||||
result := makeCopy(val)
|
||||
b.RUnlock()
|
||||
|
||||
if !ok {
|
||||
return nil, errors.Wrapf(core.ErrNotFound, "key=`%s`", k)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Set value by key.
|
||||
func (b *bucket) Set(key, value []byte) error {
|
||||
k := stringifyKey(key)
|
||||
|
||||
b.Lock()
|
||||
b.items[k] = makeCopy(value)
|
||||
b.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Del value by key.
|
||||
func (b *bucket) Del(key []byte) error {
|
||||
k := stringifyKey(key)
|
||||
|
||||
b.Lock()
|
||||
delete(b.items, k)
|
||||
b.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Has checks key exists.
|
||||
func (b *bucket) Has(key []byte) bool {
|
||||
k := stringifyKey(key)
|
||||
|
||||
b.RLock()
|
||||
_, ok := b.items[k]
|
||||
b.RUnlock()
|
||||
|
||||
return ok
|
||||
}
|
||||
|
||||
// Size size of bucket.
//
// NOTE(review): unsafe.Sizeof(b.items) is the constant size of the map
// header (one machine word), not the amount of data stored — the result
// does not change as items are added. The TODO below acknowledges this;
// a real implementation should accumulate key/value lengths instead.
// Left untouched here because removing the only unsafe usage would also
// require dropping the file's "unsafe" import.
func (b *bucket) Size() int64 {
	b.RLock()
	// TODO we must replace in future
	size := unsafe.Sizeof(b.items)
	b.RUnlock()

	return int64(size)
}
|
||||
|
||||
func (b *bucket) List() ([][]byte, error) {
|
||||
var result = make([][]byte, 0)
|
||||
|
||||
b.RLock()
|
||||
for key := range b.items {
|
||||
result = append(result, decodeKey(key))
|
||||
}
|
||||
b.RUnlock()
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Filter items by closure.
|
||||
func (b *bucket) Iterate(handler core.FilterHandler) error {
|
||||
if handler == nil {
|
||||
return core.ErrNilFilterHandler
|
||||
}
|
||||
|
||||
b.RLock()
|
||||
for key, val := range b.items {
|
||||
k, v := decodeKey(key), makeCopy(val)
|
||||
|
||||
if !handler(k, v) {
|
||||
return core.ErrIteratingAborted
|
||||
}
|
||||
}
|
||||
b.RUnlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close bucket (just empty).
|
||||
func (b *bucket) Close() error {
|
||||
b.Lock()
|
||||
b.items = make(map[string][]byte)
|
||||
b.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
15
lib/container/alias.go
Normal file
15
lib/container/alias.go
Normal file
|
@ -0,0 +1,15 @@
|
|||
package container
|
||||
|
||||
import (
|
||||
"github.com/nspcc-dev/neofs-api-go/container"
|
||||
"github.com/nspcc-dev/neofs-api-go/refs"
|
||||
)
|
||||
|
||||
// Container is a type alias of container.Container from neofs-api-go.
type Container = container.Container

// CID is a type alias of refs.CID (container identifier).
type CID = refs.CID

// OwnerID is a type alias of refs.OwnerID (container owner identifier).
type OwnerID = refs.OwnerID
|
134
lib/container/storage.go
Normal file
134
lib/container/storage.go
Normal file
|
@ -0,0 +1,134 @@
|
|||
package container
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// GetParams is a group of parameters for container receiving operation.
type GetParams struct {
	ctxValue

	cidValue
}

// GetResult is a group of values returned by container receiving operation.
type GetResult struct {
	cnrValue
}

// PutParams is a group of parameters for container storing operation.
type PutParams struct {
	ctxValue

	cnrValue
}

// PutResult is a group of values returned by container storing operation.
type PutResult struct {
	cidValue
}

// DeleteParams is a group of parameters for container removal operation.
type DeleteParams struct {
	ctxValue

	cidValue

	ownerID OwnerID
}

// DeleteResult is a group of values returned by container removal operation.
type DeleteResult struct{}

// ListParams is a group of parameters for container listing operation.
type ListParams struct {
	ctxValue

	ownerIDList []OwnerID
}

// ListResult is a group of values returned by container listing operation.
type ListResult struct {
	cidList []CID
}

// cnrValue is an embeddable carrier of a container pointer.
type cnrValue struct {
	cnr *Container
}

// cidValue is an embeddable carrier of a container ID.
type cidValue struct {
	cid CID
}

// ctxValue is an embeddable carrier of a request context.
type ctxValue struct {
	ctx context.Context
}

// Storage is an interface of the storage of NeoFS containers.
type Storage interface {
	GetContainer(GetParams) (*GetResult, error)
	PutContainer(PutParams) (*PutResult, error)
	DeleteContainer(DeleteParams) (*DeleteResult, error)
	ListContainers(ListParams) (*ListResult, error)
	// TODO: add EACL methods
}
|
||||
|
||||
// Context is a context getter.
func (s ctxValue) Context() context.Context {
	return s.ctx
}

// SetContext is a context setter.
func (s *ctxValue) SetContext(v context.Context) {
	s.ctx = v
}

// CID is a container ID getter.
func (s cidValue) CID() CID {
	return s.cid
}

// SetCID is a container ID setter.
// (The previous comment mislabeled it a getter.)
func (s *cidValue) SetCID(v CID) {
	s.cid = v
}

// Container is a container getter.
func (s cnrValue) Container() *Container {
	return s.cnr
}

// SetContainer is a container setter.
func (s *cnrValue) SetContainer(v *Container) {
	s.cnr = v
}

// OwnerID is an owner ID getter.
func (s DeleteParams) OwnerID() OwnerID {
	return s.ownerID
}

// SetOwnerID is an owner ID setter.
func (s *DeleteParams) SetOwnerID(v OwnerID) {
	s.ownerID = v
}

// OwnerIDList is an owner ID list getter.
func (s ListParams) OwnerIDList() []OwnerID {
	return s.ownerIDList
}

// SetOwnerIDList is an owner ID list setter.
func (s *ListParams) SetOwnerIDList(v ...OwnerID) {
	s.ownerIDList = v
}

// CIDList is a container ID list getter.
func (s ListResult) CIDList() []CID {
	return s.cidList
}

// SetCIDList is a container ID list setter.
func (s *ListResult) SetCIDList(v []CID) {
	s.cidList = v
}
|
83
lib/container/storage_test.go
Normal file
83
lib/container/storage_test.go
Normal file
|
@ -0,0 +1,83 @@
|
|||
package container
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestGetParams checks that GetParams stores and returns the CID it was given.
func TestGetParams(t *testing.T) {
	p := new(GetParams)

	cid := CID{1, 2, 3}
	p.SetCID(cid)

	require.Equal(t, cid, p.CID())
}

// TestGetResult checks the container accessor pair of GetResult.
func TestGetResult(t *testing.T) {
	r := new(GetResult)

	cnr := &Container{
		OwnerID: OwnerID{1, 2, 3},
	}
	r.SetContainer(cnr)

	require.Equal(t, cnr, r.Container())
}

// TestPutParams checks the container accessor pair of PutParams.
func TestPutParams(t *testing.T) {
	p := new(PutParams)

	cnr := &Container{
		OwnerID: OwnerID{1, 2, 3},
	}
	p.SetContainer(cnr)

	require.Equal(t, cnr, p.Container())
}

// TestPutResult checks the CID accessor pair of PutResult.
func TestPutResult(t *testing.T) {
	r := new(PutResult)

	cid := CID{1, 2, 3}
	r.SetCID(cid)

	require.Equal(t, cid, r.CID())
}

// TestDeleteParams checks the owner-ID and CID accessor pairs of DeleteParams.
func TestDeleteParams(t *testing.T) {
	p := new(DeleteParams)

	ownerID := OwnerID{1, 2, 3}
	p.SetOwnerID(ownerID)
	require.Equal(t, ownerID, p.OwnerID())

	cid := CID{4, 5, 6}
	p.SetCID(cid)
	require.Equal(t, cid, p.CID())
}

// TestListParams checks the owner-ID list accessor pair of ListParams.
func TestListParams(t *testing.T) {
	p := new(ListParams)

	ownerIDList := []OwnerID{
		{1, 2, 3},
		{4, 5, 6},
	}
	p.SetOwnerIDList(ownerIDList...)

	require.Equal(t, ownerIDList, p.OwnerIDList())
}

// TestListResult checks the CID list accessor pair of ListResult.
func TestListResult(t *testing.T) {
	r := new(ListResult)

	cidList := []CID{
		{1, 2, 3},
		{4, 5, 6},
	}
	r.SetCIDList(cidList)

	require.Equal(t, cidList, r.CIDList())
}
|
94
lib/core/storage.go
Normal file
94
lib/core/storage.go
Normal file
|
@ -0,0 +1,94 @@
|
|||
package core
|
||||
|
||||
import (
|
||||
"github.com/nspcc-dev/neofs-node/internal"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type (
	// BucketType is the name of a bucket.
	BucketType string

	// FilterHandler is called with each key/value pair during iteration;
	// implementations of Bucket.Iterate are expected to stop iterating
	// when it returns false.
	FilterHandler func(key, val []byte) bool

	// BucketItem is a key/value pair used in filter results.
	BucketItem struct {
		Key []byte
		Val []byte
	}

	// Bucket is a sub-store interface.
	Bucket interface {
		Get(key []byte) ([]byte, error)
		Set(key, value []byte) error
		Del(key []byte) error
		Has(key []byte) bool
		Size() int64
		List() ([][]byte, error)
		Iterate(FilterHandler) error
		// Stream can be implemented by badger.Stream, but not for now
		// Stream(ctx context.Context, key []byte, cb func(io.ReadWriter) error) error
		Close() error
	}

	// Storage component interface: provides access to named buckets.
	Storage interface {
		GetBucket(name BucketType) (Bucket, error)
		Size() int64
		Close() error
	}
)

const (
	// BlobStore is a blob bucket name.
	BlobStore BucketType = "blob"

	// MetaStore is a meta bucket name.
	MetaStore BucketType = "meta"

	// SpaceMetricsStore is a space metrics bucket name.
	SpaceMetricsStore BucketType = "space-metrics"
)

var (
	// ErrNilFilterHandler is returned when a FilterHandler is required
	// but nil was passed.
	ErrNilFilterHandler = errors.New("handler can't be nil")

	// ErrNotFound is returned by key-value storage methods
	// that could not find element by key.
	ErrNotFound = internal.Error("key not found")
)

// ErrIteratingAborted is returned by storage iterator
// after iteration has been interrupted.
var ErrIteratingAborted = errors.New("iteration aborted")

// errEmptyBucket is returned when a nil Bucket is passed where one is required.
var errEmptyBucket = errors.New("empty bucket")
|
||||
|
||||
// String returns the bucket type as a plain string.
func (t BucketType) String() string { return string(t) }
|
||||
|
||||
// ListBucketItems performs iteration over Bucket and returns the full list of its items.
|
||||
func ListBucketItems(b Bucket, h FilterHandler) ([]BucketItem, error) {
|
||||
if b == nil {
|
||||
return nil, errEmptyBucket
|
||||
} else if h == nil {
|
||||
return nil, ErrNilFilterHandler
|
||||
}
|
||||
|
||||
items := make([]BucketItem, 0)
|
||||
|
||||
if err := b.Iterate(func(key, val []byte) bool {
|
||||
if h(key, val) {
|
||||
items = append(items, BucketItem{
|
||||
Key: key,
|
||||
Val: val,
|
||||
})
|
||||
}
|
||||
return true
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return items, nil
|
||||
}
|
65
lib/core/storage_test.go
Normal file
65
lib/core/storage_test.go
Normal file
|
@ -0,0 +1,65 @@
|
|||
package core
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// testBucket is a Bucket stub backed by an in-memory item slice;
// only Iterate is implemented (the embedded Bucket covers the rest).
type testBucket struct {
	Bucket

	items []BucketItem
}
|
||||
|
||||
func (s *testBucket) Iterate(f FilterHandler) error {
|
||||
for i := range s.items {
|
||||
if !f(s.items[i].Key, s.items[i].Val) {
|
||||
return ErrIteratingAborted
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// TestListBucketItems covers the error paths (nil bucket, nil handler)
// and both an accept-all and a reject-all filter over random items.
func TestListBucketItems(t *testing.T) {
	_, err := ListBucketItems(nil, nil)
	require.EqualError(t, err, errEmptyBucket.Error())

	b := new(testBucket)

	_, err = ListBucketItems(b, nil)
	require.EqualError(t, err, ErrNilFilterHandler.Error())

	var (
		count = 10
		ln    = 10
		items = make([]BucketItem, 0, count)
	)

	for i := 0; i < count; i++ {
		items = append(items, BucketItem{
			Key: testData(t, ln),
			Val: testData(t, ln),
		})
	}

	b.items = items

	// accept-all filter returns every stored item
	res, err := ListBucketItems(b, func(key, val []byte) bool { return true })
	require.NoError(t, err)
	require.Equal(t, items, res)

	// reject-all filter returns an empty result without error
	res, err = ListBucketItems(b, func(key, val []byte) bool { return false })
	require.NoError(t, err)
	require.Empty(t, res)
}
|
||||
|
||||
func testData(t *testing.T, sz int) []byte {
|
||||
d := make([]byte, sz)
|
||||
_, err := rand.Read(d)
|
||||
require.NoError(t, err)
|
||||
|
||||
return d
|
||||
}
|
22
lib/core/validator.go
Normal file
22
lib/core/validator.go
Normal file
|
@ -0,0 +1,22 @@
|
|||
package core
|
||||
|
||||
import (
|
||||
"github.com/nspcc-dev/neofs-api-go/service"
|
||||
"github.com/nspcc-dev/neofs-node/internal"
|
||||
)
|
||||
|
||||
// ErrMissingKeySignPairs is returned by functions that expect
|
||||
// a non-empty SignKeyPair slice, but received empty.
|
||||
const ErrMissingKeySignPairs = internal.Error("missing key-signature pairs")
|
||||
|
||||
// VerifyRequestWithSignatures checks if request has signatures and all of them are valid.
|
||||
//
|
||||
// Returns ErrMissingKeySignPairs if request does not have signatures.
|
||||
// Otherwise, behaves like service.VerifyRequestData.
|
||||
func VerifyRequestWithSignatures(req service.RequestVerifyData) error {
|
||||
if len(req.GetSignKeyPairs()) == 0 {
|
||||
return ErrMissingKeySignPairs
|
||||
}
|
||||
|
||||
return service.VerifyRequestData(req)
|
||||
}
|
69
lib/core/verify.go
Normal file
69
lib/core/verify.go
Normal file
|
@ -0,0 +1,69 @@
|
|||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/nspcc-dev/neofs-api-go/refs"
|
||||
"github.com/nspcc-dev/neofs-api-go/service"
|
||||
crypto "github.com/nspcc-dev/neofs-crypto"
|
||||
"github.com/nspcc-dev/neofs-node/internal"
|
||||
)
|
||||
|
||||
// OwnerKeyContainer is an interface of the container of owner's ID and key pair with read access.
type OwnerKeyContainer interface {
	GetOwnerID() refs.OwnerID
	GetOwnerKey() []byte
}

// OwnerKeyVerifier is an interface of OwnerKeyContainer validator.
type OwnerKeyVerifier interface {
	// VerifyKey must check if OwnerKeyContainer satisfies a certain criterion.
	// Nil error is equivalent to matching the criterion.
	VerifyKey(context.Context, OwnerKeyContainer) error
}

// neoKeyVerifier is the stateless Neo implementation of OwnerKeyVerifier.
type neoKeyVerifier struct{}

// ErrNilOwnerKeyContainer is returned by functions that expect a non-nil
// OwnerKeyContainer, but received nil.
const ErrNilOwnerKeyContainer = internal.Error("owner-key container is nil")

// ErrNilOwnerKeyVerifier is returned by functions that expect a non-nil
// OwnerKeyVerifier, but received nil.
const ErrNilOwnerKeyVerifier = internal.Error("owner-key verifier is nil")

// NewNeoKeyVerifier creates a new Neo owner key verifier and return a OwnerKeyVerifier interface.
func NewNeoKeyVerifier() OwnerKeyVerifier {
	return new(neoKeyVerifier)
}
|
||||
|
||||
// VerifyKey checks if the public key converts to owner ID.
|
||||
//
|
||||
// If passed OwnerKeyContainer is nil, ErrNilOwnerKeyContainer returns.
|
||||
// If public key cannot be unmarshaled, service.ErrInvalidPublicKeyBytes returns.
|
||||
// If public key is not converted to owner ID, service.ErrWrongOwner returns.
|
||||
// With neo:morph adoption public key can be unrelated to owner ID. In this
|
||||
// case VerifyKey should call NeoFS.ID smart-contract to check whether public
|
||||
// key is bounded with owner ID. If there is no bound, then return
|
||||
// service.ErrWrongOwner.
|
||||
func (s neoKeyVerifier) VerifyKey(_ context.Context, src OwnerKeyContainer) error {
|
||||
if src == nil {
|
||||
return ErrNilOwnerKeyContainer
|
||||
}
|
||||
|
||||
pubKey := crypto.UnmarshalPublicKey(src.GetOwnerKey())
|
||||
if pubKey == nil {
|
||||
return service.ErrInvalidPublicKeyBytes
|
||||
}
|
||||
|
||||
ownerFromKey, err := refs.NewOwnerID(pubKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !ownerFromKey.Equal(src.GetOwnerID()) {
|
||||
return service.ErrWrongOwner
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
59
lib/fix/catch.go
Normal file
59
lib/fix/catch.go
Normal file
|
@ -0,0 +1,59 @@
|
|||
package fix
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
func (a *app) Catch(err error) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if a.log == nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
a.log.Fatal("Can't run app",
|
||||
zap.Error(err))
|
||||
}
|
||||
|
||||
// CatchTrace prints the chain of wrapped errors for debugging purposes and
// then panics with the innermost error found. Use this function only while
// debugging the application; Catch is the production counterpart.
func (a *app) CatchTrace(err error) {
	if err == nil {
		return
	}

	// Dig into the root of the problem: each iteration unwraps one level
	// of a struct-typed error exposing a "Reason" field (and optionally a
	// "Func" field), printing the location and reason along the way.
	for {
		var (
			ok bool
			v  = reflect.ValueOf(err)
			fn reflect.Value
		)

		// NOTE(review): a *pointer* to such a struct has Kind() == Ptr,
		// not Struct, so pointer-wrapped errors stop unwrapping here —
		// confirm this matches the error types produced by dig/viper.
		if v.Type().Kind() != reflect.Struct {
			break
		}

		if !v.FieldByName("Reason").IsValid() {
			break
		}

		if v.FieldByName("Func").IsValid() {
			fn = v.FieldByName("Func")
		}

		fmt.Printf("Place: %#v\nReason: %s\n\n", fn, err)

		// Step down to the wrapped reason; stop when it is not an error.
		if err, ok = v.FieldByName("Reason").Interface().(error); !ok {
			err = v.Interface().(error)
			break
		}
	}

	panic(err)
}
|
53
lib/fix/config/config.go
Normal file
53
lib/fix/config/config.go
Normal file
|
@ -0,0 +1,53 @@
|
|||
package config
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
// Params groups the parameters of configuration.
type Params struct {
	File    string // optional path to a configuration file
	Type    string // configuration file type; empty defaults to "yaml"
	Prefix  string // environment variable prefix
	Name    string // application name, stored under "app.name"
	Version string // application version, stored under "app.version"

	// AppDefaults, when non-nil, is called to set application-specific defaults.
	AppDefaults func(v *viper.Viper)
}
|
||||
|
||||
// NewConfig is a configuration tool's constructor.
|
||||
func NewConfig(p Params) (v *viper.Viper, err error) {
|
||||
v = viper.New()
|
||||
v.SetEnvPrefix(p.Prefix)
|
||||
v.AutomaticEnv()
|
||||
v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
|
||||
|
||||
v.SetDefault("app.name", p.Name)
|
||||
v.SetDefault("app.version", p.Version)
|
||||
|
||||
if p.AppDefaults != nil {
|
||||
p.AppDefaults(v)
|
||||
}
|
||||
|
||||
if p.fromFile() {
|
||||
v.SetConfigFile(p.File)
|
||||
v.SetConfigType(p.safeType())
|
||||
|
||||
err = v.ReadInConfig()
|
||||
}
|
||||
|
||||
return v, err
|
||||
}
|
||||
|
||||
func (p Params) fromFile() bool {
|
||||
return p.File != ""
|
||||
}
|
||||
|
||||
func (p Params) safeType() string {
|
||||
if p.Type == "" {
|
||||
p.Type = "yaml"
|
||||
}
|
||||
return strings.ToLower(p.Type)
|
||||
}
|
112
lib/fix/fix.go
Normal file
112
lib/fix/fix.go
Normal file
|
@ -0,0 +1,112 @@
|
|||
package fix
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/nspcc-dev/neofs-node/lib/fix/config"
|
||||
"github.com/nspcc-dev/neofs-node/lib/fix/logger"
|
||||
"github.com/nspcc-dev/neofs-node/lib/fix/module"
|
||||
"github.com/nspcc-dev/neofs-node/misc"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/viper"
|
||||
"go.uber.org/dig"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type (
	// App is an interface of executable application.
	App interface {
		Run() error
		RunAndCatch()
	}

	// app is the App implementation built around a dig DI container.
	app struct {
		err    error        // construction error, reported by Run
		log    *zap.Logger  // set from the container inside Run
		di     *dig.Container
		runner interface{}  // entry-point function invoked via the container
	}

	// Settings groups the application parameters.
	Settings struct {
		File    string
		Type    string
		Name    string
		Prefix  string
		Build   string
		Version string
		Runner  interface{}

		AppDefaults func(v *viper.Viper)
	}
)
|
||||
|
||||
func (a *app) RunAndCatch() {
|
||||
err := a.Run()
|
||||
|
||||
if errors.Is(err, context.Canceled) {
|
||||
return
|
||||
}
|
||||
|
||||
if ok, _ := strconv.ParseBool(misc.Debug); ok {
|
||||
a.CatchTrace(err)
|
||||
}
|
||||
|
||||
a.Catch(err)
|
||||
}
|
||||
|
||||
func (a *app) Run() error {
|
||||
if a.err != nil {
|
||||
return a.err
|
||||
}
|
||||
|
||||
// setup app logger:
|
||||
if err := a.di.Invoke(func(l *zap.Logger) {
|
||||
a.log = l
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return a.di.Invoke(a.runner)
|
||||
}
|
||||
|
||||
// New is an application constructor: it wires the caller's module together
// with the standard providers (logger, graceful context, viper config) into
// a DI container. A provider registration error is stored and surfaced by Run.
func New(s *Settings, mod module.Module) App {
	var (
		a   app
		err error
	)

	a.di = dig.New(dig.DeferAcyclicVerification())
	a.runner = s.Runner

	// the env prefix defaults to the application name
	if s.Prefix == "" {
		s.Prefix = s.Name
	}

	mod = mod.Append(
		module.Module{
			{Constructor: logger.NewLogger},
			{Constructor: NewGracefulContext},
			{Constructor: func() (*viper.Viper, error) {
				return config.NewConfig(config.Params{
					File:    s.File,
					Type:    s.Type,
					Prefix:  strings.ToUpper(s.Prefix),
					Name:    s.Name,
					Version: fmt.Sprintf("%s(%s)", s.Version, s.Build),

					AppDefaults: s.AppDefaults,
				})
			}},
		})

	if err = module.Provide(a.di, mod); err != nil {
		a.err = err
	}

	return &a
}
|
26
lib/fix/grace.go
Normal file
26
lib/fix/grace.go
Normal file
|
@ -0,0 +1,26 @@
|
|||
package fix
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// NewGracefulContext returns graceful context.
|
||||
func NewGracefulContext(l *zap.Logger) context.Context {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
go func() {
|
||||
ch := make(chan os.Signal, 1)
|
||||
signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
|
||||
sig := <-ch
|
||||
l.Info("received signal",
|
||||
zap.String("signal", sig.String()))
|
||||
cancel()
|
||||
}()
|
||||
|
||||
return ctx
|
||||
}
|
90
lib/fix/logger/logger.go
Normal file
90
lib/fix/logger/logger.go
Normal file
|
@ -0,0 +1,90 @@
|
|||
package logger
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
)
|
||||
|
||||
const (
	// supported encoder names for "logger.format"
	formatJSON    = "json"
	formatConsole = "console"

	// sampling defaults used when "logger.sampling" is set
	// but no explicit values are configured
	defaultSamplingInitial    = 100
	defaultSamplingThereafter = 100
)
|
||||
|
||||
func safeLevel(lvl string) zap.AtomicLevel {
|
||||
switch strings.ToLower(lvl) {
|
||||
case "debug":
|
||||
return zap.NewAtomicLevelAt(zap.DebugLevel)
|
||||
case "warn":
|
||||
return zap.NewAtomicLevelAt(zap.WarnLevel)
|
||||
case "error":
|
||||
return zap.NewAtomicLevelAt(zap.ErrorLevel)
|
||||
case "fatal":
|
||||
return zap.NewAtomicLevelAt(zap.FatalLevel)
|
||||
case "panic":
|
||||
return zap.NewAtomicLevelAt(zap.PanicLevel)
|
||||
default:
|
||||
return zap.NewAtomicLevelAt(zap.InfoLevel)
|
||||
}
|
||||
}
|
||||
|
||||
// NewLogger is a logger's constructor. It builds a zap production logger
// from "logger.*" viper settings (sampling, level, trace level, format)
// and, unless "logger.no_disclaimer" is set, attaches app name/version fields.
func NewLogger(v *viper.Viper) (*zap.Logger, error) {
	c := zap.NewProductionConfig()

	// everything, including errors, goes to stdout
	c.OutputPaths = []string{"stdout"}
	c.ErrorOutputPaths = []string{"stdout"}

	if v.IsSet("logger.sampling") {
		c.Sampling = &zap.SamplingConfig{
			Initial:    defaultSamplingInitial,
			Thereafter: defaultSamplingThereafter,
		}

		if val := v.GetInt("logger.sampling.initial"); val > 0 {
			c.Sampling.Initial = val
		}

		if val := v.GetInt("logger.sampling.thereafter"); val > 0 {
			c.Sampling.Thereafter = val
		}
	}

	// logger level
	c.Level = safeLevel(v.GetString("logger.level"))
	traceLvl := safeLevel(v.GetString("logger.trace_level"))

	// logger format: console when requested, JSON otherwise
	switch f := v.GetString("logger.format"); strings.ToLower(f) {
	case formatConsole:
		c.Encoding = formatConsole
	default:
		c.Encoding = formatJSON
	}

	// logger time
	c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder

	l, err := c.Build(
		// enable trace only for current log-level
		zap.AddStacktrace(traceLvl))
	if err != nil {
		return nil, err
	}

	if v.GetBool("logger.no_disclaimer") {
		return l, nil
	}

	name := v.GetString("app.name")
	version := v.GetString("app.version")

	return l.With(
		zap.String("app_name", name),
		zap.String("app_version", version)), nil
}
|
35
lib/fix/module/module.go
Normal file
35
lib/fix/module/module.go
Normal file
|
@ -0,0 +1,35 @@
|
|||
package module
|
||||
|
||||
import (
|
||||
"go.uber.org/dig"
|
||||
)
|
||||
|
||||
type (
	// Module is an ordered list of DI providers.
	Module []*Provider

	// Provider pairs a constructor with its dig provide options.
	Provider struct {
		Constructor interface{}
		Options     []dig.ProvideOption
	}
)
|
||||
|
||||
// Append module to target module and return new module
|
||||
func (m Module) Append(mods ...Module) Module {
|
||||
var result = m
|
||||
for _, mod := range mods {
|
||||
result = append(result, mod...)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Provide set providers functions to DI container
|
||||
func Provide(dic *dig.Container, providers Module) error {
|
||||
for _, p := range providers {
|
||||
if err := dic.Provide(p.Constructor, p.Options...); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
46
lib/fix/services.go
Normal file
46
lib/fix/services.go
Normal file
|
@ -0,0 +1,46 @@
|
|||
package fix
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
type (
	// Service is a start/stoppable application component.
	Service interface {
		Start(context.Context)
		Stop()
	}

	// combiner fans Start/Stop calls out to a list of services.
	combiner []Service
)

// compile-time check that combiner satisfies Service
var _ Service = (combiner)(nil)
|
||||
|
||||
// NewServices creates single runner.
|
||||
func NewServices(items ...Service) Service {
|
||||
var svc = make(combiner, 0, len(items))
|
||||
|
||||
for _, item := range items {
|
||||
if item == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
svc = append(svc, item)
|
||||
}
|
||||
|
||||
return svc
|
||||
}
|
||||
|
||||
// Start all services, in registration order.
func (c combiner) Start(ctx context.Context) {
	for _, svc := range c {
		svc.Start(ctx)
	}
}

// Stop all services, in registration order.
func (c combiner) Stop() {
	for _, svc := range c {
		svc.Stop()
	}
}
|
114
lib/fix/web/http.go
Normal file
114
lib/fix/web/http.go
Normal file
|
@ -0,0 +1,114 @@
|
|||
package web
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type (
	// httpParams groups the dependencies for building an httpServer;
	// Key is the config prefix ("metrics", "pprof", ...).
	httpParams struct {
		Key     string
		Viper   *viper.Viper
		Logger  *zap.Logger
		Handler http.Handler
	}

	// httpServer wraps a server with start/stop state tracking
	// and a bounded shutdown.
	httpServer struct {
		name        string
		started     *int32 // 0 = stopped, 1 = started (CAS-guarded)
		logger      *zap.Logger
		shutdownTTL time.Duration
		server      server
	}
)
|
||||
|
||||
func (h *httpServer) Start(ctx context.Context) {
|
||||
if h == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !atomic.CompareAndSwapInt32(h.started, 0, 1) {
|
||||
h.logger.Info("http: already started",
|
||||
zap.String("server", h.name))
|
||||
return
|
||||
}
|
||||
|
||||
go func() {
|
||||
if err := h.server.serve(ctx); err != nil {
|
||||
if err != http.ErrServerClosed {
|
||||
h.logger.Error("http: could not start server",
|
||||
zap.Error(err))
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (h *httpServer) Stop() {
|
||||
if h == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !atomic.CompareAndSwapInt32(h.started, 1, 0) {
|
||||
h.logger.Info("http: already stopped",
|
||||
zap.String("server", h.name))
|
||||
return
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), h.shutdownTTL)
|
||||
defer cancel()
|
||||
|
||||
h.logger.Debug("http: try to stop server",
|
||||
zap.String("server", h.name))
|
||||
|
||||
if err := h.server.shutdown(ctx); err != nil {
|
||||
h.logger.Error("http: could not stop server",
|
||||
zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
// defaultShutdownTTL bounds Stop when "<key>.shutdown_ttl" is unset or invalid.
const defaultShutdownTTL = 30 * time.Second

// newHTTPServer builds an httpServer from the "<key>.*" configuration.
// It returns nil (typed *httpServer) when the bind address is empty or the
// handler is nil; Start/Stop tolerate the nil receiver, which is why callers
// may wrap the result in an interface without further checks.
func newHTTPServer(p httpParams) *httpServer {
	var (
		address  string
		shutdown time.Duration
	)

	if address = p.Viper.GetString(p.Key + ".address"); address == "" {
		p.Logger.Info("Empty bind address, skip",
			zap.String("server", p.Key))
		return nil
	}
	if p.Handler == nil {
		p.Logger.Info("Empty handler, skip",
			zap.String("server", p.Key))
		return nil
	}

	p.Logger.Info("Create http.Server",
		zap.String("server", p.Key),
		zap.String("address", address))

	// fall back to the default TTL on missing/non-positive config
	if shutdown = p.Viper.GetDuration(p.Key + ".shutdown_ttl"); shutdown <= 0 {
		shutdown = defaultShutdownTTL
	}

	return &httpServer{
		name:        p.Key,
		started:     new(int32),
		logger:      p.Logger,
		shutdownTTL: shutdown,
		server: newServer(params{
			Address: address,
			Name:    p.Key,
			Config:  p.Viper,
			Logger:  p.Logger,
			Handler: p.Handler,
		}),
	}
}
|
32
lib/fix/web/metrics.go
Normal file
32
lib/fix/web/metrics.go
Normal file
|
@ -0,0 +1,32 @@
|
|||
package web
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/spf13/viper"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// Metrics is an interface of metric tool.
type Metrics interface {
	Start(ctx context.Context)
	Stop()
}

// metricsKey is the viper configuration prefix of the metrics server.
const metricsKey = "metrics"
|
||||
|
||||
// NewMetrics is a metric tool's constructor.
|
||||
func NewMetrics(l *zap.Logger, v *viper.Viper) Metrics {
|
||||
if !v.GetBool(metricsKey + ".enabled") {
|
||||
l.Debug("metrics server disabled")
|
||||
return nil
|
||||
}
|
||||
|
||||
return newHTTPServer(httpParams{
|
||||
Key: metricsKey,
|
||||
Viper: v,
|
||||
Logger: l,
|
||||
Handler: promhttp.Handler(),
|
||||
})
|
||||
}
|
44
lib/fix/web/pprof.go
Normal file
44
lib/fix/web/pprof.go
Normal file
|
@ -0,0 +1,44 @@
|
|||
package web
|
||||
|
||||
import (
|
||||
"context"
|
||||
"expvar"
|
||||
"net/http"
|
||||
"net/http/pprof"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// Profiler is an interface of profiler.
type Profiler interface {
	Start(ctx context.Context)
	Stop()
}

// profilerKey is the viper configuration prefix of the pprof server.
const profilerKey = "pprof"
|
||||
|
||||
// NewProfiler is a profiler's constructor.
|
||||
func NewProfiler(l *zap.Logger, v *viper.Viper) Profiler {
|
||||
if !v.GetBool(profilerKey + ".enabled") {
|
||||
l.Debug("pprof server disabled")
|
||||
return nil
|
||||
}
|
||||
|
||||
mux := http.NewServeMux()
|
||||
|
||||
mux.Handle("/debug/vars", expvar.Handler())
|
||||
|
||||
mux.HandleFunc("/debug/pprof/", pprof.Index)
|
||||
mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
|
||||
mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
|
||||
mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
|
||||
mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
|
||||
|
||||
return newHTTPServer(httpParams{
|
||||
Key: profilerKey,
|
||||
Viper: v,
|
||||
Logger: l,
|
||||
Handler: mux,
|
||||
})
|
||||
}
|
62
lib/fix/web/server.go
Normal file
62
lib/fix/web/server.go
Normal file
|
@ -0,0 +1,62 @@
|
|||
package web
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type (
	// server is an interface of a startable/stoppable HTTP server.
	server interface {
		serve(ctx context.Context) error
		shutdown(ctx context.Context) error
	}

	// contextServer couples an http.Server with a logger for
	// context-driven shutdown reporting.
	contextServer struct {
		logger *zap.Logger
		server *http.Server
	}

	// params groups the dependencies for building a contextServer;
	// Name is the config prefix for the timeout/limit keys.
	params struct {
		Address string
		Name    string
		Config  *viper.Viper
		Logger  *zap.Logger
		Handler http.Handler
	}
)
||||
|
||||
func newServer(p params) server {
|
||||
return &contextServer{
|
||||
logger: p.Logger,
|
||||
server: &http.Server{
|
||||
Addr: p.Address,
|
||||
Handler: p.Handler,
|
||||
ReadTimeout: p.Config.GetDuration(p.Name + ".read_timeout"),
|
||||
ReadHeaderTimeout: p.Config.GetDuration(p.Name + ".read_header_timeout"),
|
||||
WriteTimeout: p.Config.GetDuration(p.Name + ".write_timeout"),
|
||||
IdleTimeout: p.Config.GetDuration(p.Name + ".idle_timeout"),
|
||||
MaxHeaderBytes: p.Config.GetInt(p.Name + ".max_header_bytes"),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (cs *contextServer) serve(ctx context.Context) error {
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
|
||||
if err := cs.server.Close(); err != nil {
|
||||
cs.logger.Info("something went wrong",
|
||||
zap.Error(err))
|
||||
}
|
||||
}()
|
||||
|
||||
return cs.server.ListenAndServe()
|
||||
}
|
||||
|
||||
// shutdown gracefully stops the server, bounded by ctx.
func (cs *contextServer) shutdown(ctx context.Context) error {
	return cs.server.Shutdown(ctx)
}
|
79
lib/fix/worker/worker.go
Normal file
79
lib/fix/worker/worker.go
Normal file
|
@ -0,0 +1,79 @@
|
|||
package worker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
type (
	// Workers is an interface of worker tool.
	Workers interface {
		Start(context.Context)
		Stop()

		// Add registers a job handler to be run by Start.
		// NOTE: the parameter was previously named "Job", shadowing the
		// exported Job type; interface-method parameter names are
		// documentation-only, so this rename is caller-compatible.
		Add(job Handler)
	}

	// workers is the Workers implementation: a set of job handlers
	// run under a shared cancelable context.
	workers struct {
		cancel  context.CancelFunc
		started *int32 // 0 = stopped, 1 = started (CAS-guarded)
		wg      *sync.WaitGroup
		jobs    []Handler
	}

	// Handler is a worker's handling function.
	Handler func(ctx context.Context)

	// Jobs is a map of worker names to handlers.
	Jobs map[string]Handler

	// Job groups the parameters of worker's job.
	Job struct {
		Disabled    bool
		Immediately bool
		Timer       time.Duration
		Ticker      time.Duration
		Handler     Handler
	}
)
|
||||
|
||||
// New is a constructor of workers.
|
||||
func New() Workers {
|
||||
return &workers{
|
||||
started: new(int32),
|
||||
wg: new(sync.WaitGroup),
|
||||
}
|
||||
}
|
||||
|
||||
// Add registers a job handler; it takes effect on the next Start.
func (w *workers) Add(job Handler) {
	w.jobs = append(w.jobs, job)
}
|
||||
|
||||
func (w *workers) Stop() {
|
||||
if !atomic.CompareAndSwapInt32(w.started, 1, 0) {
|
||||
// already stopped
|
||||
return
|
||||
}
|
||||
|
||||
w.cancel()
|
||||
w.wg.Wait()
|
||||
}
|
||||
|
||||
func (w *workers) Start(ctx context.Context) {
|
||||
if !atomic.CompareAndSwapInt32(w.started, 0, 1) {
|
||||
// already started
|
||||
return
|
||||
}
|
||||
|
||||
ctx, w.cancel = context.WithCancel(ctx)
|
||||
for _, job := range w.jobs {
|
||||
w.wg.Add(1)
|
||||
|
||||
go func(handler Handler) {
|
||||
defer w.wg.Done()
|
||||
handler(ctx)
|
||||
}(job)
|
||||
}
|
||||
}
|
392
lib/implementations/acl.go
Normal file
392
lib/implementations/acl.go
Normal file
|
@ -0,0 +1,392 @@
|
|||
package implementations
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
sc "github.com/nspcc-dev/neo-go/pkg/smartcontract"
|
||||
libacl "github.com/nspcc-dev/neofs-api-go/acl"
|
||||
"github.com/nspcc-dev/neofs-node/internal"
|
||||
"github.com/nspcc-dev/neofs-node/lib/acl"
|
||||
"github.com/nspcc-dev/neofs-node/lib/blockchain/goclient"
|
||||
"github.com/nspcc-dev/neofs-node/lib/container"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"github.com/nspcc-dev/neofs-api-go/refs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Consider moving ACLHelper implementation to the ACL library.

type (
	// ACLHelper is an interface, that provides useful functions
	// for ACL object pre-processor.
	ACLHelper interface {
		BasicACLGetter
		ContainerOwnerChecker
	}

	// BasicACLGetter helper provides function to return basic ACL value.
	BasicACLGetter interface {
		GetBasicACL(context.Context, CID) (uint32, error)
	}

	// ContainerOwnerChecker checks owner of the container.
	ContainerOwnerChecker interface {
		IsContainerOwner(context.Context, CID, refs.OwnerID) (bool, error)
	}

	// aclHelper implements ACLHelper on top of a container storage.
	aclHelper struct {
		cnr container.Storage
	}
)

// binaryEACLSource adapts a binary eACL store to the table-level interface.
type binaryEACLSource struct {
	binaryStore acl.BinaryExtendedACLSource
}

// StaticContractClient is a wrapper over Neo:Morph client
// that invokes single smart contract methods with fixed fee.
type StaticContractClient struct {
	// neo-go client instance
	client *goclient.Client

	// contract script-hash
	scScriptHash util.Uint160

	// invocation fee
	fee util.Fixed8
}

// MorphContainerContract is a wrapper over StaticContractClient
// for Container contract calls.
type MorphContainerContract struct {
	// NeoFS Container smart-contract
	containerContract StaticContractClient

	// set EACL method name of container contract
	eaclSetMethodName string

	// get EACL method name of container contract
	eaclGetMethodName string

	// get container method name of container contract
	cnrGetMethodName string

	// put container method name of container contract
	cnrPutMethodName string

	// delete container method name of container contract
	cnrDelMethodName string

	// list containers method name of container contract
	cnrListMethodName string
}

const (
	// errNewACLHelper is returned by NewACLHelper on nil container storage.
	errNewACLHelper = internal.Error("cannot create ACLHelper instance")
)
|
||||
|
||||
// GetBasicACL returns basic ACL of the container.
|
||||
func (h aclHelper) GetBasicACL(ctx context.Context, cid CID) (uint32, error) {
|
||||
gp := container.GetParams{}
|
||||
gp.SetContext(ctx)
|
||||
gp.SetCID(cid)
|
||||
|
||||
gResp, err := h.cnr.GetContainer(gp)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return gResp.Container().BasicACL, nil
|
||||
}
|
||||
|
||||
// IsContainerOwner returns true if provided id is an owner container.
|
||||
func (h aclHelper) IsContainerOwner(ctx context.Context, cid CID, id refs.OwnerID) (bool, error) {
|
||||
gp := container.GetParams{}
|
||||
gp.SetContext(ctx)
|
||||
gp.SetCID(cid)
|
||||
|
||||
gResp, err := h.cnr.GetContainer(gp)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return gResp.Container().OwnerID.Equal(id), nil
|
||||
}
|
||||
|
||||
// NewACLHelper returns implementation of the ACLHelper interface.
|
||||
func NewACLHelper(cnr container.Storage) (ACLHelper, error) {
|
||||
if cnr == nil {
|
||||
return nil, errNewACLHelper
|
||||
}
|
||||
|
||||
return aclHelper{cnr}, nil
|
||||
}
|
||||
|
||||
// ExtendedACLSourceFromBinary wraps BinaryExtendedACLSource and returns ExtendedACLSource.
|
||||
//
|
||||
// If passed BinaryExtendedACLSource is nil, acl.ErrNilBinaryExtendedACLStore returns.
|
||||
func ExtendedACLSourceFromBinary(v acl.BinaryExtendedACLSource) (acl.ExtendedACLSource, error) {
|
||||
if v == nil {
|
||||
return nil, acl.ErrNilBinaryExtendedACLStore
|
||||
}
|
||||
|
||||
return &binaryEACLSource{
|
||||
binaryStore: v,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetExtendedACLTable receives eACL table in a binary representation from storage,
// unmarshals it and returns ExtendedACLTable interface.
//
// The stored signature is currently not checked (see TODO below), so the
// returned table must not be treated as authenticated.
func (s binaryEACLSource) GetExtendedACLTable(ctx context.Context, cid refs.CID) (libacl.ExtendedACLTable, error) {
	key := acl.BinaryEACLKey{}
	key.SetCID(cid)

	val, err := s.binaryStore.GetBinaryEACL(ctx, key)
	if err != nil {
		return nil, err
	}

	eacl := val.EACL()

	// TODO: verify signature

	// wrap a fresh (nil) table and fill it from the binary form below;
	// on unmarshal failure the table is returned together with the error
	res := libacl.WrapEACLTable(nil)

	return res, res.UnmarshalBinary(eacl)
}
|
||||
|
||||
// NewStaticContractClient initializes a new StaticContractClient.
|
||||
//
|
||||
// If passed Client is nil, goclient.ErrNilClient returns.
|
||||
func NewStaticContractClient(client *goclient.Client, scHash util.Uint160, fee util.Fixed8) (StaticContractClient, error) {
|
||||
res := StaticContractClient{
|
||||
client: client,
|
||||
scScriptHash: scHash,
|
||||
fee: fee,
|
||||
}
|
||||
|
||||
var err error
|
||||
if client == nil {
|
||||
err = goclient.ErrNilClient
|
||||
}
|
||||
|
||||
return res, err
|
||||
}
|
||||
|
||||
// Invoke calls Invoke method of goclient with predefined script hash and fee.
// Supported args types are the same as in goclient.
//
// If Client is not initialized, goclient.ErrNilClient returns.
func (s StaticContractClient) Invoke(method string, args ...interface{}) error {
	// guard so that the zero-value client fails cleanly instead of
	// dereferencing a nil goclient
	if s.client == nil {
		return goclient.ErrNilClient
	}

	return s.client.Invoke(
		s.scScriptHash,
		s.fee,
		method,
		args...,
	)
}
|
||||
|
||||
// TestInvoke calls TestInvoke method of goclient with predefined script hash.
// Unlike Invoke, this is a read-only call that does not attach the fee.
//
// If Client is not initialized, goclient.ErrNilClient returns.
func (s StaticContractClient) TestInvoke(method string, args ...interface{}) ([]sc.Parameter, error) {
	// guard so that the zero-value client fails cleanly instead of
	// dereferencing a nil goclient
	if s.client == nil {
		return nil, goclient.ErrNilClient
	}

	return s.client.TestInvoke(
		s.scScriptHash,
		method,
		args...,
	)
}
|
||||
|
||||
// SetContainerContractClient is a container contract client setter.
func (s *MorphContainerContract) SetContainerContractClient(v StaticContractClient) {
	s.containerContract = v
}

// SetEACLGetMethodName is a container contract Get EACL method name setter.
func (s *MorphContainerContract) SetEACLGetMethodName(v string) {
	s.eaclGetMethodName = v
}

// SetEACLSetMethodName is a container contract Set EACL method name setter.
func (s *MorphContainerContract) SetEACLSetMethodName(v string) {
	s.eaclSetMethodName = v
}

// SetContainerGetMethodName is a container contract Get method name setter.
func (s *MorphContainerContract) SetContainerGetMethodName(v string) {
	s.cnrGetMethodName = v
}

// SetContainerPutMethodName is a container contract Put method name setter.
func (s *MorphContainerContract) SetContainerPutMethodName(v string) {
	s.cnrPutMethodName = v
}

// SetContainerDeleteMethodName is a container contract Delete method name setter.
func (s *MorphContainerContract) SetContainerDeleteMethodName(v string) {
	s.cnrDelMethodName = v
}

// SetContainerListMethodName is a container contract List method name setter.
func (s *MorphContainerContract) SetContainerListMethodName(v string) {
	s.cnrListMethodName = v
}
|
||||
|
||||
// GetBinaryEACL performs the test invocation call of GetEACL method of NeoFS Container contract.
// Exactly one stack item (the binary eACL) is expected in the result.
func (s *MorphContainerContract) GetBinaryEACL(_ context.Context, key acl.BinaryEACLKey) (acl.BinaryEACLValue, error) {
	res := acl.BinaryEACLValue{}

	prms, err := s.containerContract.TestInvoke(
		s.eaclGetMethodName,
		key.CID().Bytes(),
	)
	if err != nil {
		return res, err
	} else if ln := len(prms); ln != 1 {
		return res, errors.Errorf("unexpected stack parameter count: %d", ln)
	}

	// on success the eACL bytes are attached to the result;
	// NOTE(review): no signature is set on the value here — confirm that
	// callers do not rely on it
	eacl, err := goclient.BytesFromStackParameter(prms[0])
	if err == nil {
		res.SetEACL(eacl)
	}

	return res, err
}
|
||||
|
||||
// PutBinaryEACL invokes the call of SetEACL method of NeoFS Container contract.
// The contract receives the container ID, the binary eACL and its signature.
func (s *MorphContainerContract) PutBinaryEACL(_ context.Context, key acl.BinaryEACLKey, val acl.BinaryEACLValue) error {
	return s.containerContract.Invoke(
		s.eaclSetMethodName,
		key.CID().Bytes(),
		val.EACL(),
		val.Signature(),
	)
}
|
||||
|
||||
// GetContainer performs the test invocation call of Get method of NeoFS Container contract.
|
||||
func (s *MorphContainerContract) GetContainer(p container.GetParams) (*container.GetResult, error) {
|
||||
prms, err := s.containerContract.TestInvoke(
|
||||
s.cnrGetMethodName,
|
||||
p.CID().Bytes(),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not perform test invocation")
|
||||
} else if ln := len(prms); ln != 1 {
|
||||
return nil, errors.Errorf("unexpected stack item count: %d", ln)
|
||||
}
|
||||
|
||||
cnrBytes, err := goclient.BytesFromStackParameter(prms[0])
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get byte array from stack item")
|
||||
}
|
||||
|
||||
cnr := new(container.Container)
|
||||
if err := cnr.Unmarshal(cnrBytes); err != nil {
|
||||
return nil, errors.Wrap(err, "could not unmarshal container from bytes")
|
||||
}
|
||||
|
||||
res := new(container.GetResult)
|
||||
res.SetContainer(cnr)
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// PutContainer invokes the call of Put method of NeoFS Container contract.
// The container ID is computed locally from the container body and is
// returned in the result without waiting for the transaction to persist.
func (s *MorphContainerContract) PutContainer(p container.PutParams) (*container.PutResult, error) {
	cnr := p.Container()

	cid, err := cnr.ID()
	if err != nil {
		return nil, errors.Wrap(err, "could not calculate container ID")
	}

	cnrBytes, err := cnr.Marshal()
	if err != nil {
		return nil, errors.Wrap(err, "could not marshal container")
	}

	if err := s.containerContract.Invoke(
		s.cnrPutMethodName,
		cnr.OwnerID.Bytes(),
		cnrBytes,
		// NOTE(review): empty third argument — presumably a signature
		// placeholder expected by the contract; confirm against the
		// contract's Put signature
		[]byte{},
	); err != nil {
		return nil, errors.Wrap(err, "could not invoke contract method")
	}

	res := new(container.PutResult)
	res.SetCID(cid)

	return res, nil
}
|
||||
|
||||
// DeleteContainer invokes the call of Delete method of NeoFS Container contract.
// The result carries no data; a nil error only means the invocation was sent.
func (s *MorphContainerContract) DeleteContainer(p container.DeleteParams) (*container.DeleteResult, error) {
	if err := s.containerContract.Invoke(
		s.cnrDelMethodName,
		p.CID().Bytes(),
		p.OwnerID().Bytes(),
		// NOTE(review): empty third argument — presumably a signature
		// placeholder expected by the contract; confirm against the
		// contract's Delete signature
		[]byte{},
	); err != nil {
		return nil, errors.Wrap(err, "could not invoke contract method")
	}

	return new(container.DeleteResult), nil
}
|
||||
|
||||
// ListContainers performs the test invocation call of Get method of NeoFS Container contract.
//
// If owner ID list in parameters is non-empty, bytes of first owner are attached to call.
// The single returned stack item is expected to be an array of container IDs.
func (s *MorphContainerContract) ListContainers(p container.ListParams) (*container.ListResult, error) {
	args := make([]interface{}, 0, 1)

	// only the first owner is used as a filter; the rest are ignored
	if ownerIDList := p.OwnerIDList(); len(ownerIDList) > 0 {
		args = append(args, ownerIDList[0].Bytes())
	}

	prms, err := s.containerContract.TestInvoke(
		s.cnrListMethodName,
		args...,
	)
	if err != nil {
		return nil, errors.Wrap(err, "could not perform test invocation")
	} else if ln := len(prms); ln != 1 {
		return nil, errors.Errorf("unexpected stack item count: %d", ln)
	}

	// unwrap the single stack item into the array of per-container items
	prms, err = goclient.ArrayFromStackParameter(prms[0])
	if err != nil {
		return nil, errors.Wrap(err, "could not get stack item array from stack item")
	}

	cidList := make([]CID, 0, len(prms))

	for i := range prms {
		cidBytes, err := goclient.BytesFromStackParameter(prms[i])
		if err != nil {
			return nil, errors.Wrap(err, "could not get byte array from stack item")
		}

		cid, err := refs.CIDFromBytes(cidBytes)
		if err != nil {
			return nil, errors.Wrap(err, "could not get container ID from bytes")
		}

		cidList = append(cidList, cid)
	}

	res := new(container.ListResult)
	res.SetCIDList(cidList)

	return res, nil
}
|
19
lib/implementations/acl_test.go
Normal file
19
lib/implementations/acl_test.go
Normal file
|
@ -0,0 +1,19 @@
|
|||
package implementations
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestStaticContractClient checks that calling Invoke/TestInvoke on a
// zero-value StaticContractClient does not panic (both must fail with
// goclient.ErrNilClient instead of dereferencing a nil client).
func TestStaticContractClient(t *testing.T) {
	s := new(StaticContractClient)

	require.NotPanics(t, func() {
		_, _ = s.TestInvoke("")
	})

	require.NotPanics(t, func() {
		_ = s.Invoke("")
	})
}
|
141
lib/implementations/balance.go
Normal file
141
lib/implementations/balance.go
Normal file
|
@ -0,0 +1,141 @@
|
|||
package implementations
|
||||
|
||||
import (
|
||||
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
|
||||
"github.com/nspcc-dev/neofs-api-go/refs"
|
||||
"github.com/nspcc-dev/neofs-node/lib/blockchain/goclient"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// MorphBalanceContract is a wrapper over NeoFS Balance contract client
// that provides an interface of manipulations with user funds.
//
// Method names are configured via the Set*MethodName setters.
type MorphBalanceContract struct {
	// NeoFS Balance smart-contract
	balanceContract StaticContractClient

	// "balance of" method name of balance contract
	balanceOfMethodName string

	// decimals method name of balance contract
	decimalsMethodName string
}
|
||||
|
||||
// BalanceOfParams is a structure that groups the parameters
// for NeoFS user balance receiving operation.
type BalanceOfParams struct {
	// owner whose balance is queried; access via SetOwnerID/OwnerID
	owner refs.OwnerID
}

// BalanceOfResult is a structure that groups the values
// of the result of NeoFS user balance receiving operation.
type BalanceOfResult struct {
	// funds amount in contract-defined units; access via SetAmount/Amount
	amount int64
}

// DecimalsParams is a structure that groups the parameters
// for NeoFS token decimals receiving operation.
//
// The operation takes no parameters; the type exists for API symmetry.
type DecimalsParams struct {
}

// DecimalsResult is a structure that groups the values
// of the result of NeoFS token decimals receiving operation.
type DecimalsResult struct {
	// token decimals value; access via SetDecimals/Decimals
	dec int64
}
|
||||
|
||||
// SetBalanceContractClient is a Balance contract client setter.
func (s *MorphBalanceContract) SetBalanceContractClient(v StaticContractClient) {
	s.balanceContract = v
}

// SetBalanceOfMethodName is a Balance contract balanceOf method name setter.
func (s *MorphBalanceContract) SetBalanceOfMethodName(v string) {
	s.balanceOfMethodName = v
}

// SetDecimalsMethodName is a Balance contract decimals method name setter.
func (s *MorphBalanceContract) SetDecimalsMethodName(v string) {
	s.decimalsMethodName = v
}
|
||||
|
||||
// BalanceOf performs the test invocation call of balanceOf method of NeoFS Balance contract.
// The owner ID is converted to a Neo script hash (Uint160) before the call,
// and the single returned stack item is interpreted as the integer amount.
func (s MorphBalanceContract) BalanceOf(p BalanceOfParams) (*BalanceOfResult, error) {
	owner := p.OwnerID()

	// owner.String() is treated as a Neo wallet address here —
	// the conversion fails for IDs that are not valid addresses
	u160, err := address.StringToUint160(owner.String())
	if err != nil {
		return nil, errors.Wrap(err, "could not convert wallet address to Uint160")
	}

	prms, err := s.balanceContract.TestInvoke(
		s.balanceOfMethodName,
		u160.BytesBE(),
	)
	if err != nil {
		return nil, errors.Wrap(err, "could not perform test invocation")
	} else if ln := len(prms); ln != 1 {
		return nil, errors.Errorf("unexpected stack item count (balanceOf): %d", ln)
	}

	amount, err := goclient.IntFromStackParameter(prms[0])
	if err != nil {
		return nil, errors.Wrap(err, "could not get integer stack item from stack item (amount)")
	}

	res := new(BalanceOfResult)
	res.SetAmount(amount)

	return res, nil
}
|
||||
|
||||
// Decimals performs the test invocation call of decimals method of NeoFS Balance contract.
// The single returned stack item is interpreted as the integer decimals value.
func (s MorphBalanceContract) Decimals(DecimalsParams) (*DecimalsResult, error) {
	prms, err := s.balanceContract.TestInvoke(
		s.decimalsMethodName,
	)
	if err != nil {
		return nil, errors.Wrap(err, "could not perform test invocation")
	} else if ln := len(prms); ln != 1 {
		return nil, errors.Errorf("unexpected stack item count (decimals): %d", ln)
	}

	dec, err := goclient.IntFromStackParameter(prms[0])
	if err != nil {
		return nil, errors.Wrap(err, "could not get integer stack item from stack item (decimal)")
	}

	res := new(DecimalsResult)
	res.SetDecimals(dec)

	return res, nil
}
|
||||
|
||||
// SetOwnerID is an owner ID setter.
func (s *BalanceOfParams) SetOwnerID(v refs.OwnerID) {
	s.owner = v
}

// OwnerID is an owner ID getter.
func (s BalanceOfParams) OwnerID() refs.OwnerID {
	return s.owner
}

// SetAmount is a funds amount setter.
func (s *BalanceOfResult) SetAmount(v int64) {
	s.amount = v
}

// Amount is a funds amount getter.
func (s BalanceOfResult) Amount() int64 {
	return s.amount
}

// SetDecimals is a decimals setter.
func (s *DecimalsResult) SetDecimals(v int64) {
	s.dec = v
}

// Decimals is a decimals getter.
func (s DecimalsResult) Decimals() int64 {
	return s.dec
}
|
35
lib/implementations/balance_test.go
Normal file
35
lib/implementations/balance_test.go
Normal file
|
@ -0,0 +1,35 @@
|
|||
package implementations
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/nspcc-dev/neofs-api-go/refs"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestBalanceOfParams checks the owner ID setter/getter round trip.
func TestBalanceOfParams(t *testing.T) {
	s := BalanceOfParams{}

	owner := refs.OwnerID{1, 2, 3}
	s.SetOwnerID(owner)

	require.Equal(t, owner, s.OwnerID())
}

// TestBalanceOfResult checks the amount setter/getter round trip.
func TestBalanceOfResult(t *testing.T) {
	s := BalanceOfResult{}

	amount := int64(100)
	s.SetAmount(amount)

	require.Equal(t, amount, s.Amount())
}

// TestDecimalsResult checks the decimals setter/getter round trip.
func TestDecimalsResult(t *testing.T) {
	s := DecimalsResult{}

	dec := int64(100)
	s.SetDecimals(dec)

	require.Equal(t, dec, s.Decimals())
}
|
311
lib/implementations/bootstrap.go
Normal file
311
lib/implementations/bootstrap.go
Normal file
|
@ -0,0 +1,311 @@
|
|||
package implementations
|
||||
|
||||
import (
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract"
|
||||
"github.com/nspcc-dev/neofs-api-go/bootstrap"
|
||||
"github.com/nspcc-dev/neofs-node/lib/blockchain/goclient"
|
||||
"github.com/nspcc-dev/neofs-node/lib/boot"
|
||||
"github.com/nspcc-dev/neofs-node/lib/ir"
|
||||
"github.com/nspcc-dev/neofs-node/lib/netmap"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// MorphNetmapContract is a wrapper over NeoFS Netmap contract client
// that provides an interface of network map manipulations.
//
// Method names are configured via the Set*MethodName setters.
type MorphNetmapContract struct {
	// NeoFS Netmap smart-contract
	netmapContract StaticContractClient

	// add peer method name of netmap contract
	addPeerMethodName string

	// new epoch method name of netmap contract
	newEpochMethodName string

	// get netmap method name of netmap contract
	getNetMapMethodName string

	// update state method name of netmap contract
	updStateMethodName string

	// IR list method name of netmap contract
	irListMethodName string
}
|
||||
|
||||
// UpdateEpochParams is a structure that groups the parameters
// for NeoFS epoch number updating.
type UpdateEpochParams struct {
	// new epoch number; access via SetNumber/Number
	epoch uint64
}

// UpdateStateParams is a structure that groups the parameters
// for NeoFS node state updating.
type UpdateStateParams struct {
	// target node state; access via SetState/State
	st NodeState

	// public key identifying the node; access via SetKey/Key
	key []byte
}

// NodeState is a type of node states enumeration.
type NodeState int64

const (
	_ NodeState = iota

	// StateOffline is an offline node state value.
	StateOffline
)

// addPeerFixedArgNumber is the number of non-option arguments
// (address, public key) passed to the AddPeer contract call.
const addPeerFixedArgNumber = 2

// nodeInfoFixedPrmNumber is the expected element count of a NodeInfo
// stack item (address, public key, options).
const nodeInfoFixedPrmNumber = 3
|
||||
|
||||
// SetNetmapContractClient is a Netmap contract client setter.
func (s *MorphNetmapContract) SetNetmapContractClient(v StaticContractClient) {
	s.netmapContract = v
}

// SetAddPeerMethodName is a Netmap contract AddPeer method name setter.
func (s *MorphNetmapContract) SetAddPeerMethodName(v string) {
	s.addPeerMethodName = v
}

// SetNewEpochMethodName is a Netmap contract NewEpoch method name setter.
func (s *MorphNetmapContract) SetNewEpochMethodName(v string) {
	s.newEpochMethodName = v
}

// SetNetMapMethodName is a Netmap contract Netmap method name setter.
func (s *MorphNetmapContract) SetNetMapMethodName(v string) {
	s.getNetMapMethodName = v
}

// SetUpdateStateMethodName is a Netmap contract UpdateState method name setter.
func (s *MorphNetmapContract) SetUpdateStateMethodName(v string) {
	s.updStateMethodName = v
}

// SetIRListMethodName is a Netmap contract InnerRingList method name setter.
func (s *MorphNetmapContract) SetIRListMethodName(v string) {
	s.irListMethodName = v
}
|
||||
|
||||
// AddPeer invokes the call of AddPeer method of NeoFS Netmap contract.
// The call receives the node address and public key followed by all of
// the node's options as individual byte-array arguments.
func (s *MorphNetmapContract) AddPeer(p boot.BootstrapPeerParams) error {
	info := p.NodeInfo()
	opts := info.GetOptions()

	args := make([]interface{}, 0, addPeerFixedArgNumber+len(opts))

	args = append(args,
		// Address
		[]byte(info.GetAddress()),

		// Public key
		info.GetPubKey(),
	)

	// Options
	for i := range opts {
		args = append(args, []byte(opts[i]))
	}

	return s.netmapContract.Invoke(
		s.addPeerMethodName,
		args...,
	)
}
|
||||
|
||||
// UpdateEpoch invokes the call of NewEpoch method of NeoFS Netmap contract.
// Epoch numbers above math.MaxInt64 would be misrepresented by the cast below.
func (s *MorphNetmapContract) UpdateEpoch(p UpdateEpochParams) error {
	return s.netmapContract.Invoke(
		s.newEpochMethodName,
		int64(p.Number()), // TODO: do not cast after uint64 type will become supported in client
	)
}
|
||||
|
||||
// GetNetMap performs the test invocation call of Netmap method of NeoFS Netmap contract.
// The single returned stack item is expected to be an array of NodeInfo
// items, each of which is parsed and added to a fresh network map.
func (s *MorphNetmapContract) GetNetMap(p netmap.GetParams) (*netmap.GetResult, error) {
	prms, err := s.netmapContract.TestInvoke(
		s.getNetMapMethodName,
	)
	if err != nil {
		return nil, errors.Wrap(err, "could not perform test invocation")
	} else if ln := len(prms); ln != 1 {
		return nil, errors.Errorf("unexpected stack item count (Nodes): %d", ln)
	}

	// unwrap the single stack item into the per-node items
	prms, err = goclient.ArrayFromStackParameter(prms[0])
	if err != nil {
		return nil, errors.Wrap(err, "could not get stack item array from stack item (Nodes)")
	}

	nm := netmap.NewNetmap()

	for i := range prms {
		nodeInfo, err := nodeInfoFromStackItem(prms[i])
		if err != nil {
			return nil, errors.Wrapf(err, "could not parse stack item (Node #%d)", i)
		}

		if err := nm.AddNode(nodeInfo); err != nil {
			return nil, errors.Wrapf(err, "could not add node #%d to network map", i)
		}
	}

	res := new(netmap.GetResult)
	res.SetNetMap(nm)

	return res, nil
}
|
||||
|
||||
func nodeInfoFromStackItem(prm smartcontract.Parameter) (*bootstrap.NodeInfo, error) {
|
||||
prms, err := goclient.ArrayFromStackParameter(prm)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get stack item array (NodeInfo)")
|
||||
} else if ln := len(prms); ln != nodeInfoFixedPrmNumber {
|
||||
return nil, errors.Errorf("unexpected stack item count (NodeInfo): expected %d, has %d", 3, ln)
|
||||
}
|
||||
|
||||
res := new(bootstrap.NodeInfo)
|
||||
|
||||
// Address
|
||||
addrBytes, err := goclient.BytesFromStackParameter(prms[0])
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get byte array from stack item (Address)")
|
||||
}
|
||||
|
||||
res.Address = string(addrBytes)
|
||||
|
||||
// Public key
|
||||
res.PubKey, err = goclient.BytesFromStackParameter(prms[1])
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get byte array from stack item (Public key)")
|
||||
}
|
||||
|
||||
// Options
|
||||
prms, err = goclient.ArrayFromStackParameter(prms[2])
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get stack item array (Options)")
|
||||
}
|
||||
|
||||
res.Options = make([]string, 0, len(prms))
|
||||
|
||||
for i := range prms {
|
||||
optBytes, err := goclient.BytesFromStackParameter(prms[i])
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get byte array from stack item (Option #%d)", i)
|
||||
}
|
||||
|
||||
res.Options = append(res.Options, string(optBytes))
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// UpdateState invokes the call of UpdateState method of NeoFS Netmap contract.
// The state is passed as its int64 representation together with the node's
// public key.
func (s *MorphNetmapContract) UpdateState(p UpdateStateParams) error {
	return s.netmapContract.Invoke(
		s.updStateMethodName,
		p.State().Int64(),
		p.Key(),
	)
}
|
||||
|
||||
// GetIRInfo performs the test invocation call of InnerRingList method of NeoFS Netmap contract.
|
||||
func (s *MorphNetmapContract) GetIRInfo(ir.GetInfoParams) (*ir.GetInfoResult, error) {
|
||||
prms, err := s.netmapContract.TestInvoke(
|
||||
s.irListMethodName,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not perform test invocation")
|
||||
} else if ln := len(prms); ln != 1 {
|
||||
return nil, errors.Errorf("unexpected stack item count (Nodes): %d", ln)
|
||||
}
|
||||
|
||||
irInfo, err := irInfoFromStackItem(prms[0])
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get IR info from stack item")
|
||||
}
|
||||
|
||||
res := new(ir.GetInfoResult)
|
||||
res.SetInfo(*irInfo)
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// irInfoFromStackItem unpacks an ir.Info from a stack item that is
// expected to be an array of IRNode items; each element is parsed by
// irNodeFromStackItem.
func irInfoFromStackItem(prm smartcontract.Parameter) (*ir.Info, error) {
	prms, err := goclient.ArrayFromStackParameter(prm)
	if err != nil {
		return nil, errors.Wrap(err, "could not get stack item array")
	}

	nodes := make([]ir.Node, 0, len(prms))

	for i := range prms {
		node, err := irNodeFromStackItem(prms[i])
		if err != nil {
			return nil, errors.Wrapf(err, "could not get node info from stack item (IRNode #%d)", i)
		}

		nodes = append(nodes, *node)
	}

	info := new(ir.Info)
	info.SetNodes(nodes)

	return info, nil
}
|
||||
|
||||
func irNodeFromStackItem(prm smartcontract.Parameter) (*ir.Node, error) {
|
||||
prms, err := goclient.ArrayFromStackParameter(prm)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get stack item array (IRNode)")
|
||||
}
|
||||
|
||||
// Public key
|
||||
keyBytes, err := goclient.BytesFromStackParameter(prms[0])
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get byte array from stack item (Key)")
|
||||
}
|
||||
|
||||
node := new(ir.Node)
|
||||
node.SetKey(keyBytes)
|
||||
|
||||
return node, nil
|
||||
}
|
||||
|
||||
// SetNumber is an epoch number setter.
func (s *UpdateEpochParams) SetNumber(v uint64) {
	s.epoch = v
}

// Number is an epoch number getter.
func (s UpdateEpochParams) Number() uint64 {
	return s.epoch
}

// SetState is a state setter.
func (s *UpdateStateParams) SetState(v NodeState) {
	s.st = v
}

// State is a state getter.
func (s UpdateStateParams) State() NodeState {
	return s.st
}

// SetKey is a public key setter.
func (s *UpdateStateParams) SetKey(v []byte) {
	s.key = v
}

// Key is a public key getter.
func (s UpdateStateParams) Key() []byte {
	return s.key
}

// Int64 converts NodeState to int64.
func (s NodeState) Int64() int64 {
	return int64(s)
}
|
30
lib/implementations/bootstrap_test.go
Normal file
30
lib/implementations/bootstrap_test.go
Normal file
|
@ -0,0 +1,30 @@
|
|||
package implementations
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestUpdateEpochParams checks the epoch number setter/getter round trip.
func TestUpdateEpochParams(t *testing.T) {
	s := UpdateEpochParams{}

	e := uint64(100)
	s.SetNumber(e)

	require.Equal(t, e, s.Number())
}

// TestUpdateStateParams checks the state and key setter/getter round trips.
func TestUpdateStateParams(t *testing.T) {
	s := UpdateStateParams{}

	st := NodeState(1)
	s.SetState(st)

	require.Equal(t, st, s.State())

	key := []byte{1, 2, 3}
	s.SetKey(key)

	require.Equal(t, key, s.Key())
}
|
7
lib/implementations/epoch.go
Normal file
7
lib/implementations/epoch.go
Normal file
|
@ -0,0 +1,7 @@
|
|||
package implementations
|
||||
|
||||
// EpochReceiver is an interface of the container
// of NeoFS epoch number with read access.
type EpochReceiver interface {
	// Epoch returns the current NeoFS epoch number.
	Epoch() uint64
}
|
78
lib/implementations/locator.go
Normal file
78
lib/implementations/locator.go
Normal file
|
@ -0,0 +1,78 @@
|
|||
package implementations
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"github.com/nspcc-dev/neofs-api-go/query"
|
||||
"github.com/nspcc-dev/neofs-api-go/refs"
|
||||
"github.com/nspcc-dev/neofs-api-go/service"
|
||||
"github.com/nspcc-dev/neofs-node/lib/replication"
|
||||
"github.com/nspcc-dev/neofs-node/lib/transport"
|
||||
"github.com/pkg/errors"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type (
	// locator is a replication.ObjectLocator implementation that finds
	// object holders via a container-wide search.
	locator struct {
		executor SelectiveContainerExecutor
		log      *zap.Logger
	}

	// LocatorParams groups the parameters of ObjectLocator constructor.
	LocatorParams struct {
		SelectiveContainerExecutor SelectiveContainerExecutor
		Logger                     *zap.Logger
	}
)
|
||||
|
||||
// locatorInstanceFailMsg prefixes NewObjectLocator construction errors.
const locatorInstanceFailMsg = "could not create object locator"

// errEmptyObjectsContainerHandler is returned when the selective
// container executor parameter is missing.
var errEmptyObjectsContainerHandler = errors.New("empty container objects container handler")
|
||||
|
||||
// LocateObject searches the container of addr for nodes that report
// holding the object with addr's ID, and returns their multiaddresses.
func (s *locator) LocateObject(ctx context.Context, addr Address) (res []multiaddr.Multiaddr, err error) {
	// build an exact-match search query on the object ID
	queryBytes, err := (&query.Query{
		Filters: []query.Filter{
			{
				Type:  query.Filter_Exact,
				Name:  transport.KeyID,
				Value: addr.ObjectID.String(),
			},
		},
	}).Marshal()
	if err != nil {
		return nil, errors.Wrap(err, "locate object failed on query marshal")
	}

	err = s.executor.Search(ctx, &SearchParams{
		SelectiveParams: SelectiveParams{
			CID: addr.CID,
			TTL: service.NonForwardingTTL,
			// NOTE(review): a single zero-value ID is passed here —
			// presumably required by the executor's parameter
			// validation; confirm against SelectiveContainerExecutor
			IDList: make([]ObjectID, 1),
		},
		SearchCID:   addr.CID,
		SearchQuery: queryBytes,
		// a node counts as a holder if it returned at least one address
		Handler: func(node multiaddr.Multiaddr, addrList []refs.Address) {
			if len(addrList) > 0 {
				res = append(res, node)
			}
		},
	})

	return
}
|
||||
|
||||
// NewObjectLocator constructs replication.ObjectLocator from SelectiveContainerExecutor.
|
||||
func NewObjectLocator(p LocatorParams) (replication.ObjectLocator, error) {
|
||||
switch {
|
||||
case p.SelectiveContainerExecutor == nil:
|
||||
return nil, errors.Wrap(errEmptyObjectsContainerHandler, locatorInstanceFailMsg)
|
||||
case p.Logger == nil:
|
||||
return nil, errors.Wrap(errEmptyLogger, locatorInstanceFailMsg)
|
||||
}
|
||||
|
||||
return &locator{
|
||||
executor: p.SelectiveContainerExecutor,
|
||||
log: p.Logger,
|
||||
}, nil
|
||||
}
|
38
lib/implementations/locator_test.go
Normal file
38
lib/implementations/locator_test.go
Normal file
|
@ -0,0 +1,38 @@
|
|||
package implementations
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// testExecutor is a stub SelectiveContainerExecutor for constructor
// tests; the embedded interface leaves all methods unimplemented.
type testExecutor struct {
	SelectiveContainerExecutor
}
|
||||
|
||||
// TestNewObjectLocator checks NewObjectLocator parameter validation:
// success on valid params and the specific wrapped errors for a missing
// logger or a missing container executor.
func TestNewObjectLocator(t *testing.T) {
	validParams := LocatorParams{
		SelectiveContainerExecutor: new(testExecutor),
		Logger:                     zap.L(),
	}

	t.Run("valid params", func(t *testing.T) {
		s, err := NewObjectLocator(validParams)
		require.NoError(t, err)
		require.NotNil(t, s)
	})
	t.Run("empty logger", func(t *testing.T) {
		p := validParams
		p.Logger = nil
		_, err := NewObjectLocator(p)
		require.EqualError(t, err, errors.Wrap(errEmptyLogger, locatorInstanceFailMsg).Error())
	})
	t.Run("empty container handler", func(t *testing.T) {
		p := validParams
		p.SelectiveContainerExecutor = nil
		_, err := NewObjectLocator(p)
		require.EqualError(t, err, errors.Wrap(errEmptyObjectsContainerHandler, locatorInstanceFailMsg).Error())
	})
}
|
131
lib/implementations/object.go
Normal file
131
lib/implementations/object.go
Normal file
|
@ -0,0 +1,131 @@
|
|||
package implementations
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"github.com/nspcc-dev/neofs-api-go/object"
|
||||
"github.com/nspcc-dev/neofs-api-go/refs"
|
||||
"github.com/nspcc-dev/neofs-api-go/service"
|
||||
"github.com/nspcc-dev/neofs-node/lib/localstore"
|
||||
"github.com/nspcc-dev/neofs-node/lib/replication"
|
||||
"github.com/pkg/errors"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type (
	// ObjectStorage is an interface of encapsulated ObjectReceptacle and ObjectSource pair.
	ObjectStorage interface {
		replication.ObjectReceptacle
		replication.ObjectSource
	}

	// objectStorage implements ObjectStorage on top of a local store
	// and a network executor over container nodes.
	objectStorage struct {
		ls       localstore.Localstore       // local object storage; may be nil
		executor SelectiveContainerExecutor  // network operations over container nodes
		log      *zap.Logger                 // set by constructor; not used in visible methods
	}

	// ObjectStorageParams groups the parameters of ObjectStorage constructor.
	ObjectStorageParams struct {
		Localstore                 localstore.Localstore
		SelectiveContainerExecutor SelectiveContainerExecutor
		Logger                     *zap.Logger
	}
)

// objectSourceInstanceFailMsg prefixes constructor errors.
const objectSourceInstanceFailMsg = "could not create object source"

// errNilObject is returned by Put when no object is provided.
var errNilObject = errors.New("object is nil")

// errCouldNotGetObject is returned by Get when neither the local store
// nor any container node produced the object.
var errCouldNotGetObject = errors.New("could not get object from any node")

// Put stores the object either locally (when no target nodes are given)
// or on the listed container nodes via the executor.
func (s *objectStorage) Put(ctx context.Context, params replication.ObjectStoreParams) error {
	if params.Object == nil {
		return errNilObject
	} else if len(params.Nodes) == 0 {
		// No explicit placement => local store only.
		if s.ls == nil {
			return errEmptyLocalstore
		}
		return s.ls.Put(ctx, params.Object)
	}

	// Strip the node descriptors down to their multiaddresses.
	nodes := make([]multiaddr.Multiaddr, len(params.Nodes))
	for i := range params.Nodes {
		nodes[i] = params.Nodes[i].Node
	}

	return s.executor.Put(ctx, &PutParams{
		SelectiveParams: SelectiveParams{
			CID:   params.Object.SystemHeader.CID,
			Nodes: nodes,
			TTL:   service.NonForwardingTTL,
			// NOTE(review): a single zero ObjectID — presumably just to pass
			// SelectiveParams.validate() (which requires a non-empty IDList
			// or query); confirm against the executor's Put path.
			IDList: make([]ObjectID, 1),
		},
		Object: params.Object,
		// Map the executor's per-address result back onto the caller's
		// node descriptors before invoking the caller's handler.
		Handler: func(node multiaddr.Multiaddr, valid bool) {
			if params.Handler == nil {
				return
			}
			for i := range params.Nodes {
				if params.Nodes[i].Node.Equal(node) {
					params.Handler(params.Nodes[i], valid)
					return
				}
			}
		},
	})
}

// Get returns the object by address, preferring the local store and
// falling back to the container nodes via the executor.
func (s *objectStorage) Get(ctx context.Context, addr Address) (res *Object, err error) {
	if s.ls != nil {
		// Fast path: serve from the local store when present.
		// NOTE: err here shadows the named return inside the if-scope.
		if has, err := s.ls.Has(addr); err == nil && has {
			if res, err = s.ls.Get(addr); err == nil {
				return res, err
			}
		}
	}

	if err = s.executor.Get(ctx, &GetParams{
		SelectiveParams: SelectiveParams{
			CID: addr.CID,
			TTL: service.NonForwardingTTL,
			IDList: []ObjectID{addr.ObjectID},
			// Stop traversing once any node has delivered the object.
			Breaker: func(refs.Address) (cFlag ProgressControlFlag) {
				if res != nil {
					cFlag = BreakProgress
				}
				return
			},
		},
		Handler: func(node multiaddr.Multiaddr, obj *object.Object) { res = obj },
	}); err != nil {
		return
	} else if res == nil {
		return nil, errCouldNotGetObject
	}

	return
}
|
||||
|
||||
// NewObjectStorage encapsulates Localstore and SelectiveContainerExecutor
|
||||
// and returns ObjectStorage interface.
|
||||
func NewObjectStorage(p ObjectStorageParams) (ObjectStorage, error) {
|
||||
if p.Logger == nil {
|
||||
return nil, errors.Wrap(errEmptyLogger, objectSourceInstanceFailMsg)
|
||||
}
|
||||
|
||||
if p.Localstore == nil {
|
||||
p.Logger.Warn("local storage not provided")
|
||||
}
|
||||
|
||||
if p.SelectiveContainerExecutor == nil {
|
||||
p.Logger.Warn("object container handler not provided")
|
||||
}
|
||||
|
||||
return &objectStorage{
|
||||
ls: p.Localstore,
|
||||
executor: p.SelectiveContainerExecutor,
|
||||
log: p.Logger,
|
||||
}, nil
|
||||
}
|
74
lib/implementations/peerstore.go
Normal file
74
lib/implementations/peerstore.go
Normal file
|
@ -0,0 +1,74 @@
|
|||
package implementations
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"github.com/nspcc-dev/neofs-node/internal"
|
||||
"github.com/nspcc-dev/neofs-node/lib/peers"
|
||||
"github.com/pkg/errors"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type (
	// AddressStoreComponent is an interface of encapsulated AddressStore and NodePublicKeyReceiver pair.
	AddressStoreComponent interface {
		AddressStore
		NodePublicKeyReceiver
	}

	// AddressStore is an interface of the container of local Multiaddr.
	AddressStore interface {
		SelfAddr() (multiaddr.Multiaddr, error)
	}

	// NodePublicKeyReceiver is an interface of Multiaddr to PublicKey converter.
	NodePublicKeyReceiver interface {
		PublicKey(multiaddr.Multiaddr) *ecdsa.PublicKey
	}

	// addressStore implements AddressStoreComponent over a peer store.
	addressStore struct {
		ps peers.Store // backing peer store (address/ID/key lookups)

		log *zap.Logger // used to report lookup failures in PublicKey
	}
)

const (
	// addressStoreInstanceFailMsg prefixes constructor errors.
	addressStoreInstanceFailMsg = "could not create address store"
	errEmptyPeerStore           = internal.Error("empty peer store")

	errEmptyAddressStore = internal.Error("empty address store")
)
|
||||
|
||||
func (s addressStore) SelfAddr() (multiaddr.Multiaddr, error) { return s.ps.GetAddr(s.ps.SelfID()) }
|
||||
|
||||
func (s addressStore) PublicKey(mAddr multiaddr.Multiaddr) (res *ecdsa.PublicKey) {
|
||||
if peerID, err := s.ps.AddressID(mAddr); err != nil {
|
||||
s.log.Error("could not peer ID",
|
||||
zap.Stringer("node", mAddr),
|
||||
zap.Error(err),
|
||||
)
|
||||
} else if res, err = s.ps.GetPublicKey(peerID); err != nil {
|
||||
s.log.Error("could not receive public key",
|
||||
zap.Stringer("peer", peerID),
|
||||
zap.Error(err),
|
||||
)
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
// NewAddressStore wraps peer store and returns AddressStoreComponent.
|
||||
func NewAddressStore(ps peers.Store, log *zap.Logger) (AddressStoreComponent, error) {
|
||||
if ps == nil {
|
||||
return nil, errors.Wrap(errEmptyPeerStore, addressStoreInstanceFailMsg)
|
||||
} else if log == nil {
|
||||
return nil, errors.Wrap(errEmptyLogger, addressStoreInstanceFailMsg)
|
||||
}
|
||||
|
||||
return &addressStore{
|
||||
ps: ps,
|
||||
log: log,
|
||||
}, nil
|
||||
}
|
152
lib/implementations/placement.go
Normal file
152
lib/implementations/placement.go
Normal file
|
@ -0,0 +1,152 @@
|
|||
package implementations
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"github.com/nspcc-dev/neofs-api-go/bootstrap"
|
||||
"github.com/nspcc-dev/neofs-api-go/container"
|
||||
"github.com/nspcc-dev/neofs-api-go/object"
|
||||
"github.com/nspcc-dev/neofs-api-go/refs"
|
||||
"github.com/nspcc-dev/neofs-node/internal"
|
||||
"github.com/nspcc-dev/neofs-node/lib/netmap"
|
||||
"github.com/nspcc-dev/neofs-node/lib/placement"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
/*
	File source code includes implementations of placement-related solutions.
	Highly specialized interfaces give the opportunity to hide placement implementation in a black box for the reasons:
	  * placement is implementation-tied entity working with graphs, filters, etc.;
	  * NeoFS components are mostly needed in a small part of the solutions provided by placement;
	  * direct dependency from placement avoidance helps other components do not touch crucial changes in placement.
*/

type (
	// CID is a type alias of
	// CID from refs package of neofs-api-go.
	CID = refs.CID

	// SGID is a type alias of
	// SGID from refs package of neofs-api-go.
	SGID = refs.SGID

	// ObjectID is a type alias of
	// ObjectID from refs package of neofs-api-go.
	ObjectID = refs.ObjectID

	// Object is a type alias of
	// Object from object package of neofs-api-go.
	Object = object.Object

	// Address is a type alias of
	// Address from refs package of neofs-api-go.
	Address = refs.Address

	// Netmap is a type alias of
	// NetMap from netmap package.
	Netmap = netmap.NetMap

	// ObjectPlacer is an interface of placement utility.
	ObjectPlacer interface {
		ContainerNodesLister
		ContainerInvolvementChecker
		GetNodes(ctx context.Context, addr Address, usePreviousNetMap bool, excl ...multiaddr.Multiaddr) ([]multiaddr.Multiaddr, error)
		Epoch() uint64
	}

	// ContainerNodesLister is an interface of container placement vector builder.
	ContainerNodesLister interface {
		ContainerNodes(ctx context.Context, cid CID) ([]multiaddr.Multiaddr, error)
		ContainerNodesInfo(ctx context.Context, cid CID, prev int) ([]bootstrap.NodeInfo, error)
	}

	// ContainerInvolvementChecker is an interface of container affiliation checker.
	ContainerInvolvementChecker interface {
		IsContainerNode(ctx context.Context, addr multiaddr.Multiaddr, cid CID, previousNetMap bool) (bool, error)
	}

	// objectPlacer implements ObjectPlacer by delegating all queries
	// to the wrapped placement component.
	objectPlacer struct {
		pl placement.Component
	}
)

// errEmptyPlacement is returned by NewObjectPlacer on a nil component.
const errEmptyPlacement = internal.Error("could not create storage lister: empty placement component")
|
||||
|
||||
// NewObjectPlacer wraps placement.Component and returns ObjectPlacer interface.
|
||||
func NewObjectPlacer(pl placement.Component) (ObjectPlacer, error) {
|
||||
if pl == nil {
|
||||
return nil, errEmptyPlacement
|
||||
}
|
||||
|
||||
return &objectPlacer{pl}, nil
|
||||
}
|
||||
|
||||
func (v objectPlacer) ContainerNodes(ctx context.Context, cid CID) ([]multiaddr.Multiaddr, error) {
|
||||
graph, err := v.pl.Query(ctx, placement.ContainerID(cid))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "objectPlacer.ContainerNodes failed on graph query")
|
||||
}
|
||||
|
||||
return graph.NodeList()
|
||||
}
|
||||
|
||||
func (v objectPlacer) ContainerNodesInfo(ctx context.Context, cid CID, prev int) ([]bootstrap.NodeInfo, error) {
|
||||
graph, err := v.pl.Query(ctx, placement.ContainerID(cid), placement.UsePreviousNetmap(prev))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "objectPlacer.ContainerNodesInfo failed on graph query")
|
||||
}
|
||||
|
||||
return graph.NodeInfo()
|
||||
}
|
||||
|
||||
// GetNodes builds the placement vector for the address, optionally from
// the previous network map and excluding the given nodes. A gRPC
// NotFound from the query is mapped to container.ErrNotFound.
func (v objectPlacer) GetNodes(ctx context.Context, addr Address, usePreviousNetMap bool, excl ...multiaddr.Multiaddr) ([]multiaddr.Multiaddr, error) {
	queryOptions := make([]placement.QueryOption, 1, 2)
	queryOptions[0] = placement.ContainerID(addr.CID)

	if usePreviousNetMap {
		queryOptions = append(queryOptions, placement.UsePreviousNetmap(1))
	}

	graph, err := v.pl.Query(ctx, queryOptions...)
	if err != nil {
		// Translate a gRPC NotFound into the domain error callers expect.
		if st, ok := status.FromError(errors.Cause(err)); ok && st.Code() == codes.NotFound {
			return nil, container.ErrNotFound
		}

		return nil, errors.Wrap(err, "placer.GetNodes failed on graph query")
	}

	// Default filter: keep the bucket as is (container-level placement).
	filter := func(group netmap.SFGroup, bucket *netmap.Bucket) *netmap.Bucket {
		return bucket
	}

	// With a concrete object ID, narrow the bucket to the object's
	// selection so the vector is object-specific.
	if !addr.ObjectID.Empty() {
		filter = func(group netmap.SFGroup, bucket *netmap.Bucket) *netmap.Bucket {
			return bucket.GetSelection(group.Selectors, addr.ObjectID.Bytes())
		}
	}

	return graph.Exclude(excl).Filter(filter).NodeList()
}
|
||||
|
||||
func (v objectPlacer) IsContainerNode(ctx context.Context, addr multiaddr.Multiaddr, cid CID, previousNetMap bool) (bool, error) {
|
||||
nodes, err := v.GetNodes(ctx, Address{
|
||||
CID: cid,
|
||||
}, previousNetMap)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "placer.FromContainer failed on placer.GetNodes")
|
||||
}
|
||||
|
||||
for i := range nodes {
|
||||
if nodes[i].Equal(addr) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (v objectPlacer) Epoch() uint64 { return v.pl.NetworkState().Epoch }
|
41
lib/implementations/reputation.go
Normal file
41
lib/implementations/reputation.go
Normal file
|
@ -0,0 +1,41 @@
|
|||
package implementations
|
||||
|
||||
import (
|
||||
"github.com/nspcc-dev/neofs-node/lib/peers"
|
||||
)
|
||||
|
||||
// MorphReputationContract is a wrapper over NeoFS Reputation contract client
// that provides an interface of the storage of global trust values.
//
// The fields below are populated through setters; they are presumably
// consumed by contract-call methods defined elsewhere in the package —
// no such use is visible in this file.
type MorphReputationContract struct {
	// NeoFS Reputation smart-contract
	repContract StaticContractClient

	// put method name of reputation contract
	putMethodName string

	// list method name of reputation contract
	listMethodName string

	// public key storage
	pkStore peers.PublicKeyStore
}

// SetReputationContractClient is a Reputation contract client setter.
func (s *MorphReputationContract) SetReputationContractClient(v StaticContractClient) {
	s.repContract = v
}

// SetPublicKeyStore is a public key store setter.
func (s *MorphReputationContract) SetPublicKeyStore(v peers.PublicKeyStore) {
	s.pkStore = v
}

// SetPutMethodName is a Reputation contract Put method name setter.
func (s *MorphReputationContract) SetPutMethodName(v string) {
	s.putMethodName = v
}

// SetListMethodName is a Reputation contract List method name setter.
func (s *MorphReputationContract) SetListMethodName(v string) {
	s.listMethodName = v
}
|
136
lib/implementations/sg.go
Normal file
136
lib/implementations/sg.go
Normal file
|
@ -0,0 +1,136 @@
|
|||
package implementations
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"github.com/nspcc-dev/neofs-api-go/hash"
|
||||
"github.com/nspcc-dev/neofs-api-go/object"
|
||||
"github.com/nspcc-dev/neofs-api-go/refs"
|
||||
"github.com/nspcc-dev/neofs-api-go/service"
|
||||
"github.com/nspcc-dev/neofs-api-go/storagegroup"
|
||||
"github.com/nspcc-dev/neofs-node/internal"
|
||||
"github.com/pkg/errors"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type (
	// StorageGroupInfoReceiverParams groups the parameters of
	// storage group information receiver.
	StorageGroupInfoReceiverParams struct {
		SelectiveContainerExecutor SelectiveContainerExecutor
		Logger                     *zap.Logger
	}

	// sgInfoRecv collects storage group information by heading
	// member objects through the container executor.
	sgInfoRecv struct {
		executor SelectiveContainerExecutor
		log      *zap.Logger
	}
)

// locationFinderInstanceFailMsg prefixes constructor errors.
const locationFinderInstanceFailMsg = "could not create object location finder"

// ErrIncompleteSGInfo is returned by storage group information receiver
// that could not receive full information.
const ErrIncompleteSGInfo = internal.Error("could not receive full storage group info")

// PublicSessionToken is a context key for SessionToken.
// FIXME: temp solution for cycle import fix.
// Unify with same const from transformer pkg.
const PublicSessionToken = "public token"

// BearerToken is a context key for BearerToken.
const BearerToken = "bearer token"

// ExtendedHeaders is a context key for X-headers.
const ExtendedHeaders = "extended headers"
|
||||
|
||||
// GetSGInfo heads every object of the group within the container and
// aggregates a storage group: homomorphic hashes are concatenated and
// payload lengths summed. ErrIncompleteSGInfo is returned when at least
// one group member was not seen.
func (s *sgInfoRecv) GetSGInfo(ctx context.Context, cid CID, group []ObjectID) (*storagegroup.StorageGroup, error) {
	var (
		err      error
		res      = new(storagegroup.StorageGroup)
		hashList = make([]hash.Hash, 0, len(group))
	)

	// Track which group members are still pending; entries are removed
	// as their headers arrive.
	m := make(map[string]struct{}, len(group))
	for i := range group {
		m[group[i].String()] = struct{}{}
	}

	// FIXME: hardcoded for simplicity.
	// Function is called in next cases:
	//  - SG transformation on trusted node side (only in this case session token is needed);
	//  - SG info check on container nodes (token is not needed since system group has extra access);
	//  - data audit on inner ring nodes (same as previous).
	var token service.SessionToken
	if v, ok := ctx.Value(PublicSessionToken).(service.SessionToken); ok {
		token = v
	}

	var bearer service.BearerToken
	if v, ok := ctx.Value(BearerToken).(service.BearerToken); ok {
		bearer = v
	}

	var extHdrs []service.ExtendedHeader
	if v, ok := ctx.Value(ExtendedHeaders).([]service.ExtendedHeader); ok {
		extHdrs = v
	}

	if err = s.executor.Head(ctx, &HeadParams{
		GetParams: GetParams{
			SelectiveParams: SelectiveParams{
				CID:    cid,
				TTL:    service.SingleForwardingTTL,
				IDList: group,
				// Stop once every member has been seen; skip addresses
				// already removed from the pending set.
				Breaker: func(addr refs.Address) (cFlag ProgressControlFlag) {
					if len(m) == 0 {
						cFlag = BreakProgress
					} else if _, ok := m[addr.ObjectID.String()]; !ok {
						cFlag = NextAddress
					}
					return
				},
				Token: token,

				Bearer: bearer,

				ExtendedHeaders: extHdrs,
			},
			Handler: func(_ multiaddr.Multiaddr, obj *object.Object) {
				// Objects without a homomorphic hash header are ignored.
				_, hashHeader := obj.LastHeader(object.HeaderType(object.HomoHashHdr))
				if hashHeader == nil {
					return
				}

				// NOTE(review): unchecked type assertion — panics if the
				// header value is not *object.Header_HomoHash; presumably
				// guaranteed by the header type filter above.
				hashList = append(hashList, hashHeader.Value.(*object.Header_HomoHash).HomoHash)
				res.ValidationDataSize += obj.SystemHeader.PayloadLength
				delete(m, obj.SystemHeader.ID.String())
			},
		},
		FullHeaders: true,
	}); err != nil {
		return nil, err
	} else if len(m) > 0 {
		return nil, ErrIncompleteSGInfo
	}

	res.ValidationHash, err = hash.Concat(hashList)

	return res, err
}
|
||||
|
||||
// NewStorageGroupInfoReceiver constructs storagegroup.InfoReceiver from SelectiveContainerExecutor.
|
||||
func NewStorageGroupInfoReceiver(p StorageGroupInfoReceiverParams) (storagegroup.InfoReceiver, error) {
|
||||
switch {
|
||||
case p.Logger == nil:
|
||||
return nil, errors.Wrap(errEmptyLogger, locationFinderInstanceFailMsg)
|
||||
case p.SelectiveContainerExecutor == nil:
|
||||
return nil, errors.Wrap(errEmptyObjectsContainerHandler, locationFinderInstanceFailMsg)
|
||||
}
|
||||
|
||||
return &sgInfoRecv{
|
||||
executor: p.SelectiveContainerExecutor,
|
||||
log: p.Logger,
|
||||
}, nil
|
||||
}
|
657
lib/implementations/transport.go
Normal file
657
lib/implementations/transport.go
Normal file
|
@ -0,0 +1,657 @@
|
|||
package implementations
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"github.com/nspcc-dev/neofs-api-go/hash"
|
||||
"github.com/nspcc-dev/neofs-api-go/object"
|
||||
"github.com/nspcc-dev/neofs-api-go/refs"
|
||||
"github.com/nspcc-dev/neofs-api-go/service"
|
||||
"github.com/nspcc-dev/neofs-node/internal"
|
||||
"github.com/nspcc-dev/neofs-node/lib/transport"
|
||||
"github.com/pkg/errors"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
/*
	File source code includes implementation of unified objects container handler.
	Implementation provides the opportunity to perform any logic over object container distributed in network.
	Implementation holds placement and object transport implementations in a black box.
	Any special logic could be tuned through passing handle parameters.
	NOTE: Although the implementation of the other interfaces via OCH is the same, they are still separated in order to avoid mess.
*/

type (
	// SelectiveContainerExecutor is an interface of the tool that performs
	// object operations in container with preconditions.
	SelectiveContainerExecutor interface {
		Put(context.Context, *PutParams) error
		Get(context.Context, *GetParams) error
		Head(context.Context, *HeadParams) error
		Search(context.Context, *SearchParams) error
		RangeHash(context.Context, *RangeHashParams) error
	}

	// PutParams groups the parameters
	// of selective object Put.
	PutParams struct {
		SelectiveParams
		Object  *object.Object
		Handler func(multiaddr.Multiaddr, bool)

		CopiesNumber uint32
	}

	// GetParams groups the parameters
	// of selective object Get.
	GetParams struct {
		SelectiveParams
		Handler func(multiaddr.Multiaddr, *object.Object)
	}

	// HeadParams groups the parameters
	// of selective object Head.
	HeadParams struct {
		GetParams
		FullHeaders bool
	}

	// SearchParams groups the parameters
	// of selective object Search.
	SearchParams struct {
		SelectiveParams
		SearchCID   refs.CID
		SearchQuery []byte
		Handler     func(multiaddr.Multiaddr, []refs.Address)
	}

	// RangeHashParams groups the parameters
	// of selective object GetRangeHash.
	RangeHashParams struct {
		SelectiveParams
		Ranges  []object.Range
		Salt    []byte
		Handler func(multiaddr.Multiaddr, []hash.Hash)
	}

	// SelectiveParams groups the parameters of
	// the execution of selective container operation.
	SelectiveParams struct {
		/* Should be set to true only if service under object transport implementations is served on localhost. */
		ServeLocal bool

		/* Raw option of the request */
		Raw bool

		/* TTL for object transport. All transport operations inherit same value. */
		TTL uint32

		/* Required ID of processing container. If empty or not set, an error is returned. */
		CID

		/* List of nodes selected for processing. If not specified => nodes will be selected during the execution. */
		Nodes []multiaddr.Multiaddr

		/*
			Next two parameters provide the opportunity to process selective objects in container.
			At least one of non-empty IDList or Query is required, an error is returned otherwise.
		*/

		/* List of objects to process (overlaps query). */
		IDList []refs.ObjectID
		/* If no objects is indicated, query is used for selection. */
		Query []byte

		/*
			If function provided, it is called after every successful operation.
			True result breaks operation performing.
		*/
		Breaker func(refs.Address) ProgressControlFlag

		/* Public session token */
		Token service.SessionToken

		/* Bearer token */
		Bearer service.BearerToken

		/* Extended headers */
		ExtendedHeaders []service.ExtendedHeader
	}

	// ProgressControlFlag is an enumeration of progress control flags.
	ProgressControlFlag int

	// ObjectContainerHandlerParams groups the parameters of SelectiveContainerExecutor constructor.
	ObjectContainerHandlerParams struct {
		NodeLister ContainerNodesLister
		Executor   ContainerTraverseExecutor
		*zap.Logger
	}

	// simpleTraverser yields its fixed address list exactly once
	// (guarded by the embedded sync.Once).
	simpleTraverser struct {
		*sync.Once
		list []multiaddr.Multiaddr
	}

	// selectiveCnrExec implements SelectiveContainerExecutor on top of
	// a node lister and a traverse executor.
	selectiveCnrExec struct {
		cnl      ContainerNodesLister
		Executor ContainerTraverseExecutor
		log      *zap.Logger
	}

	// metaInfo carries the request meta data shared by all operations;
	// it backs the transport.MetaInfo getters defined below.
	metaInfo struct {
		ttl uint32
		raw bool
		rt  object.RequestType

		token service.SessionToken

		bearer service.BearerToken

		extHdrs []service.ExtendedHeader
	}

	// putInfo is the transport meta info of a Put request.
	putInfo struct {
		metaInfo
		obj *object.Object
		cn  uint32
	}

	// getInfo is the transport meta info of a Get request.
	getInfo struct {
		metaInfo
		addr Address
		raw  bool
	}

	// headInfo is the transport meta info of a Head request.
	headInfo struct {
		getInfo
		fullHdr bool
	}

	// searchInfo is the transport meta info of a Search request.
	searchInfo struct {
		metaInfo
		cid   CID
		query []byte
	}

	// rangeHashInfo is the transport meta info of a GetRangeHash request.
	rangeHashInfo struct {
		metaInfo
		addr   Address
		ranges []object.Range
		salt   []byte
	}

	// execItems bundles what exec needs for one selective operation.
	execItems struct {
		params          SelectiveParams
		metaConstructor func(addr Address) transport.MetaInfo
		handler         transport.ResultHandler
	}

	// searchTarget accumulates addresses from search results.
	searchTarget struct {
		list []refs.Address
	}

	// ContainerTraverseExecutor is an interface of
	// object operation executor with container traversing.
	ContainerTraverseExecutor interface {
		Execute(context.Context, TraverseParams)
	}

	// TraverseParams groups the parameters of container traversing.
	TraverseParams struct {
		TransportInfo        transport.MetaInfo
		Handler              transport.ResultHandler
		Traverser            Traverser
		WorkerPool           WorkerPool
		ExecutionInterceptor func(context.Context, multiaddr.Multiaddr) bool
	}

	// WorkerPool is an interface of go-routine pool
	WorkerPool interface {
		Submit(func()) error
	}

	// Traverser is an interface of container traverser.
	Traverser interface {
		Next(context.Context) []multiaddr.Multiaddr
	}

	// cnrTraverseExec implements ContainerTraverseExecutor over an
	// object transport.
	cnrTraverseExec struct {
		transport transport.ObjectTransport
	}

	// singleRoutinePool is a WorkerPool that runs tasks inline.
	singleRoutinePool struct{}

	// emptyReader is an io.Reader that always reports EOF.
	emptyReader struct{}
)
|
||||
|
||||
const (
	_ ProgressControlFlag = iota

	// NextAddress is a ProgressControlFlag to go to the next address of the object.
	NextAddress

	// NextNode is a ProgressControlFlag to go to the next node.
	NextNode

	// BreakProgress is a ProgressControlFlag to interrupt the execution.
	BreakProgress
)

const (
	// instanceFailMsg prefixes SelectiveContainerExecutor constructor errors.
	instanceFailMsg = "could not create container objects collector"

	errEmptyLogger           = internal.Error("empty logger")
	errEmptyNodeLister       = internal.Error("empty container node lister")
	errEmptyTraverseExecutor = internal.Error("empty container traverse executor")

	// errSelectiveParams is returned by SelectiveParams.validate.
	errSelectiveParams = internal.Error("neither ID list nor query provided")
)

// errNilObjectTransport guards transport-based constructors against a nil transport.
var errNilObjectTransport = errors.New("object transport is nil")
|
||||
|
||||
// Put performs a selective Put: a single putInfo (shared by all target
// addresses) is built from the parameters and the traversal is delegated
// to exec.
func (s *selectiveCnrExec) Put(ctx context.Context, p *PutParams) error {
	meta := &putInfo{
		metaInfo: metaInfo{
			ttl: p.TTL,
			rt:  object.RequestPut,
			raw: p.Raw,

			token: p.Token,

			bearer: p.Bearer,

			extHdrs: p.ExtendedHeaders,
		},
		obj: p.Object,
		cn:  p.CopiesNumber,
	}

	return s.exec(ctx, &execItems{
		params: p.SelectiveParams,
		metaConstructor: func(Address) transport.MetaInfo { return meta },
		handler: p,
	})
}

// Get performs a selective Get; a fresh getInfo is built per address.
func (s *selectiveCnrExec) Get(ctx context.Context, p *GetParams) error {
	return s.exec(ctx, &execItems{
		params: p.SelectiveParams,
		metaConstructor: func(addr Address) transport.MetaInfo {
			return &getInfo{
				metaInfo: metaInfo{
					ttl: p.TTL,
					rt:  object.RequestGet,
					raw: p.Raw,

					token: p.Token,

					bearer: p.Bearer,

					extHdrs: p.ExtendedHeaders,
				},
				addr: addr,
				raw:  p.Raw,
			}
		},
		handler: p,
	})
}

// Head performs a selective Head; a fresh headInfo is built per address.
func (s *selectiveCnrExec) Head(ctx context.Context, p *HeadParams) error {
	return s.exec(ctx, &execItems{
		params: p.SelectiveParams,
		metaConstructor: func(addr Address) transport.MetaInfo {
			return &headInfo{
				getInfo: getInfo{
					metaInfo: metaInfo{
						ttl: p.TTL,
						rt:  object.RequestHead,
						raw: p.Raw,

						token: p.Token,

						bearer: p.Bearer,

						extHdrs: p.ExtendedHeaders,
					},
					addr: addr,
					raw:  p.Raw,
				},
				fullHdr: p.FullHeaders,
			}
		},
		handler: p,
	})
}

// Search performs a selective Search; the search container and query
// come from the dedicated SearchCID/SearchQuery fields.
func (s *selectiveCnrExec) Search(ctx context.Context, p *SearchParams) error {
	return s.exec(ctx, &execItems{
		params: p.SelectiveParams,
		metaConstructor: func(Address) transport.MetaInfo {
			return &searchInfo{
				metaInfo: metaInfo{
					ttl: p.TTL,
					rt:  object.RequestSearch,
					raw: p.Raw,

					token: p.Token,

					bearer: p.Bearer,

					extHdrs: p.ExtendedHeaders,
				},
				cid:   p.SearchCID,
				query: p.SearchQuery,
			}
		},
		handler: p,
	})
}

// RangeHash performs a selective GetRangeHash; a fresh rangeHashInfo is
// built per address with the requested ranges and salt.
func (s *selectiveCnrExec) RangeHash(ctx context.Context, p *RangeHashParams) error {
	return s.exec(ctx, &execItems{
		params: p.SelectiveParams,
		metaConstructor: func(addr Address) transport.MetaInfo {
			return &rangeHashInfo{
				metaInfo: metaInfo{
					ttl: p.TTL,
					rt:  object.RequestRangeHash,
					raw: p.Raw,

					token: p.Token,

					bearer: p.Bearer,

					extHdrs: p.ExtendedHeaders,
				},
				addr:   addr,
				ranges: p.Ranges,
				salt:   p.Salt,
			}
		},
		handler: p,
	})
}
|
||||
|
||||
// exec is the shared driver of all selective operations: it validates
// the parameters, resolves the node set, expands each node into a list
// of object addresses, and executes the operation per address while
// honoring the caller's Breaker.
func (s *selectiveCnrExec) exec(ctx context.Context, p *execItems) error {
	if err := p.params.validate(); err != nil {
		return err
	}

	nodes, err := s.prepareNodes(ctx, &p.params)
	if err != nil {
		return err
	}

loop:
	for i := range nodes {
		addrList := s.prepareAddrList(ctx, &p.params, nodes[i])
		if len(addrList) == 0 {
			continue
		}

		for j := range addrList {
			// The breaker may skip the address, skip the node,
			// or stop the whole traversal.
			if p.params.Breaker != nil {
				switch cFlag := p.params.Breaker(addrList[j]); cFlag {
				case NextAddress:
					continue
				case NextNode:
					continue loop
				case BreakProgress:
					break loop
				}
			}

			s.Executor.Execute(ctx, TraverseParams{
				TransportInfo: p.metaConstructor(addrList[j]),
				Handler:       p.handler,
				Traverser:     newSimpleTraverser(nodes[i]),
			})
		}
	}

	return nil
}
|
||||
|
||||
func (s *SelectiveParams) validate() error {
|
||||
switch {
|
||||
case len(s.IDList) == 0 && len(s.Query) == 0:
|
||||
return errSelectiveParams
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s *selectiveCnrExec) prepareNodes(ctx context.Context, p *SelectiveParams) ([]multiaddr.Multiaddr, error) {
|
||||
if len(p.Nodes) > 0 {
|
||||
return p.Nodes, nil
|
||||
}
|
||||
|
||||
// If node serves Object transport service on localhost => pass single empty node
|
||||
if p.ServeLocal {
|
||||
// all transport implementations will use localhost by default
|
||||
return []multiaddr.Multiaddr{nil}, nil
|
||||
}
|
||||
|
||||
// Otherwise use container nodes
|
||||
return s.cnl.ContainerNodes(ctx, p.CID)
|
||||
}
|
||||
|
||||
// prepareAddrList expands the selective parameters into concrete object
// addresses for one node: either directly from the explicit ID list, or
// by running a search request against that node and collecting the
// returned addresses.
func (s *selectiveCnrExec) prepareAddrList(ctx context.Context, p *SelectiveParams, node multiaddr.Multiaddr) []refs.Address {
	var (
		addrList []Address
		l        = len(p.IDList)
	)

	if l > 0 {
		// Explicit ID list overlaps the query.
		addrList = make([]Address, 0, l)
		for i := range p.IDList {
			addrList = append(addrList, Address{CID: p.CID, ObjectID: p.IDList[i]})
		}

		return addrList
	}

	// No IDs: search the node with the query and collect the results.
	handler := new(searchTarget)

	s.Executor.Execute(ctx, TraverseParams{
		TransportInfo: &searchInfo{
			metaInfo: metaInfo{
				ttl: p.TTL,
				rt:  object.RequestSearch,
				raw: p.Raw,

				token: p.Token,

				bearer: p.Bearer,

				extHdrs: p.ExtendedHeaders,
			},
			cid:   p.CID,
			query: p.Query,
		},
		Handler:   handler,
		Traverser: newSimpleTraverser(node),
	})

	return handler.list
}
|
||||
|
||||
// newSimpleTraverser returns a Traverser that yields the passed
// address list exactly once and then reports exhaustion.
func newSimpleTraverser(list ...multiaddr.Multiaddr) Traverser {
	return &simpleTraverser{
		Once: new(sync.Once),
		list: list,
	}
}

// Next returns the stored address list on the first call only.
// Subsequent calls return nil because the sync.Once body — the only
// place res is assigned — runs just once.
func (s *simpleTraverser) Next(context.Context) (res []multiaddr.Multiaddr) {
	s.Do(func() {
		res = s.list
	})

	return
}
|
||||
|
||||
// GetTTL returns the request TTL carried by the meta information.
func (s metaInfo) GetTTL() uint32 { return s.ttl }

// GetTimeout always returns zero: per-request timeouts are not used here.
func (s metaInfo) GetTimeout() time.Duration { return 0 }

// GetRaw returns the raw-request flag.
func (s metaInfo) GetRaw() bool { return s.raw }

// Type returns the object request type this meta information describes.
func (s metaInfo) Type() object.RequestType { return s.rt }

// GetSessionToken returns the attached session token (may be nil).
func (s metaInfo) GetSessionToken() service.SessionToken { return s.token }

// GetBearerToken returns the attached bearer token (may be nil).
func (s metaInfo) GetBearerToken() service.BearerToken { return s.bearer }

// ExtendedHeaders returns the attached extended request headers.
func (s metaInfo) ExtendedHeaders() []service.ExtendedHeader { return s.extHdrs }

// GetHead returns the object to be put (header part).
func (s *putInfo) GetHead() *object.Object { return s.obj }

// Payload returns an always-empty reader: put requests prepared here
// carry no payload stream.
func (s *putInfo) Payload() io.Reader { return new(emptyReader) }

// Read immediately reports EOF without consuming p.
func (*emptyReader) Read(p []byte) (int, error) { return 0, io.EOF }

// CopiesNumber returns the requested number of object copies.
func (s *putInfo) CopiesNumber() uint32 {
	return s.cn
}

// GetAddress returns the address of the object to get.
func (s *getInfo) GetAddress() refs.Address { return s.addr }

// Raw returns the raw-request flag of the get request.
func (s *getInfo) Raw() bool { return s.raw }

// GetFullHeaders reports whether full headers were requested.
func (s *headInfo) GetFullHeaders() bool { return s.fullHdr }

// GetCID returns the container ID to search in.
func (s *searchInfo) GetCID() refs.CID { return s.cid }

// GetQuery returns the serialized search query.
func (s *searchInfo) GetQuery() []byte { return s.query }

// GetAddress returns the address of the object to hash.
func (s *rangeHashInfo) GetAddress() refs.Address { return s.addr }

// GetRanges returns the payload ranges to hash.
func (s *rangeHashInfo) GetRanges() []object.Range { return s.ranges }

// GetSalt returns the salt applied before hashing.
func (s *rangeHashInfo) GetSalt() []byte { return s.salt }
|
||||
|
||||
func (s *searchTarget) HandleResult(_ context.Context, _ multiaddr.Multiaddr, r interface{}, e error) {
|
||||
if e == nil {
|
||||
s.list = append(s.list, r.([]refs.Address)...)
|
||||
}
|
||||
}
|
||||
|
||||
// HandleResult calls Handler with:
// - Multiaddr with argument value;
// - error equality to nil (i.e. the success flag of the put).
func (s *PutParams) HandleResult(_ context.Context, node multiaddr.Multiaddr, _ interface{}, e error) {
	s.Handler(node, e == nil)
}

// HandleResult calls Handler if error argument is nil with:
// - Multiaddr with argument value;
// - result cast to an Object pointer.
func (s *GetParams) HandleResult(_ context.Context, node multiaddr.Multiaddr, r interface{}, e error) {
	if e == nil {
		s.Handler(node, r.(*object.Object))
	}
}

// HandleResult calls Handler if error argument is nil with:
// - Multiaddr with argument value;
// - result cast to Address slice.
func (s *SearchParams) HandleResult(_ context.Context, node multiaddr.Multiaddr, r interface{}, e error) {
	if e == nil {
		s.Handler(node, r.([]refs.Address))
	}
}

// HandleResult calls Handler if error argument is nil with:
// - Multiaddr with argument value;
// - result cast to Hash slice.
func (s *RangeHashParams) HandleResult(_ context.Context, node multiaddr.Multiaddr, r interface{}, e error) {
	if e == nil {
		s.Handler(node, r.([]hash.Hash))
	}
}
|
||||
|
||||
// Execute walks the nodes produced by the traverser and performs the
// object transport operation on each of them through the worker pool.
//
// Batches from Traverser.Next are processed fully (wg.Wait) before the
// next batch is requested. The derived context is canceled on return so
// in-flight transports observe shutdown.
func (s *cnrTraverseExec) Execute(ctx context.Context, p TraverseParams) {
	// fall back to synchronous execution when no pool is supplied
	if p.WorkerPool == nil {
		p.WorkerPool = new(singleRoutinePool)
	}

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	wg := new(sync.WaitGroup)

	for {
		// stop between batches if the context is already done
		select {
		case <-ctx.Done():
			return
		default:
		}

		nodes := p.Traverser.Next(ctx)
		if len(nodes) == 0 {
			break
		}

		for i := range nodes {
			// capture per-iteration copy for the submitted closure
			node := nodes[i]

			wg.Add(1)

			if err := p.WorkerPool.Submit(func() {
				defer wg.Done()

				// interceptor may short-circuit the visit of this node
				if p.ExecutionInterceptor != nil && p.ExecutionInterceptor(ctx, node) {
					return
				}

				s.transport.Transport(ctx, transport.ObjectTransportParams{
					TransportInfo: p.TransportInfo,
					TargetNode:    node,
					ResultHandler: p.Handler,
				})
			}); err != nil {
				// submission failed: the closure never runs, so balance
				// the Add above manually
				wg.Done()
			}
		}

		// finish the current batch before asking the traverser for more
		wg.Wait()
	}
}
|
||||
|
||||
// Submit runs fn synchronously in the calling goroutine and never fails.
// It is the degenerate worker pool used when none is configured.
func (*singleRoutinePool) Submit(fn func()) error {
	fn()
	return nil
}
|
||||
|
||||
// NewObjectContainerHandler is a SelectiveContainerExecutor constructor.
//
// All three dependencies (traverse executor, logger, node lister) are
// required; a wrapped error naming the missing one is returned otherwise.
func NewObjectContainerHandler(p ObjectContainerHandlerParams) (SelectiveContainerExecutor, error) {
	switch {
	case p.Executor == nil:
		return nil, errors.Wrap(errEmptyTraverseExecutor, instanceFailMsg)
	case p.Logger == nil:
		return nil, errors.Wrap(errEmptyLogger, instanceFailMsg)
	case p.NodeLister == nil:
		return nil, errors.Wrap(errEmptyNodeLister, instanceFailMsg)
	}

	return &selectiveCnrExec{
		cnl:      p.NodeLister,
		Executor: p.Executor,
		log:      p.Logger,
	}, nil
}
|
||||
|
||||
// NewContainerTraverseExecutor is a ContainerTraverseExecutor constructor.
//
// Returns errNilObjectTransport if the transport is nil.
func NewContainerTraverseExecutor(t transport.ObjectTransport) (ContainerTraverseExecutor, error) {
	if t == nil {
		return nil, errNilObjectTransport
	}

	return &cnrTraverseExec{transport: t}, nil
}
|
405
lib/implementations/validation.go
Normal file
405
lib/implementations/validation.go
Normal file
|
@ -0,0 +1,405 @@
|
|||
package implementations
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"crypto/sha256"
|
||||
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"github.com/nspcc-dev/neofs-api-go/hash"
|
||||
"github.com/nspcc-dev/neofs-api-go/object"
|
||||
"github.com/nspcc-dev/neofs-api-go/refs"
|
||||
"github.com/nspcc-dev/neofs-api-go/service"
|
||||
crypto "github.com/nspcc-dev/neofs-crypto"
|
||||
"github.com/nspcc-dev/neofs-node/internal"
|
||||
"github.com/nspcc-dev/neofs-node/lib/core"
|
||||
"github.com/nspcc-dev/neofs-node/lib/localstore"
|
||||
"github.com/nspcc-dev/neofs-node/lib/objutil"
|
||||
"github.com/nspcc-dev/neofs-node/lib/rand"
|
||||
"github.com/nspcc-dev/neofs-node/lib/replication"
|
||||
"github.com/pkg/errors"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type (
	// objectValidator is a replication.ObjectVerifier implementation
	// that checks objects locally or by querying a remote node.
	objectValidator struct {
		as       AddressStore
		ls       localstore.Localstore
		executor SelectiveContainerExecutor
		log      *zap.Logger

		saltSize   int              // size of the random salt for range hashing
		maxRngSize uint64           // upper bound for a single payload range
		rangeCount int              // number of random ranges to probe
		sltr       Salitor
		verifier   objutil.Verifier
	}

	// Salitor is a salting data function.
	Salitor func(data, salt []byte) []byte

	// ObjectValidatorParams groups the parameters
	// of the object validator constructor.
	ObjectValidatorParams struct {
		AddressStore               AddressStore
		Localstore                 localstore.Localstore
		SelectiveContainerExecutor SelectiveContainerExecutor
		Logger                     *zap.Logger

		Salitor             Salitor
		SaltSize            int
		MaxPayloadRangeSize uint64
		PayloadRangeCount   int

		Verifier objutil.Verifier
	}

	// localHeadIntegrityVerifier checks object header integrity and
	// the owner key through the configured key verifier.
	localHeadIntegrityVerifier struct {
		keyVerifier core.OwnerKeyVerifier
	}

	// payloadVerifier checks the payload against its checksum header.
	payloadVerifier struct {
	}

	// localIntegrityVerifier combines head and payload verification.
	localIntegrityVerifier struct {
		headVerifier    objutil.Verifier
		payloadVerifier objutil.Verifier
	}
)
|
||||
|
||||
const (
	objectValidatorInstanceFailMsg = "could not create object validator"

	errEmptyLocalstore     = internal.Error("empty local storage")
	errEmptyObjectVerifier = internal.Error("empty object verifier")

	// defaults applied by NewObjectValidator when the corresponding
	// parameter is unset
	defaultSaltSize            = 64 // bytes
	defaultPayloadRangeCount   = 3
	defaultMaxPayloadRangeSize = 64
)

const (
	errBrokenHeaderStructure = internal.Error("broken header structure")

	errMissingPayloadChecksumHeader = internal.Error("missing payload checksum header")
	errWrongPayloadChecksum         = internal.Error("wrong payload checksum")
)
|
||||
|
||||
// Verify checks the object described by params, locally when the target
// node is this node (or unset), remotely otherwise.
//
// Returns false if the node's own address cannot be determined.
func (s *objectValidator) Verify(ctx context.Context, params *replication.ObjectVerificationParams) bool {
	selfAddr, err := s.as.SelfAddr()
	if err != nil {
		s.log.Debug("receive self address failure", zap.Error(err))
		return false
	}

	// nil node or self address => local verification path
	if params.Node == nil || params.Node.Equal(selfAddr) {
		return s.verifyLocal(ctx, params.Address)
	}

	return s.verifyRemote(ctx, params)
}
|
||||
|
||||
func (s *objectValidator) verifyLocal(ctx context.Context, addr Address) bool {
|
||||
var (
|
||||
err error
|
||||
obj *Object
|
||||
)
|
||||
|
||||
if obj, err = s.ls.Get(addr); err != nil {
|
||||
s.log.Debug("get local meta information failure", zap.Error(err))
|
||||
return false
|
||||
} else if err = s.verifier.Verify(ctx, obj); err != nil {
|
||||
s.log.Debug("integrity check failure", zap.Error(err))
|
||||
}
|
||||
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// verifyRemote validates an object held by a remote node.
//
// Strategy: request the (raw, full) header from the node and verify it;
// for payload-less or link objects that is sufficient. Otherwise, if a
// local copy is available and not known to be invalid, compare salted
// range hashes against the remote; as a last resort, fetch the whole
// object and verify it. The deferred call reports the final verdict to
// params.Handler once a remote object has been received.
func (s *objectValidator) verifyRemote(ctx context.Context, params *replication.ObjectVerificationParams) bool {
	var (
		receivedObj *Object
		valid       bool
	)

	// notify the caller with whatever verdict `valid` holds on return
	defer func() {
		if params.Handler != nil && receivedObj != nil {
			params.Handler(valid, receivedObj)
		}
	}()

	p := &HeadParams{
		GetParams: GetParams{
			SelectiveParams: SelectiveParams{
				CID:    params.CID,
				Nodes:  []multiaddr.Multiaddr{params.Node},
				TTL:    service.NonForwardingTTL,
				IDList: []ObjectID{params.ObjectID},
				Raw:    true,
			},
			// shared by the Head call below and the fallback Get call
			Handler: func(_ multiaddr.Multiaddr, obj *object.Object) {
				receivedObj = obj
				valid = s.verifier.Verify(ctx, obj) == nil
			},
		},
		FullHeaders: true,
	}

	if err := s.executor.Head(ctx, p); err != nil || !valid {
		return false
	} else if receivedObj.SystemHeader.PayloadLength <= 0 || receivedObj.IsLinking() {
		// header-only objects need no payload check
		return true
	}

	// prefer cheap hash-based comparison when a trusted local copy exists
	if !params.LocalInvalid {
		has, err := s.ls.Has(params.Address)
		if err == nil && has {
			obj, err := s.ls.Get(params.Address)
			if err == nil {
				return s.verifyThroughHashes(ctx, obj, params.Node)
			}
		}
	}

	// fallback: fetch the full object; the shared handler resets `valid`
	valid = false
	_ = s.executor.Get(ctx, &p.GetParams)

	return valid
}
|
||||
|
||||
// verifyThroughHashes probes the remote copy of obj by asking the node
// to hash randomly chosen salted payload ranges, then recomputes the
// same hashes over the local payload and compares them.
func (s *objectValidator) verifyThroughHashes(ctx context.Context, obj *Object, node multiaddr.Multiaddr) (valid bool) {
	var (
		salt = generateSalt(s.saltSize)
		rngs = generateRanges(obj.SystemHeader.PayloadLength, s.maxRngSize, s.rangeCount)
	)

	// error ignored deliberately: `valid` stays false unless the handler
	// runs and the hashes match
	_ = s.executor.RangeHash(ctx, &RangeHashParams{
		SelectiveParams: SelectiveParams{
			CID:    obj.SystemHeader.CID,
			Nodes:  []multiaddr.Multiaddr{node},
			TTL:    service.NonForwardingTTL,
			IDList: []ObjectID{obj.SystemHeader.ID},
		},
		Ranges: rngs,
		Salt:   salt,
		Handler: func(node multiaddr.Multiaddr, hashes []hash.Hash) {
			valid = compareHashes(s.sltr, obj.Payload, salt, rngs, hashes)
		},
	})

	return
}
|
||||
|
||||
func compareHashes(sltr Salitor, payload, salt []byte, rngs []object.Range, hashes []hash.Hash) bool {
|
||||
if len(rngs) != len(hashes) {
|
||||
return false
|
||||
}
|
||||
|
||||
for i := range rngs {
|
||||
saltPayloadPart := sltr(payload[rngs[i].Offset:rngs[i].Offset+rngs[i].Length], salt)
|
||||
if !hashes[i].Equal(hash.Sum(saltPayloadPart)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// generateRanges produces `count` payload ranges of equal length
// min(payloadSize, maxRangeSize), each at a random offset such that the
// range stays inside the payload. Ranges may overlap.
func generateRanges(payloadSize, maxRangeSize uint64, count int) []object.Range {
	res := make([]object.Range, count)

	l := min(payloadSize, maxRangeSize)

	for i := 0; i < count; i++ {
		res[i].Length = l
		// NOTE(review): rand here is the project's lib/rand; presumably
		// Uint64(src, n) yields a value in [0, n] or [0, n) — confirm the
		// bound is inclusive/exclusive against that package.
		res[i].Offset = rand.Uint64(rand.New(), int64(payloadSize-l))
	}

	return res
}
|
||||
|
||||
// min returns the smaller of the two unsigned integers.
func min(a, b uint64) uint64 {
	if b < a {
		return b
	}

	return a
}
|
||||
|
||||
// generateSalt returns saltSize random bytes, or nil if the random
// source fails. Callers treat a nil salt as "no salt".
func generateSalt(saltSize int) []byte {
	salt := make([]byte, saltSize)
	if _, err := rand.Read(salt); err != nil {
		return nil
	}

	return salt
}
|
||||
|
||||
// NewObjectValidator constructs universal replication.ObjectVerifier.
|
||||
func NewObjectValidator(p *ObjectValidatorParams) (replication.ObjectVerifier, error) {
|
||||
switch {
|
||||
case p.Logger == nil:
|
||||
return nil, errors.Wrap(errEmptyLogger, objectValidatorInstanceFailMsg)
|
||||
case p.AddressStore == nil:
|
||||
return nil, errors.Wrap(errEmptyAddressStore, objectValidatorInstanceFailMsg)
|
||||
case p.Localstore == nil:
|
||||
return nil, errors.Wrap(errEmptyLocalstore, objectValidatorInstanceFailMsg)
|
||||
case p.Verifier == nil:
|
||||
return nil, errors.Wrap(errEmptyObjectVerifier, objectValidatorInstanceFailMsg)
|
||||
}
|
||||
|
||||
if p.SaltSize <= 0 {
|
||||
p.SaltSize = defaultSaltSize
|
||||
}
|
||||
|
||||
if p.PayloadRangeCount <= 0 {
|
||||
p.PayloadRangeCount = defaultPayloadRangeCount
|
||||
}
|
||||
|
||||
if p.MaxPayloadRangeSize <= 0 {
|
||||
p.MaxPayloadRangeSize = defaultMaxPayloadRangeSize
|
||||
}
|
||||
|
||||
return &objectValidator{
|
||||
as: p.AddressStore,
|
||||
ls: p.Localstore,
|
||||
executor: p.SelectiveContainerExecutor,
|
||||
log: p.Logger,
|
||||
saltSize: p.SaltSize,
|
||||
maxRngSize: p.MaxPayloadRangeSize,
|
||||
rangeCount: p.PayloadRangeCount,
|
||||
sltr: p.Salitor,
|
||||
verifier: p.Verifier,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewLocalHeadIntegrityVerifier constructs local object head verifier and returns objutil.Verifier interface.
|
||||
func NewLocalHeadIntegrityVerifier(keyVerifier core.OwnerKeyVerifier) (objutil.Verifier, error) {
|
||||
if keyVerifier == nil {
|
||||
return nil, core.ErrNilOwnerKeyVerifier
|
||||
}
|
||||
|
||||
return &localHeadIntegrityVerifier{
|
||||
keyVerifier: keyVerifier,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewLocalIntegrityVerifier constructs local object verifier and returns objutil.Verifier interface.
|
||||
func NewLocalIntegrityVerifier(keyVerifier core.OwnerKeyVerifier) (objutil.Verifier, error) {
|
||||
if keyVerifier == nil {
|
||||
return nil, core.ErrNilOwnerKeyVerifier
|
||||
}
|
||||
|
||||
return &localIntegrityVerifier{
|
||||
headVerifier: &localHeadIntegrityVerifier{
|
||||
keyVerifier: keyVerifier,
|
||||
},
|
||||
payloadVerifier: new(payloadVerifier),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewPayloadVerifier constructs object payload verifier and returns objutil.Verifier.
func NewPayloadVerifier() objutil.Verifier {
	return new(payloadVerifier)
}
|
||||
|
||||
// hdrOwnerKeyContainer adapts an owner ID and a raw public key taken
// from object headers to the core.OwnerKeyContainer interface.
type hdrOwnerKeyContainer struct {
	owner refs.OwnerID
	key   []byte
}

// GetOwnerID returns the stored owner identifier.
func (s hdrOwnerKeyContainer) GetOwnerID() refs.OwnerID {
	return s.owner
}

// GetOwnerKey returns the stored binary public key.
func (s hdrOwnerKeyContainer) GetOwnerKey() []byte {
	return s.key
}
|
||||
|
||||
// Verify checks the integrity of the object header.
//
// The signing key is taken from a session-token header (after verifying
// the token's own signature) or, failing that, from a public-key
// header. The owner/key pair is then validated by the key verifier and
// the header signature is checked against the chosen key.
func (s *localHeadIntegrityVerifier) Verify(ctx context.Context, obj *Object) error {
	var (
		checkKey    *ecdsa.PublicKey
		ownerKeyCnr core.OwnerKeyContainer
	)

	if _, h := obj.LastHeader(object.HeaderType(object.TokenHdr)); h != nil {
		// NOTE(review): type assertion panics if the header value is not
		// *object.Header_Token — presumably guaranteed by LastHeader's
		// type filter; confirm.
		token := h.GetValue().(*object.Header_Token).Token

		// the token itself must be signed by the owner key it carries
		if err := service.VerifySignatureWithKey(
			crypto.UnmarshalPublicKey(token.GetOwnerKey()),
			service.NewVerifiedSessionToken(token),
		); err != nil {
			return err
		}

		ownerKeyCnr = token

		// header integrity is signed with the session key, not the owner key
		checkKey = crypto.UnmarshalPublicKey(token.GetSessionKey())
	} else if _, h := obj.LastHeader(object.HeaderType(object.PublicKeyHdr)); h != nil {
		pkHdr := h.GetValue().(*object.Header_PublicKey)
		if pkHdr != nil && pkHdr.PublicKey != nil {
			val := pkHdr.PublicKey.GetValue()

			ownerKeyCnr = &hdrOwnerKeyContainer{
				owner: obj.GetSystemHeader().OwnerID,
				key:   val,
			}

			checkKey = crypto.UnmarshalPublicKey(val)
		}
	}

	if ownerKeyCnr == nil {
		return core.ErrNilOwnerKeyContainer
	} else if err := s.keyVerifier.VerifyKey(ctx, ownerKeyCnr); err != nil {
		return err
	}

	return verifyObjectIntegrity(obj, checkKey)
}
|
||||
|
||||
// verifyObjectIntegrity verifies integrity of object header.
// Returns error if object
// - does not contain integrity header;
// - integrity header is not a last header in object;
// - integrity header signature is broken.
func verifyObjectIntegrity(obj *Object, key *ecdsa.PublicKey) error {
	n, h := obj.LastHeader(object.HeaderType(object.IntegrityHdr))

	// the integrity header must exist and be the final header
	if l := len(obj.Headers); l <= 0 || n != l-1 {
		return errBrokenHeaderStructure
	}

	integrityHdr := h.Value.(*object.Header_Integrity).Integrity
	if integrityHdr == nil {
		return errBrokenHeaderStructure
	}

	// checksum covers all headers preceding the integrity header
	data, err := objutil.MarshalHeaders(obj, n)
	if err != nil {
		return err
	}

	hdrChecksum := sha256.Sum256(data)

	return crypto.Verify(key, hdrChecksum[:], integrityHdr.ChecksumSignature)
}
|
||||
|
||||
func (s *payloadVerifier) Verify(_ context.Context, obj *Object) error {
|
||||
if _, h := obj.LastHeader(object.HeaderType(object.PayloadChecksumHdr)); h == nil {
|
||||
return errMissingPayloadChecksumHeader
|
||||
} else if checksum := sha256.Sum256(obj.Payload); !bytes.Equal(
|
||||
checksum[:],
|
||||
h.Value.(*object.Header_PayloadChecksum).PayloadChecksum,
|
||||
) {
|
||||
return errWrongPayloadChecksum
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *localIntegrityVerifier) Verify(ctx context.Context, obj *Object) error {
|
||||
if err := s.headVerifier.Verify(ctx, obj); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.payloadVerifier.Verify(ctx, obj)
|
||||
}
|
273
lib/implementations/validation_test.go
Normal file
273
lib/implementations/validation_test.go
Normal file
|
@ -0,0 +1,273 @@
|
|||
package implementations
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"crypto/sha256"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"github.com/nspcc-dev/neofs-api-go/object"
|
||||
"github.com/nspcc-dev/neofs-api-go/refs"
|
||||
"github.com/nspcc-dev/neofs-api-go/service"
|
||||
crypto "github.com/nspcc-dev/neofs-crypto"
|
||||
"github.com/nspcc-dev/neofs-node/internal"
|
||||
"github.com/nspcc-dev/neofs-node/lib/core"
|
||||
"github.com/nspcc-dev/neofs-node/lib/localstore"
|
||||
"github.com/nspcc-dev/neofs-node/lib/objutil"
|
||||
"github.com/nspcc-dev/neofs-node/lib/test"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// testEntity is a multi-purpose test double: it implements the
// verifier, address-store, localstore and key-verifier interfaces used
// by the code under test, returning the configured err where relevant.
type testEntity struct {
	err error // error returned by Verify and VerifyKey
}

func (s *testEntity) Verify(context.Context, *object.Object) error { return s.err }

// The methods below are unused stubs required only for interface satisfaction.
func (s *testEntity) SelfAddr() (multiaddr.Multiaddr, error)                  { panic("implement me") }
func (s *testEntity) Put(context.Context, *localstore.Object) error           { panic("implement me") }
func (s *testEntity) Get(localstore.Address) (*localstore.Object, error)      { panic("implement me") }
func (s *testEntity) Del(localstore.Address) error                            { panic("implement me") }
func (s *testEntity) Meta(localstore.Address) (*localstore.ObjectMeta, error) { panic("implement me") }
func (s *testEntity) Has(localstore.Address) (bool, error)                    { panic("implement me") }
func (s *testEntity) ObjectsCount() (uint64, error)                           { panic("implement me") }
func (s *testEntity) Size() int64                                             { panic("implement me") }
func (s *testEntity) Iterate(localstore.FilterPipeline, localstore.MetaHandler) error {
	panic("implement me")
}

func (s *testEntity) PRead(ctx context.Context, addr refs.Address, rng object.Range) ([]byte, error) {
	panic("implement me")
}

func (s *testEntity) VerifyKey(context.Context, core.OwnerKeyContainer) error {
	return s.err
}
|
||||
|
||||
// TestNewObjectValidator checks the constructor's success path and its
// rejection of missing required dependencies.
func TestNewObjectValidator(t *testing.T) {
	validParams := ObjectValidatorParams{
		Logger:       zap.L(),
		AddressStore: new(testEntity),
		Localstore:   new(testEntity),
		Verifier:     new(testEntity),
	}

	t.Run("valid params", func(t *testing.T) {
		s, err := NewObjectValidator(&validParams)
		require.NoError(t, err)
		require.NotNil(t, s)
	})
	t.Run("fail on empty local storage", func(t *testing.T) {
		p := validParams
		p.Localstore = nil
		_, err := NewObjectValidator(&p)
		require.EqualError(t, err, errors.Wrap(errEmptyLocalstore, objectValidatorInstanceFailMsg).Error())
	})
	t.Run("fail on empty logger", func(t *testing.T) {
		p := validParams
		p.Logger = nil
		_, err := NewObjectValidator(&p)
		require.EqualError(t, err, errors.Wrap(errEmptyLogger, objectValidatorInstanceFailMsg).Error())
	})
}
|
||||
|
||||
// TestNewLocalIntegrityVerifier checks that both verifier constructors
// reject a nil key verifier and succeed with a valid one.
func TestNewLocalIntegrityVerifier(t *testing.T) {
	var (
		err         error
		verifier    objutil.Verifier
		keyVerifier = new(testEntity)
	)

	_, err = NewLocalHeadIntegrityVerifier(nil)
	require.EqualError(t, err, core.ErrNilOwnerKeyVerifier.Error())

	_, err = NewLocalIntegrityVerifier(nil)
	require.EqualError(t, err, core.ErrNilOwnerKeyVerifier.Error())

	verifier, err = NewLocalHeadIntegrityVerifier(keyVerifier)
	require.NoError(t, err)
	require.NotNil(t, verifier)

	verifier, err = NewLocalIntegrityVerifier(keyVerifier)
	require.NoError(t, err)
	require.NotNil(t, verifier)
}
|
||||
|
||||
// TestLocalHeadIntegrityVerifier_Verify builds objects signed either
// through a session token or directly with the owner key and checks
// that the combined local verifier accepts them.
func TestLocalHeadIntegrityVerifier_Verify(t *testing.T) {
	var (
		ctx               = context.TODO()
		ownerPrivateKey   = test.DecodeKey(0)
		ownerPublicKey    = &ownerPrivateKey.PublicKey
		sessionPrivateKey = test.DecodeKey(1)
		sessionPublicKey  = &sessionPrivateKey.PublicKey
	)

	ownerID, err := refs.NewOwnerID(ownerPublicKey)
	require.NoError(t, err)

	s, err := NewLocalIntegrityVerifier(core.NewNeoKeyVerifier())
	require.NoError(t, err)

	okItems := []func() *Object{
		// correct object w/ session token
		func() *Object {
			token := new(service.Token)
			token.SetOwnerID(ownerID)
			token.SetSessionKey(crypto.MarshalPublicKey(sessionPublicKey))

			// the token must carry the owner's signature
			require.NoError(t,
				service.AddSignatureWithKey(
					ownerPrivateKey,
					service.NewSignedSessionToken(token),
				),
			)

			obj := new(Object)
			obj.AddHeader(&object.Header{
				Value: &object.Header_Token{
					Token: token,
				},
			})

			obj.SetPayload([]byte{1, 2, 3})
			addPayloadChecksum(obj)

			// headers are signed with the session key in the token path
			addHeadersChecksum(t, obj, sessionPrivateKey)

			return obj
		},
		// correct object w/o session token
		func() *Object {
			obj := new(Object)
			obj.SystemHeader.OwnerID = ownerID
			obj.SetPayload([]byte{1, 2, 3})

			addPayloadChecksum(obj)

			obj.AddHeader(&object.Header{
				Value: &object.Header_PublicKey{
					PublicKey: &object.PublicKey{
						Value: crypto.MarshalPublicKey(ownerPublicKey),
					},
				},
			})

			// headers are signed directly with the owner key here
			addHeadersChecksum(t, obj, ownerPrivateKey)

			return obj
		},
	}

	failItems := []func() *Object{}

	for _, item := range okItems {
		require.NoError(t, s.Verify(ctx, item()))
	}

	for _, item := range failItems {
		require.Error(t, s.Verify(ctx, item()))
	}
}
|
||||
|
||||
// addPayloadChecksum appends a payload-checksum header containing the
// SHA-256 of the object's current payload.
func addPayloadChecksum(obj *Object) {
	payloadChecksum := sha256.Sum256(obj.GetPayload())

	obj.AddHeader(&object.Header{
		Value: &object.Header_PayloadChecksum{
			PayloadChecksum: payloadChecksum[:],
		},
	})
}

// addHeadersChecksum appends a signed integrity header covering all
// headers currently present on the object, signed with key.
func addHeadersChecksum(t *testing.T, obj *Object, key *ecdsa.PrivateKey) {
	headersData, err := objutil.MarshalHeaders(obj, len(obj.Headers))
	require.NoError(t, err)

	headersChecksum := sha256.Sum256(headersData)

	integrityHdr := new(object.IntegrityHeader)
	integrityHdr.SetHeadersChecksum(headersChecksum[:])

	require.NoError(t, service.AddSignatureWithKey(key, integrityHdr))

	obj.AddHeader(&object.Header{
		Value: &object.Header_Integrity{
			Integrity: integrityHdr,
		},
	})
}
|
||||
|
||||
// TestPayloadVerifier_Verify covers the missing-header error, the happy
// path, and checksum mismatches caused by corrupting either the stored
// checksum or the payload itself.
func TestPayloadVerifier_Verify(t *testing.T) {
	ctx := context.TODO()
	verifier := new(payloadVerifier)

	t.Run("missing header", func(t *testing.T) {
		obj := new(Object)
		require.EqualError(t, verifier.Verify(ctx, obj), errMissingPayloadChecksumHeader.Error())
	})

	t.Run("correct result", func(t *testing.T) {
		payload := testData(t, 10)

		cs := sha256.Sum256(payload)
		hdr := &object.Header_PayloadChecksum{PayloadChecksum: cs[:]}

		obj := &Object{
			Headers: []object.Header{{Value: hdr}},
			Payload: payload,
		}

		require.NoError(t, verifier.Verify(ctx, obj))

		// corrupt the stored checksum
		hdr.PayloadChecksum[0]++
		require.EqualError(t, verifier.Verify(ctx, obj), errWrongPayloadChecksum.Error())

		// restore the checksum, corrupt the payload instead
		hdr.PayloadChecksum[0]--
		obj.Payload[0]++
		require.EqualError(t, verifier.Verify(ctx, obj), errWrongPayloadChecksum.Error())
	})
}
|
||||
|
||||
// TestLocalIntegrityVerifier_Verify checks that the combined verifier
// propagates errors from its head and payload sub-verifiers.
func TestLocalIntegrityVerifier_Verify(t *testing.T) {
	ctx := context.TODO()
	obj := new(Object)

	t.Run("head verification failure", func(t *testing.T) {
		hErr := internal.Error("test error for head verifier")

		s := &localIntegrityVerifier{
			headVerifier: &testEntity{
				err: hErr, // force head verifier to return hErr
			},
		}

		require.EqualError(t, s.Verify(ctx, obj), hErr.Error())
	})

	t.Run("correct result", func(t *testing.T) {
		pErr := internal.Error("test error for payload verifier")

		s := &localIntegrityVerifier{
			headVerifier: new(testEntity),
			payloadVerifier: &testEntity{
				err: pErr, // force payload verifier to return pErr
			},
		}

		require.EqualError(t, s.Verify(ctx, obj), pErr.Error())
	})
}
|
||||
|
||||
// testData returns size bytes of random data, failing the test if the
// random source errors.
func testData(t *testing.T, size int) []byte {
	res := make([]byte, size)
	_, err := rand.Read(res)
	require.NoError(t, err)
	return res
}
|
||||
|
||||
// TODO: write functionality tests
|
17
lib/ir/info.go
Normal file
17
lib/ir/info.go
Normal file
|
@ -0,0 +1,17 @@
|
|||
package ir
|
||||
|
||||
// Info is a structure that groups the information
// about inner ring.
type Info struct {
	nodes []Node
}

// SetNodes is an IR node list setter.
// The slice is stored as-is (not copied).
func (s *Info) SetNodes(v []Node) {
	s.nodes = v
}

// Nodes is an IR node list getter.
func (s Info) Nodes() []Node {
	return s.nodes
}
|
25
lib/ir/info_test.go
Normal file
25
lib/ir/info_test.go
Normal file
|
@ -0,0 +1,25 @@
|
|||
package ir
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestInfo checks that SetNodes/Nodes round-trip the node list.
func TestInfo(t *testing.T) {
	s := Info{}

	n1 := Node{}
	n1.SetKey([]byte{1, 2, 3})

	n2 := Node{}
	n2.SetKey([]byte{4, 5, 6})

	nodes := []Node{
		n1,
		n2,
	}
	s.SetNodes(nodes)

	require.Equal(t, nodes, s.Nodes())
}
|
17
lib/ir/node.go
Normal file
17
lib/ir/node.go
Normal file
|
@ -0,0 +1,17 @@
|
|||
package ir
|
||||
|
||||
// Node is a structure that groups
// the information about IR node.
type Node struct {
	key []byte
}

// SetKey is an IR node public key setter.
// The slice is stored as-is (not copied).
func (s *Node) SetKey(v []byte) {
	s.key = v
}

// Key is an IR node public key getter.
func (s Node) Key() []byte {
	return s.key
}
|
16
lib/ir/node_test.go
Normal file
16
lib/ir/node_test.go
Normal file
|
@ -0,0 +1,16 @@
|
|||
package ir
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestNode checks that SetKey/Key round-trip the binary key.
func TestNode(t *testing.T) {
	s := Node{}

	key := []byte{1, 2, 3}
	s.SetKey(key)

	require.Equal(t, key, s.Key())
}
|
94
lib/ir/storage.go
Normal file
94
lib/ir/storage.go
Normal file
|
@ -0,0 +1,94 @@
|
|||
package ir
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
crypto "github.com/nspcc-dev/neofs-crypto"
|
||||
"github.com/nspcc-dev/neofs-node/internal"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Storage is an interface of the storage of info about NeoFS IR.
type Storage interface {
	GetIRInfo(GetInfoParams) (*GetInfoResult, error)
}

// GetInfoParams is a structure that groups the parameters
// for IR info receiving operation.
// It is currently empty and reserved for future options.
type GetInfoParams struct {
}

// GetInfoResult is a structure that groups
// values returned by IR info receiving operation.
type GetInfoResult struct {
	info Info
}

// ErrNilStorage is returned by functions that expect
// a non-nil Storage, but received nil.
const ErrNilStorage = internal.Error("inner ring storage is nil")

// SetInfo is an IR info setter.
func (s *GetInfoResult) SetInfo(v Info) {
	s.info = v
}

// Info is an IR info getter.
func (s GetInfoResult) Info() Info {
	return s.info
}
|
||||
|
||||
// BinaryKeyList returns the list of binary public key of IR nodes.
|
||||
//
|
||||
// If passed Storage is nil, ErrNilStorage returns.
|
||||
func BinaryKeyList(storage Storage) ([][]byte, error) {
|
||||
if storage == nil {
|
||||
return nil, ErrNilStorage
|
||||
}
|
||||
|
||||
// get IR info
|
||||
getRes, err := storage.GetIRInfo(GetInfoParams{})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(
|
||||
err,
|
||||
"could not get information about IR",
|
||||
)
|
||||
}
|
||||
|
||||
nodes := getRes.Info().Nodes()
|
||||
|
||||
keys := make([][]byte, 0, len(nodes))
|
||||
|
||||
for i := range nodes {
|
||||
keys = append(keys, nodes[i].Key())
|
||||
}
|
||||
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
// IsInnerRingKey checks if the passed argument is the
|
||||
// key of one of IR nodes.
|
||||
//
|
||||
// Uses BinaryKeyList function to receive the key list of IR nodes internally.
|
||||
//
|
||||
// If passed key slice is empty, crypto.ErrEmptyPublicKey returns immediately.
|
||||
func IsInnerRingKey(storage Storage, key []byte) (bool, error) {
|
||||
// check key emptiness
|
||||
// TODO: summarize the void check to a full IR key-format check.
|
||||
if len(key) == 0 {
|
||||
return false, crypto.ErrEmptyPublicKey
|
||||
}
|
||||
|
||||
irKeys, err := BinaryKeyList(storage)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
for i := range irKeys {
|
||||
if bytes.Equal(irKeys[i], key) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
101
lib/ir/storage_test.go
Normal file
101
lib/ir/storage_test.go
Normal file
|
@ -0,0 +1,101 @@
|
|||
package ir
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
crypto "github.com/nspcc-dev/neofs-crypto"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// testInfoReceiver is a mock Storage implementation.
// GetIRInfo returns either IR info composed from the preset
// keys, or the preset error (error takes precedence).
type testInfoReceiver struct {
	// binary public keys to wrap into IR nodes
	keys [][]byte

	// if non-nil, returned by GetIRInfo as-is
	err error
}
|
||||
|
||||
func (s testInfoReceiver) GetIRInfo(GetInfoParams) (*GetInfoResult, error) {
|
||||
if s.err != nil {
|
||||
return nil, s.err
|
||||
}
|
||||
|
||||
nodes := make([]Node, 0, len(s.keys))
|
||||
|
||||
for i := range s.keys {
|
||||
node := Node{}
|
||||
node.SetKey(s.keys[i])
|
||||
|
||||
nodes = append(nodes, node)
|
||||
}
|
||||
|
||||
info := Info{}
|
||||
info.SetNodes(nodes)
|
||||
|
||||
res := new(GetInfoResult)
|
||||
res.SetInfo(info)
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// addKey appends a binary key to the set of keys
// served by GetIRInfo.
func (s *testInfoReceiver) addKey(key []byte) {
	s.keys = append(s.keys, key)
}
|
||||
|
||||
func TestGetInfoResult(t *testing.T) {
|
||||
s := GetInfoResult{}
|
||||
|
||||
info := Info{}
|
||||
|
||||
n := Node{}
|
||||
n.SetKey([]byte{1, 2, 3})
|
||||
|
||||
info.SetNodes([]Node{
|
||||
n,
|
||||
})
|
||||
|
||||
s.SetInfo(info)
|
||||
|
||||
require.Equal(t, info, s.Info())
|
||||
}
|
||||
|
||||
func TestIsInnerRingKey(t *testing.T) {
|
||||
var (
|
||||
res bool
|
||||
err error
|
||||
s = new(testInfoReceiver)
|
||||
)
|
||||
|
||||
// empty public key
|
||||
res, err = IsInnerRingKey(nil, nil)
|
||||
require.EqualError(t, err, crypto.ErrEmptyPublicKey.Error())
|
||||
|
||||
key := []byte{1, 2, 3}
|
||||
|
||||
// nil Storage
|
||||
res, err = IsInnerRingKey(nil, key)
|
||||
require.EqualError(t, err, ErrNilStorage.Error())
|
||||
|
||||
// force Storage to return an error
|
||||
s.err = errors.New("some error")
|
||||
|
||||
// Storage error
|
||||
res, err = IsInnerRingKey(s, key)
|
||||
require.EqualError(t, errors.Cause(err), s.err.Error())
|
||||
|
||||
// reset Storage error
|
||||
s.err = nil
|
||||
|
||||
// IR keys don't contain key
|
||||
s.addKey(append(key, 1))
|
||||
|
||||
res, err = IsInnerRingKey(s, key)
|
||||
require.NoError(t, err)
|
||||
require.False(t, res)
|
||||
|
||||
// IR keys contain key
|
||||
s.addKey(key)
|
||||
|
||||
res, err = IsInnerRingKey(s, key)
|
||||
require.NoError(t, err)
|
||||
require.True(t, res)
|
||||
}
|
35
lib/localstore/alias.go
Normal file
35
lib/localstore/alias.go
Normal file
|
@ -0,0 +1,35 @@
|
|||
package localstore
|
||||
|
||||
import (
|
||||
"github.com/nspcc-dev/neofs-api-go/hash"
|
||||
"github.com/nspcc-dev/neofs-api-go/object"
|
||||
"github.com/nspcc-dev/neofs-api-go/refs"
|
||||
)
|
||||
|
||||
// CID is a type alias of
// CID from refs package of neofs-api-go.
type CID = refs.CID

// SGID is a type alias of
// ObjectID from refs package of neofs-api-go.
//
// NOTE(review): the alias target is refs.ObjectID, not a dedicated
// SGID type — confirm storage-group IDs are intentionally plain object IDs.
type SGID = refs.ObjectID

// Header is a type alias of
// Header from object package of neofs-api-go.
type Header = object.Header

// Object is a type alias of
// Object from object package of neofs-api-go.
type Object = object.Object

// ObjectID is a type alias of
// ObjectID from refs package of neofs-api-go.
type ObjectID = refs.ObjectID

// Address is a type alias of
// Address from refs package of neofs-api-go.
type Address = refs.Address

// Hash is a type alias of
// Hash from hash package of neofs-api-go.
type Hash = hash.Hash
|
38
lib/localstore/del.go
Normal file
38
lib/localstore/del.go
Normal file
|
@ -0,0 +1,38 @@
|
|||
package localstore
|
||||
|
||||
import (
|
||||
"github.com/nspcc-dev/neofs-api-go/refs"
|
||||
"github.com/nspcc-dev/neofs-node/lib/metrics"
|
||||
"github.com/pkg/errors"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
func (l *localstore) Del(key refs.Address) error {
|
||||
k, err := key.Hash()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Localstore Del failed on key.Marshal")
|
||||
}
|
||||
|
||||
// try to fetch object for metrics
|
||||
obj, err := l.Get(key)
|
||||
if err != nil {
|
||||
l.log.Warn("localstore Del failed on localstore.Get", zap.Error(err))
|
||||
}
|
||||
|
||||
if err := l.blobBucket.Del(k); err != nil {
|
||||
l.log.Warn("Localstore Del failed on BlobBucket.Del", zap.Error(err))
|
||||
}
|
||||
|
||||
if err := l.metaBucket.Del(k); err != nil {
|
||||
return errors.Wrap(err, "Localstore Del failed on MetaBucket.Del")
|
||||
}
|
||||
|
||||
if obj != nil {
|
||||
l.col.UpdateContainer(
|
||||
key.CID,
|
||||
obj.SystemHeader.PayloadLength,
|
||||
metrics.RemSpace)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
306
lib/localstore/filter.go
Normal file
306
lib/localstore/filter.go
Normal file
|
@ -0,0 +1,306 @@
|
|||
package localstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/nspcc-dev/neofs-node/internal"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type (
	// FilterCode is an enumeration of filter return codes.
	FilterCode int

	// PriorityFlag is an enumeration of priority flags.
	PriorityFlag int

	// filterPipelineSet is a set of FilterPipelines,
	// sortable by descending priority (see Less).
	filterPipelineSet []FilterPipeline

	// FilterFunc is a function that checks whether an ObjectMeta matches a specific criterion.
	FilterFunc func(ctx context.Context, meta *ObjectMeta) *FilterResult

	// FilterResult groups the ObjectMeta filter result values.
	FilterResult struct {
		c FilterCode // result code

		e error // optional error detail
	}

	// FilterPipeline is an interface of ObjectMeta filtering tool with sub-filters and priorities.
	FilterPipeline interface {
		Pass(ctx context.Context, meta *ObjectMeta) *FilterResult
		PutSubFilter(params SubFilterParams) error
		GetPriority() uint64
		SetPriority(uint64)
		GetName() string
	}

	// FilterParams groups the parameters of FilterPipeline constructor.
	FilterParams struct {
		Name       string
		Priority   uint64
		FilterFunc FilterFunc
	}

	// SubFilterParams groups the parameters of sub-filter registration.
	SubFilterParams struct {
		PriorityFlag
		FilterPipeline
		OnIgnore FilterCode // final code when the sub-filter returns CodeIgnore
		OnPass   FilterCode // final code when the sub-filter returns CodePass
		OnFail   FilterCode // final code when the sub-filter returns CodeFail
	}

	// filterPipeline is the FilterPipeline implementation
	// returned by NewFilter.
	filterPipeline struct {
		*sync.RWMutex // guards all fields below

		name     string
		pri      uint64
		filterFn FilterFunc

		// highest priority among registered sub-filters
		maxSubPri uint64
		// per-sub-filter mapping from its result code to the final code
		mSubResult map[string]map[FilterCode]FilterCode
		// sub-filters ordered by descending priority
		subFilters []FilterPipeline
	}
)
|
||||
|
||||
const (
	// PriorityValue is a PriorityFlag of the sub-filter registration with GetPriority() value.
	PriorityValue PriorityFlag = iota

	// PriorityMax is a PriorityFlag of the sub-filter registration with maximum priority.
	PriorityMax

	// PriorityMin is a PriorityFlag of the sub-filter registration with minimum priority (zero).
	PriorityMin
)
|
||||
|
||||
const (
	// CodeUndefined is an undefined FilterCode.
	// It is the zero value and compares <= to every valid code.
	CodeUndefined FilterCode = iota

	// CodePass is a FilterCode of filter passage.
	CodePass

	// CodeFail is a FilterCode of filter failure.
	CodeFail

	// CodeIgnore is a FilterCode of filter ignoring.
	CodeIgnore
)
|
||||
|
||||
// Shared FilterResult singletons returned by the Result* helpers.
// They carry a nil error and must not be mutated.
var (
	rPass = &FilterResult{
		c: CodePass,
	}

	rFail = &FilterResult{
		c: CodeFail,
	}

	rIgnore = &FilterResult{
		c: CodeIgnore,
	}

	rUndefined = &FilterResult{
		c: CodeUndefined,
	}
)
|
||||
|
||||
// ResultPass returns the FilterResult with CodePass code and nil error.
//
// The returned value is shared; callers must not mutate it.
func ResultPass() *FilterResult {
	return rPass
}
|
||||
|
||||
// ResultFail returns the FilterResult with CodeFail code and nil error.
//
// The returned value is shared; callers must not mutate it.
func ResultFail() *FilterResult {
	return rFail
}
|
||||
|
||||
// ResultIgnore returns the FilterResult with CodeIgnore code and nil error.
//
// The returned value is shared; callers must not mutate it.
func ResultIgnore() *FilterResult {
	return rIgnore
}
|
||||
|
||||
// ResultUndefined returns the FilterResult with CodeUndefined code and nil error.
//
// The returned value is shared; callers must not mutate it.
func ResultUndefined() *FilterResult {
	return rUndefined
}
|
||||
|
||||
// ResultWithError returns the FilterResult with passed code and error.
|
||||
func ResultWithError(c FilterCode, e error) *FilterResult {
|
||||
return &FilterResult{
|
||||
e: e,
|
||||
c: c,
|
||||
}
|
||||
}
|
||||
|
||||
// Code returns the filter result code.
func (s *FilterResult) Code() FilterCode {
	return s.c
}
|
||||
|
||||
// Err returns the filter result error (nil for the shared
// Result* singletons).
func (s *FilterResult) Err() error {
	return s.e
}
|
||||
|
||||
// sort.Interface implementation ordering pipelines
// by DESCENDING priority (note the > in Less).
func (f filterPipelineSet) Len() int           { return len(f) }
func (f filterPipelineSet) Less(i, j int) bool { return f[i].GetPriority() > f[j].GetPriority() }
func (f filterPipelineSet) Swap(i, j int)      { f[i], f[j] = f[j], f[i] }
|
||||
|
||||
func (r FilterCode) String() string {
|
||||
switch r {
|
||||
case CodePass:
|
||||
return "PASSED"
|
||||
case CodeFail:
|
||||
return "FAILED"
|
||||
case CodeIgnore:
|
||||
return "IGNORED"
|
||||
default:
|
||||
return "UNDEFINED"
|
||||
}
|
||||
}
|
||||
|
||||
// NewFilter is a FilterPipeline constructor.
|
||||
func NewFilter(p *FilterParams) FilterPipeline {
|
||||
return &filterPipeline{
|
||||
RWMutex: new(sync.RWMutex),
|
||||
name: p.Name,
|
||||
pri: p.Priority,
|
||||
filterFn: p.FilterFunc,
|
||||
mSubResult: make(map[string]map[FilterCode]FilterCode),
|
||||
}
|
||||
}
|
||||
|
||||
// AllPassIncludingFilter returns FilterPipeline with sub-filters composed from parameters.
|
||||
// Result filter fails with CodeFail code if any of the sub-filters returns not a CodePass code.
|
||||
func AllPassIncludingFilter(name string, params ...*FilterParams) (FilterPipeline, error) {
|
||||
res := NewFilter(&FilterParams{
|
||||
Name: name,
|
||||
FilterFunc: SkippingFilterFunc,
|
||||
})
|
||||
|
||||
for i := range params {
|
||||
if err := res.PutSubFilter(SubFilterParams{
|
||||
FilterPipeline: NewFilter(params[i]),
|
||||
OnIgnore: CodeFail,
|
||||
OnFail: CodeFail,
|
||||
}); err != nil {
|
||||
return nil, errors.Wrap(err, "could not create all pass including filter")
|
||||
}
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (p *filterPipeline) Pass(ctx context.Context, meta *ObjectMeta) *FilterResult {
|
||||
p.RLock()
|
||||
defer p.RUnlock()
|
||||
|
||||
for i := range p.subFilters {
|
||||
subResult := p.subFilters[i].Pass(ctx, meta)
|
||||
subName := p.subFilters[i].GetName()
|
||||
|
||||
cSub := subResult.Code()
|
||||
|
||||
if cSub <= CodeUndefined {
|
||||
return ResultUndefined()
|
||||
}
|
||||
|
||||
if cFin := p.mSubResult[subName][cSub]; cFin != CodeIgnore {
|
||||
return ResultWithError(cFin, subResult.Err())
|
||||
}
|
||||
}
|
||||
|
||||
if p.filterFn == nil {
|
||||
return ResultUndefined()
|
||||
}
|
||||
|
||||
return p.filterFn(ctx, meta)
|
||||
}
|
||||
|
||||
func (p *filterPipeline) PutSubFilter(params SubFilterParams) error {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
|
||||
if params.FilterPipeline == nil {
|
||||
return internal.Error("could not put sub filter: empty filter pipeline")
|
||||
}
|
||||
|
||||
name := params.FilterPipeline.GetName()
|
||||
if _, ok := p.mSubResult[name]; ok {
|
||||
return errors.Errorf("filter %s is already in pipeline %s", name, p.GetName())
|
||||
}
|
||||
|
||||
if params.PriorityFlag != PriorityMin {
|
||||
if pri := params.FilterPipeline.GetPriority(); pri < math.MaxUint64 {
|
||||
params.FilterPipeline.SetPriority(pri + 1)
|
||||
}
|
||||
} else {
|
||||
params.FilterPipeline.SetPriority(0)
|
||||
}
|
||||
|
||||
switch pri := params.FilterPipeline.GetPriority(); params.PriorityFlag {
|
||||
case PriorityMax:
|
||||
if p.maxSubPri < math.MaxUint64 {
|
||||
p.maxSubPri++
|
||||
}
|
||||
|
||||
params.FilterPipeline.SetPriority(p.maxSubPri)
|
||||
case PriorityValue:
|
||||
if pri > p.maxSubPri {
|
||||
p.maxSubPri = pri
|
||||
}
|
||||
}
|
||||
|
||||
if params.OnFail <= 0 {
|
||||
params.OnFail = CodeIgnore
|
||||
}
|
||||
|
||||
if params.OnIgnore <= 0 {
|
||||
params.OnIgnore = CodeIgnore
|
||||
}
|
||||
|
||||
if params.OnPass <= 0 {
|
||||
params.OnPass = CodeIgnore
|
||||
}
|
||||
|
||||
p.mSubResult[name] = map[FilterCode]FilterCode{
|
||||
CodePass: params.OnPass,
|
||||
CodeIgnore: params.OnIgnore,
|
||||
CodeFail: params.OnFail,
|
||||
}
|
||||
|
||||
p.subFilters = append(p.subFilters, params.FilterPipeline)
|
||||
|
||||
sort.Sort(filterPipelineSet(p.subFilters))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetPriority returns the pipeline priority under a read lock.
func (p *filterPipeline) GetPriority() uint64 {
	p.RLock()
	defer p.RUnlock()

	return p.pri
}
|
||||
// SetPriority sets the pipeline priority under a write lock.
func (p *filterPipeline) SetPriority(pri uint64) {
	p.Lock()
	p.pri = pri
	p.Unlock()
}
|
||||
|
||||
func (p *filterPipeline) GetName() string {
|
||||
p.RLock()
|
||||
defer p.RUnlock()
|
||||
|
||||
if p.name == "" {
|
||||
return "FILTER_UNNAMED"
|
||||
}
|
||||
|
||||
return p.name
|
||||
}
|
39
lib/localstore/filter_funcs.go
Normal file
39
lib/localstore/filter_funcs.go
Normal file
|
@ -0,0 +1,39 @@
|
|||
package localstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// SkippingFilterFunc is a FilterFunc that always returns result with
// CodePass code and nil error, regardless of the ObjectMeta.
func SkippingFilterFunc(_ context.Context, _ *ObjectMeta) *FilterResult {
	return ResultPass()
}
|
||||
|
||||
// ContainerFilterFunc returns a FilterFunc that returns:
// - result with CodePass code and nil error if CID of ObjectMeta is from the CID list;
// - result with CodeFail code and nil error otherwise.
func ContainerFilterFunc(cidList []CID) FilterFunc {
	return func(_ context.Context, meta *ObjectMeta) *FilterResult {
		// linear scan of the allowed container IDs
		for i := range cidList {
			if meta.Object.SystemHeader.CID.Equal(cidList[i]) {
				return ResultPass()
			}
		}

		return ResultFail()
	}
}
|
||||
|
||||
// StoredEarlierThanFilterFunc returns a FilterFunc that returns:
// - result with CodePass code and nil error if StoreEpoch is less than the argument;
// - result with CodeFail code and nil error otherwise.
func StoredEarlierThanFilterFunc(epoch uint64) FilterFunc {
	return func(_ context.Context, meta *ObjectMeta) *FilterResult {
		if meta.StoreEpoch < epoch {
			return ResultPass()
		}

		return ResultFail()
	}
}
|
38
lib/localstore/filter_test.go
Normal file
38
lib/localstore/filter_test.go
Normal file
|
@ -0,0 +1,38 @@
|
|||
package localstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/nspcc-dev/neofs-node/internal"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestSkippingFilterFunc checks that the skipping filter
// passes an arbitrary (empty) ObjectMeta.
func TestSkippingFilterFunc(t *testing.T) {
	res := SkippingFilterFunc(context.TODO(), &ObjectMeta{})
	require.Equal(t, CodePass, res.Code())
}
|
||||
|
||||
func TestFilterResult(t *testing.T) {
|
||||
var (
|
||||
r *FilterResult
|
||||
c = CodePass
|
||||
e = internal.Error("test error")
|
||||
)
|
||||
|
||||
r = ResultPass()
|
||||
require.Equal(t, CodePass, r.Code())
|
||||
require.NoError(t, r.Err())
|
||||
|
||||
r = ResultFail()
|
||||
require.Equal(t, CodeFail, r.Code())
|
||||
require.NoError(t, r.Err())
|
||||
|
||||
r = ResultIgnore()
|
||||
require.Equal(t, CodeIgnore, r.Code())
|
||||
require.NoError(t, r.Err())
|
||||
|
||||
r = ResultWithError(c, e)
|
||||
require.Equal(t, c, r.Code())
|
||||
require.EqualError(t, r.Err(), e.Error())
|
||||
}
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue