Compare commits

..

7 commits

Author SHA1 Message Date
Evgenii Stratonikov
1231db87b5 do fix price trick
Signed-off-by: Evgenii Stratonikov <evgeniy@morphbits.ru>
2022-08-18 11:27:15 +03:00
Evgenii Stratonikov
9a91ec4c5b m
Signed-off-by: Evgenii Stratonikov <evgeniy@morphbits.ru>
2022-08-18 11:20:25 +03:00
Evgenii Stratonikov
bfabbb7968 renew neofs TLD
Signed-off-by: Evgenii Stratonikov <evgeniy@morphbits.ru>
2022-08-17 19:22:28 +03:00
Evgenii Stratonikov
f9ff3adef2 neofs-adm: remove neofs contract
Signed-off-by: Evgenii Stratonikov <evgeniy@morphbits.ru>
2022-08-17 19:01:31 +03:00
Evgenii Stratonikov
44bf2e8d06 neofs-adm: set hardcode nns hashes
Signed-off-by: Evgenii Stratonikov <evgeniy@morphbits.ru>
2022-08-17 18:30:50 +03:00
Alex Vanin
bd3f43cdeb Disable things in notary disabled environment
Signed-off-by: Alex Vanin <alexey@nspcc.ru>
2022-08-17 18:30:49 +03:00
Alex Vanin
a01b05a09c Disable things in notary disabled environment
Signed-off-by: Alex Vanin <alexey@nspcc.ru>
2022-08-17 18:29:34 +03:00
1093 changed files with 16402 additions and 31360 deletions


@ -1,19 +1,19 @@
FROM golang:1.18 as builder
FROM golang:1.17 as builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository
WORKDIR /src
COPY . /src
RUN make bin/frostfs-adm
RUN make bin/neofs-adm
# Executable image
FROM alpine AS frostfs-adm
FROM alpine AS neofs-adm
RUN apk add --no-cache bash
WORKDIR /
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=builder /src/bin/frostfs-adm /bin/frostfs-adm
COPY --from=builder /src/bin/neofs-adm /bin/neofs-adm
CMD ["frostfs-adm"]
CMD ["neofs-adm"]


@ -1,25 +0,0 @@
FROM golang:1.19
WORKDIR /tmp
# Install apt packages
RUN apt-get update && apt-get install --no-install-recommends -y \
pip \
&& apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false \
&& rm -rf /var/lib/apt/lists/*
# Dash → Bash
RUN echo "dash dash/sh boolean false" | debconf-set-selections
RUN DEBIAN_FRONTEND=noninteractive dpkg-reconfigure dash
RUN useradd -u 1234 -d /home/ci -m ci
USER ci
ENV PATH="$PATH:/home/ci/.local/bin"
COPY .pre-commit-config.yaml .
RUN pip install "pre-commit==3.1.1" \
&& git init . \
&& pre-commit install-hooks \
&& rm -rf /tmp/*
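A hedged sketch of how such a CI image could be built and used to run the baked-in hooks against a local checkout; the file name `Dockerfile.ci`, the tag, and the mount path are assumptions:

```sh
# Build the CI image (file name and tag are assumptions)
docker build -f Dockerfile.ci -t frostfs-ci:local .
# Run the pre-installed hooks against the current checkout, mounted into the container
docker run --rm -v "$(pwd)":/home/ci/src -w /home/ci/src frostfs-ci:local pre-commit run --all-files
```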


@ -1,19 +1,19 @@
FROM golang:1.18 as builder
FROM golang:1.17 as builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository
WORKDIR /src
COPY . /src
RUN make bin/frostfs-cli
RUN make bin/neofs-cli
# Executable image
FROM alpine AS frostfs-cli
FROM alpine AS neofs-cli
RUN apk add --no-cache bash
WORKDIR /
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=builder /src/bin/frostfs-cli /bin/frostfs-cli
COPY --from=builder /src/bin/neofs-cli /bin/neofs-cli
CMD ["frostfs-cli"]
CMD ["neofs-cli"]


@ -3,6 +3,6 @@ RUN apk add --no-cache bash ca-certificates
WORKDIR /
COPY bin/frostfs-adm /bin/frostfs-adm
COPY bin/neofs-adm /bin/neofs-adm
CMD ["frostfs-adm"]
CMD ["neofs-adm"]


@ -3,6 +3,6 @@ RUN apk add --no-cache bash ca-certificates
WORKDIR /
COPY bin/frostfs-cli /bin/frostfs-cli
COPY bin/neofs-cli /bin/neofs-cli
CMD ["frostfs-cli"]
CMD ["neofs-cli"]


@ -3,6 +3,6 @@ RUN apk add --no-cache bash ca-certificates
WORKDIR /
COPY bin/frostfs-ir /bin/frostfs-ir
COPY bin/neofs-ir /bin/neofs-ir
CMD ["frostfs-ir"]
CMD ["neofs-ir"]


@ -3,6 +3,6 @@ RUN apk add --no-cache bash ca-certificates
WORKDIR /
COPY bin/frostfs-node /bin/frostfs-node
COPY bin/neofs-node /bin/neofs-node
CMD ["frostfs-node"]
CMD ["neofs-node"]


@ -1,18 +1,18 @@
FROM golang:1.18 as builder
FROM golang:1.17 as builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository
WORKDIR /src
COPY . /src
RUN make bin/frostfs-ir
RUN make bin/neofs-ir
# Executable image
FROM alpine AS frostfs-ir
FROM alpine AS neofs-ir
RUN apk add --no-cache bash
WORKDIR /
COPY --from=builder /src/bin/frostfs-ir /bin/frostfs-ir
COPY --from=builder /src/bin/neofs-ir /bin/neofs-ir
CMD ["frostfs-ir"]
CMD ["neofs-ir"]


@ -1,18 +1,18 @@
FROM golang:1.18 as builder
FROM golang:1.17 as builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository
WORKDIR /src
COPY . /src
RUN make bin/frostfs-node
RUN make bin/neofs-node
# Executable image
FROM alpine AS frostfs-node
FROM alpine AS neofs-node
RUN apk add --no-cache bash
WORKDIR /
COPY --from=builder /src/bin/frostfs-node /bin/frostfs-node
COPY --from=builder /src/bin/neofs-node /bin/neofs-node
CMD ["frostfs-node"]
CMD ["neofs-node"]


@ -1,19 +1,19 @@
FROM golang:1.18 as builder
FROM golang:1.17 as builder
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository
WORKDIR /src
COPY . /src
RUN make bin/frostfs-node
RUN make bin/neofs-node
# Executable image
FROM alpine AS frostfs-node
FROM alpine AS neofs-node
RUN apk add --no-cache bash
WORKDIR /
COPY --from=builder /src/bin/frostfs-node /bin/frostfs-node
COPY --from=builder /src/bin/neofs-node /bin/neofs-node
COPY --from=builder /src/config/testnet/config.yml /config.yml
CMD ["frostfs-node", "--config", "/config.yml"]
CMD ["neofs-node", "--config", "/config.yml"]


@ -5,5 +5,4 @@ docker-compose.yml
Dockerfile
temp
.dockerignore
docker
.cache
docker


@ -41,10 +41,3 @@ assignees: ''
* Version used:
* Server setup and configuration:
* Operating System and version (`uname -a`):
## Don't forget to add labels!
- component label (`frostfs-adm`, `frostfs-storage`, ...)
- `goodfirstissue`, `helpwanted` if needed
- does this issue belong to an epic?
- priority (`P0`-`P4`) if already triaged
- quarter label (`202XQY`) if possible


@ -18,11 +18,3 @@ assignees: ''
## Additional context
<!-- Add any other context or screenshots about the feature request here. -->
## Don't forget to add labels!
- component label (`neofs-adm`, `neofs-storage`, ...)
- issue type (`enhancement`, `refactor`, ...)
- `goodfirstissue`, `helpwanted` if needed
- does this issue belong to an epic?
- priority (`P0`-`P4`) if already triaged
- quarter label (`202XQY`) if possible

.github/logo.svg

@ -1,70 +1,129 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 25.0.1, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.1" id="Слой_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
viewBox="0 0 184.2 51.8" style="enable-background:new 0 0 184.2 51.8;" xml:space="preserve">
<style type="text/css">
.st0{display:none;}
.st1{display:inline;}
.st2{fill:#01E397;}
.st3{display:inline;fill:#010032;}
.st4{display:inline;fill:#00E599;}
.st5{display:inline;fill:#00AF92;}
.st6{fill:#00C3E5;}
</style>
<g id="Layer_2">
<g id="Layer_1-2" class="st0">
<g class="st1">
<path class="st2" d="M146.6,18.3v7.2h10.9V29h-10.9v10.7h-4V14.8h18v3.5H146.6z"/>
<path class="st2" d="M180,15.7c1.7,0.9,3,2.2,4,3.8l-3,2.7c-0.6-1.3-1.5-2.4-2.6-3.3c-1.3-0.7-2.8-1-4.3-1
c-1.4-0.1-2.8,0.3-4,1.1c-0.9,0.5-1.5,1.5-1.4,2.6c0,1,0.5,1.9,1.4,2.4c1.5,0.8,3.2,1.3,4.9,1.5c1.9,0.3,3.7,0.8,5.4,1.6
c1.2,0.5,2.2,1.3,2.9,2.3c0.6,1,1,2.2,0.9,3.4c0,1.4-0.5,2.7-1.3,3.8c-0.9,1.2-2.1,2.1-3.5,2.6c-1.7,0.6-3.4,0.9-5.2,0.8
c-5,0-8.6-1.6-10.7-5l2.9-2.8c0.7,1.4,1.8,2.5,3.1,3.3c1.5,0.7,3.1,1.1,4.7,1c1.5,0.1,2.9-0.2,4.2-0.9c0.9-0.5,1.5-1.5,1.5-2.6
c0-0.9-0.5-1.8-1.3-2.2c-1.5-0.7-3.1-1.2-4.8-1.5c-1.9-0.3-3.7-0.8-5.5-1.5c-1.2-0.5-2.2-1.4-3-2.4c-0.6-1-1-2.2-0.9-3.4
c0-1.4,0.4-2.7,1.2-3.8c0.8-1.2,2-2.2,3.3-2.8c1.6-0.7,3.4-1.1,5.2-1C176.1,14.3,178.2,14.8,180,15.7z"/>
</g>
<path class="st3" d="M73.3,16.3c1.9,1.9,2.9,4.5,2.7,7.1v15.9h-4V24.8c0-2.6-0.5-4.5-1.6-5.7c-1.2-1.2-2.8-1.8-4.5-1.7
c-1.3,0-2.5,0.3-3.7,0.8c-1.2,0.7-2.2,1.7-2.9,2.9c-0.8,1.5-1.1,3.2-1.1,4.9v13.3h-4V15.1l3.6,1.5v1.7c0.8-1.5,2.1-2.6,3.6-3.3
c1.5-0.8,3.2-1.2,4.9-1.1C68.9,13.8,71.3,14.7,73.3,16.3z"/>
<path class="st3" d="M104.4,28.3H85.6c0.1,2.2,1,4.3,2.5,5.9c1.5,1.4,3.5,2.2,5.6,2.1c1.6,0.1,3.2-0.2,4.6-0.9
c1.1-0.6,2-1.6,2.5-2.8l3.3,1.8c-0.9,1.7-2.3,3.1-4,4c-2,1-4.2,1.5-6.4,1.4c-3.7,0-6.7-1.1-8.8-3.4s-3.2-5.5-3.2-9.6s1-7.2,3-9.5
s5-3.4,8.7-3.4c2.1-0.1,4.2,0.5,6.1,1.5c1.6,1,3,2.5,3.8,4.2c0.9,1.8,1.3,3.9,1.3,5.9C104.6,26.4,104.6,27.4,104.4,28.3z
M88.1,19.3c-1.4,1.5-2.2,3.4-2.4,5.5h15.1c-0.2-2-1-3.9-2.3-5.5c-1.4-1.3-3.2-2-5.1-1.9C91.5,17.3,89.6,18,88.1,19.3z"/>
<path class="st3" d="M131,17.3c2.2,2.3,3.2,5.5,3.2,9.5s-1,7.3-3.2,9.6s-5.1,3.4-8.8,3.4s-6.7-1.1-8.9-3.4s-3.2-5.5-3.2-9.6
s1.1-7.2,3.2-9.5s5.1-3.4,8.9-3.4S128.9,15,131,17.3z M116.2,19.9c-1.5,2-2.2,4.4-2.1,6.9c-0.2,2.5,0.6,5,2.1,7
c1.5,1.7,3.7,2.7,6,2.6c2.3,0.1,4.4-0.9,5.9-2.6c1.5-2,2.3-4.5,2.1-7c0.1-2.5-0.6-4.9-2.1-6.9c-1.5-1.7-3.6-2.7-5.9-2.6
C119.9,17.2,117.7,18.2,116.2,19.9z"/>
<polygon class="st4" points="0,9.1 0,43.7 22.5,51.8 22.5,16.9 46.8,7.9 24.8,0 "/>
<polygon class="st5" points="24.3,17.9 24.3,36.8 46.8,44.9 46.8,9.6 "/>
</g>
<g>
<g>
<path class="st6" d="M41.6,17.5H28.2v6.9h10.4v3.3H28.2v10.2h-3.9V14.2h17.2V17.5z"/>
<path class="st6" d="M45.8,37.9v-18h3.3l0.4,3.2c0.5-1.2,1.2-2.1,2.1-2.7c0.9-0.6,2.1-0.9,3.5-0.9c0.4,0,0.7,0,1.1,0.1
c0.4,0.1,0.7,0.2,0.9,0.3l-0.5,3.4c-0.3-0.1-0.6-0.2-0.9-0.2C55.4,23,54.9,23,54.4,23c-0.7,0-1.5,0.2-2.2,0.6
c-0.7,0.4-1.3,1-1.8,1.8s-0.7,1.8-0.7,3v9.5H45.8z"/>
<path class="st6" d="M68.6,19.6c1.8,0,3.3,0.4,4.6,1.1c1.3,0.7,2.4,1.8,3.1,3.2s1.1,3.1,1.1,5c0,1.9-0.4,3.6-1.1,5
c-0.8,1.4-1.8,2.5-3.1,3.2c-1.3,0.7-2.9,1.1-4.6,1.1s-3.3-0.4-4.6-1.1c-1.3-0.7-2.4-1.8-3.2-3.2c-0.8-1.4-1.2-3.1-1.2-5
c0-1.9,0.4-3.6,1.2-5s1.8-2.5,3.2-3.2C65.3,19.9,66.8,19.6,68.6,19.6z M68.6,22.6c-1.1,0-2,0.2-2.8,0.7c-0.8,0.5-1.3,1.2-1.7,2.1
s-0.6,2.1-0.6,3.5c0,1.3,0.2,2.5,0.6,3.4s1,1.7,1.7,2.2s1.7,0.7,2.8,0.7c1.1,0,2-0.2,2.7-0.7c0.7-0.5,1.3-1.2,1.7-2.2
s0.6-2.1,0.6-3.4c0-1.4-0.2-2.5-0.6-3.5s-1-1.6-1.7-2.1C70.6,22.8,69.6,22.6,68.6,22.6z"/>
<path class="st6" d="M89.2,38.3c-1.8,0-3.4-0.3-4.9-1c-1.5-0.7-2.7-1.7-3.5-3l2.7-2.3c0.5,1,1.3,1.8,2.3,2.4
c1,0.6,2.2,0.9,3.6,0.9c1.1,0,2-0.2,2.6-0.6c0.6-0.4,1-0.9,1-1.6c0-0.5-0.2-0.9-0.5-1.2s-0.9-0.6-1.7-0.8l-3.8-0.8
c-1.9-0.4-3.3-1-4.1-1.9c-0.8-0.9-1.2-1.9-1.2-3.3c0-1,0.3-1.9,0.9-2.7c0.6-0.8,1.4-1.5,2.5-2s2.5-0.8,4-0.8c1.8,0,3.3,0.3,4.6,1
c1.3,0.6,2.2,1.5,2.9,2.7l-2.7,2.2c-0.5-1-1.1-1.7-2-2.1c-0.9-0.5-1.8-0.7-2.8-0.7c-0.8,0-1.4,0.1-2,0.3c-0.6,0.2-1,0.5-1.3,0.8
c-0.3,0.3-0.4,0.7-0.4,1.2c0,0.5,0.2,0.9,0.5,1.3s1,0.6,1.9,0.8l4.1,0.9c1.7,0.3,2.9,0.9,3.7,1.7c0.7,0.8,1.1,1.8,1.1,2.9
c0,1.2-0.3,2.2-0.9,3c-0.6,0.9-1.5,1.6-2.6,2C92.1,38.1,90.7,38.3,89.2,38.3z"/>
<path class="st6" d="M112.8,19.9v3H99.3v-3H112.8z M106.6,14.6v17.9c0,0.9,0.2,1.5,0.7,1.9c0.5,0.4,1.1,0.6,1.9,0.6
c0.6,0,1.2-0.1,1.7-0.3c0.5-0.2,0.9-0.5,1.3-0.8l0.9,2.8c-0.6,0.5-1.2,0.9-2,1.1c-0.8,0.3-1.7,0.4-2.7,0.4c-1,0-2-0.2-2.8-0.5
s-1.5-0.9-2-1.6c-0.5-0.8-0.7-1.7-0.8-3V15.7L106.6,14.6z"/>
<path d="M137.9,17.5h-13.3v6.9h10.4v3.3h-10.4v10.2h-3.9V14.2h17.2V17.5z"/>
<path d="M150.9,13.8c2.1,0,4,0.4,5.5,1.2c1.6,0.8,2.9,2,4,3.5l-2.6,2.5c-0.9-1.4-1.9-2.4-3.1-3c-1.1-0.6-2.5-0.9-4-0.9
c-1.2,0-2.1,0.2-2.8,0.5c-0.7,0.3-1.3,0.7-1.6,1.2c-0.3,0.5-0.5,1.1-0.5,1.7c0,0.7,0.3,1.4,0.8,1.9c0.5,0.6,1.5,1,2.9,1.3
l4.8,1.1c2.3,0.5,3.9,1.3,4.9,2.3c1,1,1.4,2.3,1.4,3.9c0,1.5-0.4,2.7-1.2,3.8c-0.8,1.1-1.9,1.9-3.3,2.5s-3.1,0.9-5,0.9
c-1.7,0-3.2-0.2-4.5-0.6c-1.3-0.4-2.5-1-3.5-1.8c-1-0.7-1.8-1.6-2.5-2.6l2.7-2.7c0.5,0.8,1.1,1.6,1.9,2.2
c0.8,0.7,1.7,1.2,2.7,1.5c1,0.4,2.2,0.5,3.4,0.5c1.1,0,2.1-0.1,2.9-0.4c0.8-0.3,1.4-0.7,1.8-1.2c0.4-0.5,0.6-1.1,0.6-1.9
c0-0.7-0.2-1.3-0.7-1.8c-0.5-0.5-1.3-0.9-2.6-1.2l-5.2-1.2c-1.4-0.3-2.6-0.8-3.6-1.3c-0.9-0.6-1.6-1.3-2.1-2.1s-0.7-1.8-0.7-2.8
c0-1.3,0.4-2.6,1.1-3.7c0.7-1.1,1.8-2,3.2-2.6C147.3,14.1,148.9,13.8,150.9,13.8z"/>
</g>
</g>
</g>
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
sodipodi:docname="logo_fs.svg"
inkscape:version="1.0 (4035a4fb49, 2020-05-01)"
id="svg57"
version="1.1"
viewBox="0 0 105 25"
height="25mm"
width="105mm">
<defs
id="defs51">
<clipPath
clipPathUnits="userSpaceOnUse"
id="clipPath434">
<path
d="M 0,0 H 1366 V 768 H 0 Z"
id="path432" />
</clipPath>
</defs>
<sodipodi:namedview
inkscape:window-maximized="0"
inkscape:window-y="0"
inkscape:window-x="130"
inkscape:window-height="1040"
inkscape:window-width="1274"
height="50mm"
units="mm"
showgrid="false"
inkscape:document-rotation="0"
inkscape:current-layer="layer1"
inkscape:document-units="mm"
inkscape:cy="344.49897"
inkscape:cx="468.64708"
inkscape:zoom="0.7"
inkscape:pageshadow="2"
inkscape:pageopacity="0.0"
borderopacity="1.0"
bordercolor="#666666"
pagecolor="#ffffff"
id="base" />
<metadata
id="metadata54">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title></dc:title>
</cc:Work>
</rdf:RDF>
</metadata>
<g
id="layer1"
inkscape:groupmode="layer"
inkscape:label="Layer 1">
<g
id="g424"
transform="matrix(0.35277777,0,0,-0.35277777,63.946468,10.194047)">
<path
d="m 0,0 v -8.093 h 12.287 v -3.94 H 0 V -24.067 H -4.534 V 3.898 H 15.677 V 0 Z"
style="fill:#00e396;fill-opacity:1;fill-rule:nonzero;stroke:none"
id="path426" />
</g>
<g
transform="matrix(0.35277777,0,0,-0.35277777,-315.43002,107.34005)"
id="g428">
<g
id="g430"
clip-path="url(#clipPath434)">
<g
id="g436"
transform="translate(1112.874,278.2981)">
<path
d="M 0,0 C 1.822,-0.932 3.354,-2.359 4.597,-4.28 L 1.165,-7.373 c -0.791,1.695 -1.779,2.924 -2.966,3.686 -1.186,0.763 -2.768,1.145 -4.745,1.145 -1.949,0 -3.461,-0.389 -4.534,-1.166 -1.074,-0.777 -1.61,-1.772 -1.61,-2.987 0,-1.13 0.523,-2.027 1.568,-2.69 1.045,-0.664 2.909,-1.236 5.593,-1.716 2.514,-0.452 4.512,-1.024 5.995,-1.716 1.483,-0.693 2.564,-1.554 3.242,-2.585 0.677,-1.031 1.016,-2.309 1.016,-3.834 0,-1.639 -0.466,-3.079 -1.398,-4.322 -0.932,-1.243 -2.239,-2.197 -3.919,-2.86 -1.681,-0.664 -3.623,-0.996 -5.826,-0.996 -5.678,0 -9.689,1.892 -12.033,5.678 l 3.178,3.178 c 0.903,-1.695 2.068,-2.939 3.495,-3.729 1.426,-0.791 3.199,-1.186 5.318,-1.186 2.005,0 3.58,0.345 4.724,1.038 1.144,0.692 1.716,1.674 1.716,2.945 0,1.017 -0.516,1.835 -1.547,2.457 -1.031,0.621 -2.832,1.172 -5.402,1.653 -2.571,0.479 -4.618,1.073 -6.143,1.779 -1.526,0.706 -2.635,1.582 -3.326,2.627 -0.693,1.045 -1.039,2.316 -1.039,3.813 0,1.582 0.438,3.023 1.314,4.322 0.875,1.299 2.14,2.33 3.792,3.093 1.653,0.763 3.58,1.144 5.783,1.144 C -4.018,1.398 -1.822,0.932 0,0"
style="fill:#00e396;fill-opacity:1;fill-rule:nonzero;stroke:none"
id="path438" />
</g>
<g
id="g440"
transform="translate(993.0239,277.5454)">
<path
d="m 0,0 c 2.054,-1.831 3.083,-4.465 3.083,-7.902 v -17.935 h -4.484 v 16.366 c 0,2.914 -0.626,5.024 -1.877,6.332 -1.253,1.308 -2.924,1.962 -5.016,1.962 -1.495,0 -2.896,-0.327 -4.204,-0.981 -1.308,-0.654 -2.381,-1.719 -3.222,-3.194 -0.841,-1.477 -1.261,-3.335 -1.261,-5.576 v -14.909 h -4.484 V 1.328 l 4.086,-1.674 0.118,-1.84 c 0.933,1.681 2.222,2.923 3.867,3.727 1.643,0.803 3.493,1.205 5.548,1.205 C -4.671,2.746 -2.055,1.83 0,0"
style="fill:#000033;fill-opacity:1;fill-rule:nonzero;stroke:none"
id="path442" />
</g>
<g
id="g444"
transform="translate(1027.9968,264.0386)">
<path
d="m 0,0 h -21.128 c 0.261,-2.84 1.205,-5.044 2.83,-6.613 1.625,-1.57 3.727,-2.355 6.305,-2.355 2.054,0 3.763,0.356 5.128,1.065 1.363,0.71 2.288,1.738 2.774,3.083 l 3.755,-1.961 c -1.121,-1.981 -2.616,-3.495 -4.484,-4.54 -1.868,-1.046 -4.259,-1.569 -7.173,-1.569 -4.223,0 -7.538,1.289 -9.948,3.867 -2.41,2.578 -3.615,6.146 -3.615,10.704 0,4.558 1.149,8.127 3.447,10.705 2.298,2.578 5.557,3.867 9.779,3.867 2.615,0 4.876,-0.58 6.782,-1.738 1.905,-1.158 3.343,-2.728 4.315,-4.707 C -0.262,7.827 0.224,5.605 0.224,3.139 0.224,2.092 0.149,1.046 0,0 m -18.298,10.144 c -1.513,-1.457 -2.438,-3.512 -2.775,-6.165 h 16.982 c -0.3,2.615 -1.159,4.661 -2.578,6.137 -1.42,1.476 -3.307,2.214 -5.661,2.214 -2.466,0 -4.455,-0.728 -5.968,-2.186"
style="fill:#000033;fill-opacity:1;fill-rule:nonzero;stroke:none"
id="path446" />
</g>
<g
id="g448"
transform="translate(1057.8818,276.4246)">
<path
d="m 0,0 c 2.41,-2.578 3.615,-6.147 3.615,-10.705 0,-4.558 -1.205,-8.126 -3.615,-10.704 -2.41,-2.578 -5.726,-3.867 -9.948,-3.867 -4.222,0 -7.537,1.289 -9.947,3.867 -2.41,2.578 -3.615,6.146 -3.615,10.704 0,4.558 1.205,8.127 3.615,10.705 2.41,2.578 5.725,3.867 9.947,3.867 C -5.726,3.867 -2.41,2.578 0,0 m -16.617,-2.858 c -1.607,-1.906 -2.41,-4.522 -2.41,-7.847 0,-3.326 0.803,-5.94 2.41,-7.846 1.607,-1.905 3.83,-2.858 6.669,-2.858 2.839,0 5.063,0.953 6.67,2.858 1.606,1.906 2.41,4.52 2.41,7.846 0,3.325 -0.804,5.941 -2.41,7.847 C -4.885,-0.953 -7.109,0 -9.948,0 c -2.839,0 -5.062,-0.953 -6.669,-2.858"
style="fill:#000033;fill-opacity:1;fill-rule:nonzero;stroke:none"
id="path450" />
</g>
</g>
</g>
<g
id="g452"
transform="matrix(0.35277777,0,0,-0.35277777,5.8329581,6.5590171)">
<path
d="m 0,0 0.001,-38.946 25.286,-9.076 V -8.753 L 52.626,1.321 27.815,10.207 Z"
style="fill:#00e599;fill-opacity:1;fill-rule:nonzero;stroke:none"
id="path454" />
</g>
<g
id="g456"
transform="matrix(0.35277777,0,0,-0.35277777,15.479008,10.041927)">
<path
d="M 0,0 V -21.306 L 25.293,-30.364 25.282,9.347 Z"
style="fill:#00b091;fill-opacity:1;fill-rule:nonzero;stroke:none"
id="path458" />
</g>
</g>
</svg>

Before: 5.5 KiB, After: 6.5 KiB

.github/workflows/dco.yml

@ -0,0 +1,21 @@
name: DCO check
on:
pull_request:
branches:
- master
jobs:
commits_check_job:
runs-on: ubuntu-latest
name: Commits Check
steps:
- name: Get PR Commits
id: 'get-pr-commits'
uses: tim-actions/get-pr-commits@master
with:
token: ${{ secrets.GITHUB_TOKEN }}
- name: DCO Check
uses: tim-actions/dco@master
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}
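Because this workflow rejects commits without a Developer Certificate of Origin trailer, commits are signed off before pushing — this matches the `Signed-off-by:` lines in the commit list above. For example:

```sh
# Create a signed-off commit (adds the Signed-off-by trailer automatically)
git commit -s -m "[#123] Add some feature"
# Add a missing sign-off to the most recent commit without changing its message
git commit --amend --no-edit --signoff
```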

.github/workflows/go.yml

@ -0,0 +1,58 @@
name: neofs-node tests
on:
push:
branches:
- master
paths-ignore:
- '*.md'
pull_request:
branches:
- master
paths-ignore:
- '*.md'
jobs:
test:
runs-on: ubuntu-20.04
strategy:
matrix:
go: [ '1.17.x', '1.18.x' ]
steps:
- name: Setup go
uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go }}
- name: Check out code
uses: actions/checkout@v2
- name: Cache go mod
uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ matrix.go }}-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-${{ matrix.go }}-
- name: Run go test
run: go test -coverprofile=coverage.txt -covermode=atomic ./...
- name: Codecov
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
run: bash <(curl -s https://codecov.io/bash)
lint:
runs-on: ubuntu-20.04
steps:
- uses: actions/setup-go@v3
with:
go-version: 1.19
- uses: actions/checkout@v3
- name: golangci-lint
uses: golangci/golangci-lint-action@v3
with:
version: v1.48.0
args: --timeout=5m
only-new-issues: true
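The same checks can be reproduced locally before pushing; the commands below mirror the workflow steps (installing golangci-lint itself is left out, and the timeout is the one the workflow passes):

```sh
# Unit tests with coverage, exactly as the "Run go test" step does
go test -coverprofile=coverage.txt -covermode=atomic ./...
# Lint with the same timeout the workflow uses
golangci-lint run --timeout=5m
```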

.gitignore

@ -28,24 +28,3 @@ testfile
# misc
.neofs-cli.yml
# debhelpers
debian/*debhelper*
# logfiles
debian/*.log
# .substvars
debian/*.substvars
# .bash-completion
debian/*.bash-completion
# Install folders and files
debian/frostfs-cli/
debian/frostfs-ir/
debian/files
debian/frostfs-storage/
debian/changelog
man/
debs/


@ -1,11 +0,0 @@
[general]
fail-without-commits=true
regex-style-search=true
contrib=CC1
[title-match-regex]
regex=^\[\#[0-9X]+\]\s
[ignore-by-title]
regex=^Release(.*)
ignore=title-match-regex
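With this configuration, gitlint requires commit titles to start with an issue reference such as `[#123] ` (digits or `X` placeholders are allowed by `[0-9X]+`), while titles starting with `Release` are exempt. Illustrative titles, not taken from the repository history:

```sh
# Passes the title-match-regex rule
git commit -s -m "[#123] Add some feature"
# Also passes: X placeholders are allowed
git commit -s -m "[#XX] Fix typos in docs"
# Exempt via ignore-by-title
git commit -s -m "Release v0.35.0"
```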


@ -4,7 +4,7 @@
# options for analysis running
run:
# timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 10m
timeout: 5m
# include test files or not, default is true
tests: false
@ -24,13 +24,6 @@ linters-settings:
govet:
# report about shadowed variables
check-shadowing: false
staticcheck:
checks: ["all", "-SA1019"] # TODO Enable SA1019 after deprecated warning are fixed.
funlen:
lines: 80 # default 60
statements: 60 # default 40
gocognit:
min-complexity: 40 # default 30
linters:
enable:
@ -41,26 +34,16 @@ linters:
# some default golangci-lint linters
- errcheck
- gosimple
- godot
- ineffassign
- staticcheck
- typecheck
- unused
# extra linters
- bidichk
- durationcheck
- exhaustive
- exportloopref
- gofmt
- goimports
- misspell
- predeclared
- reassign
- whitespace
- containedctx
- funlen
- gocognit
- contextcheck
- goimports
- unused
disable-all: true
fast: false
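A quick way to sanity-check which of the linters above are actually enabled after editing the config (run from the repository root, where `.golangci.yml` lives):

```sh
# Show the effective linter set resolved from .golangci.yml
golangci-lint linters
```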


@ -1,35 +0,0 @@
ci:
autofix_prs: false
repos:
- repo: https://github.com/jorisroovers/gitlint
rev: v0.19.1
hooks:
- id: gitlint
stages: [commit-msg]
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
- id: check-added-large-files
- id: check-case-conflict
- id: check-executables-have-shebangs
- id: check-shebang-scripts-are-executable
- id: check-merge-conflict
- id: check-json
- id: check-xml
- id: check-yaml
- id: trailing-whitespace
args: [--markdown-linebreak-ext=md]
- id: end-of-file-fixer
exclude: ".key$"
- repo: https://github.com/shellcheck-py/shellcheck-py
rev: v0.9.0.2
hooks:
- id: shellcheck
- repo: https://github.com/golangci/golangci-lint
rev: v1.51.2
hooks:
- id: golangci-lint
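To use this hook set in a local clone, pre-commit has to be installed and the hooks registered; note that the gitlint hook runs at the `commit-msg` stage, so that hook type needs installing too. A minimal sketch:

```sh
# Install pre-commit itself (pip shown here; pipx or a system package also works)
pip install pre-commit
# Register both hook stages used by this config
pre-commit install --hook-type pre-commit --hook-type commit-msg
# One-off run over the whole tree
pre-commit run --all-files
```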


@ -1,17 +0,0 @@
pipeline:
# Kludge for non-root containers under WoodPecker
fix-ownership:
image: alpine:latest
commands: chown -R 1234:1234 .
pre-commit:
image: git.frostfs.info/truecloudlab/frostfs-ci:v0.36
commands:
- export HOME="$(getent passwd $(id -u) | cut '-d:' -f6)"
- pre-commit run
unit:
image: git.frostfs.info/truecloudlab/frostfs-ci:v0.36
commands:
- export HOME="$(getent passwd $(id -u) | cut '-d:' -f6)"
- make test


@ -1,351 +1,21 @@
# Changelog
Changelog for FrostFS Node
Changelog for NeoFS Node
## [Unreleased]
### Added
- Add GAS pouring mechanism for a configurable list of wallets (#128)
- Separate batching for replicated operations over the same container in pilorama (#1621)
- Doc for extended headers (#2128)
- New `frostfs_node_object_container_size` metric for tracking size of regular objects in a container (#2116)
- New `frostfs_node_object_payload_size` metric for tracking size of regular objects on a single shard (#1794)
- Add command `frostfs-adm morph netmap-candidates` (#1889)
- `object.delete.tombstone_lifetime` config parameter to set tombstone lifetime in the DELETE service (#2246)
- Reload config for pprof and metrics on SIGHUP in `neofs-node` (#1868)
- Multiple configs support (#44)
- Parameters `nns-name` and `nns-zone` for command `frostfs-cli container create` (#37)
- Tree service now saves the last synchronization height which persists across restarts (#82)
### Changed
- Change `frostfs_node_engine_container_size` to counting sizes of logical objects
- `common.PrintVerbose` prints via `cobra.Command.Printf` (#1962)
- Env prefix in configuration changed to `FROSTFS_*` (#43)
- Link object is broadcast throughout the whole container now (#57)
- Pilorama now can merge multiple batches into one (#2231)
- Storage engine now can start even when some shard components are unavailable (#2238)
- `neofs-cli` buffer for object put increased from 4 KiB to 3 MiB (#2243)
- Expired locked object is available for reading (#56)
- Initialize write-cache asynchronously (#32)
### Fixed
- Increase payload size metric on shards' `put` operation (#1794)
- Big object removal with non-local parts (#1978)
- Disable pilorama when moving to degraded mode (#2197)
- Fetching blobovnicza objects that not found in write-cache (#2206)
- Do not search for the small objects in FSTree (#2206)
- Correct status error for expired session token (#2207)
- Set flag `mode` required for `frostfs-cli control shards set-mode` (#8)
- Fix `dirty` suffix in debian package version (#53)
- Prevent node process from killing by systemd when shutting down (#1465)
- Restore subscriptions correctly on morph client switch (#2212)
- Expired objects could be returned if not marked with GC yet (#2213)
- `neofs-adm morph dump-hashes` now properly iterates over custom domain (#2224)
- Possible deadlock in write-cache (#2239)
- Fix `*_req_count` and `*_req_count_success` metric values (#2241)
- Storage ID update by write-cache (#2244)
- `neo-go` client deadlock on subscription restoration (#2244)
- Possible panic during write-cache initialization (#2234)
- Do not fetch an object if `meta` is missing it (#61)
- Create contract wallet only by `init` and `update-config` command (#63)
- Actually use `object.put.pool_size_local` and independent pool for local puts (#64).
- Pretty printer of basic ACL in the NeoFS CLI (#2259)
- Adding of public key for nns group `group.frostfs` at init step (#130)
### Removed
### Updated
- `neo-go` to `v0.100.1`
- `github.com/klauspost/compress` to `v1.15.13`
- `github.com/multiformats/go-multiaddr` to `v0.8.0`
- `golang.org/x/term` to `v0.3.0`
- `google.golang.org/grpc` to `v1.51.0`
- `github.com/nats-io/nats.go` to `v1.22.1`
- `github.com/TrueCloudLab/hrw` to `v.1.1.1`
- Minimum go version to v1.18
### Updating from v0.35.0
You need to change configuration environment variables to `FROSTFS_*` if you use any.
New config field `object.delete.tombstone_lifetime` allows setting a tombstone lifetime
more appropriate for a specific deployment.
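In concrete terms (variable names below are illustrative only, assuming the usual section-to-environment mapping of the node config):

```sh
# Before the migration (NeoFS prefix)
export NEOFS_LOGGER_LEVEL=debug
# After the migration (FrostFS prefix)
export FROSTFS_LOGGER_LEVEL=debug
# The new tombstone lifetime knob expressed the same way (value in epochs)
export FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME=5
```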
## [0.35.0] - 2022-12-28 - Sindo (신도, 信島)
### Added
- `morph list-containers` in `neofs-adm` (#1689)
- `--binary` flag in `neofs-cli object put/get/delete` commands (#1338)
- `session` flag support to `neofs-cli object hash` (#2029)
- Shard can now change mode when encountering background disk errors (#2035)
- Background workers and object service now use separate client caches (#2048)
- `replicator.pool_size` config field to tune replicator pool size (#2049)
- Fix NNS hash parsing in morph client (#2063)
- `neofs-cli neofs-cli acl basic/extended print` commands (#2012)
- `neofs_node_object_*_req_count_success` prometheus metrics for tracking successfully executed requests (#1984)
- Metric 'readonly' to get shards mode (#2022)
- Tree service replication timeout (#2159)
- `apiclient.reconnect_timeout` setting allowing to ignore failed clients for some time (#2164)
### Changed
- `object lock` command reads CID and OID the same way other commands do (#1971)
- `LOCK` object are stored on every container node (#1502)
- `neofs-cli container get-eacl` prints the ACL table in JSON format only with the `--json` arg (#2012)
- Side chain notary deposits use max uint32 as till parameter (#1486)
- Allow object removal without linking object (#2100)
- `neofs-cli container delete` command pre-checks container ownership (#2106)
- Policer cache size is now 1024 (#2158)
- Tree service now synchronizes with container nodes in a random order (#2127)
- Pilorama no longer tries to apply already applied operations (#2161)
- Use `sync.Pool` in Object.PUT service (#2139)
- Shard uses metabase for `HEAD` requests by default, not write-cache (#2167)
- Clarify help for `--expire-at` parameter for commands `object lock/put` and `bearer create` (#2097)
- Node spawns `GETRANGE` requests signed with the node's key if session key was not found for `RANGEHASH` (#2144)
- Full list of container is no longer cached (#2176)
### Fixed
- Open FSTree in sync mode by default (#1992)
- `neofs-cli container nodes`'s output (#1991)
- Increase error counter for write-cache flush errors (#1818)
- Correctly select the shard for applying tree service operations (#1996)
- Do not panic and return correct errors for bad inputs in `GET_RANGE` (#2007, #2024)
- Physical child object removal by GC (#1699)
- Broadcasting helper objects (#1972)
- `neofs-cli lock object`'s `lifetime` flag handling (#1972)
- Do not move write-cache in read-only mode for flushing (#1906)
- Child object collection on CLI side with a bearer token (#2000)
- Fix concurrent map writes in `Object.Put` service (#2037)
- Malformed request errors' reasons in the responses (#2028)
- Session token's IAT and NBF checks in ACL service (#2028)
- Losing meta information on request forwarding (#2040)
- Assembly process triggered by a request with a bearer token (#2040)
- Losing locking context after metabase resync (#1502)
- Removing all trees by container ID if tree ID is empty in `pilorama.Forest.TreeDrop` (#1940)
- Concurrent mode changes in the metabase and blobstor (#2057)
- Panic in IR when performing HEAD requests (#2069)
- Write-cache flush duplication (#2074)
- Ignore error if a transaction already exists in a morph client (#2075)
- ObjectID signature output in the CLI (#2104)
- Pack arguments of `setPrice` invocation during contract update (#2078)
- `neofs-cli object hash` panic (#2079)
- Closing `neo-go` WS clients on shutdown and switch processes (#2080)
- Making notary deposits with a zero GAS balance (#2080)
- Notary requests on shutdown (#2075)
- `neofs-cli container create` checks the sufficiency of the number of nodes in the selector for replicas (#2038)
- Data duplication during request forwarding (#2047)
- Tree service panic on `TreeMove` operation (#2140)
- Panic in `GETRANGE` with zero length (#2095)
- Spawning useless `GETRANGE` with zero length for a big object (#2101)
- Incomplete object put errors do contain the deepest error's message (#2092)
- Prioritize internal addresses for clients (#2156)
- Force object removal via control service (#2145)
- Synchronizing a tree no longer reports an error for a single-node container (#2154)
- Prevent leaking goroutines in the tree service (#2162)
- Do not search for LOCK objects when deleting a container with a session provided (#2152)
- Race conditions on shard's mode switch (#1956)
- Returning expired/removed objects from write-cache (#2016)
### Removed
- `-g` option from `neofs-cli control ...` and `neofs-cli container create` commands (#2089)
- `--header` from `neofs-cli object get` (#2090)
### Updated
- `neo-go` to `v0.100.0`
- `spf13/cobra` to `v1.6.1`
- `spf13/viper` to `v1.8.0`
- `google.golang.org/grpc` to `v1.50.1`
### Updating from v0.34.0
Pass CID and OID parameters via the `--cid` and `--oid` flags, not as the command arguments.
Replicator pool size can now be fine-tuned with `replicator.pool_size` config field.
The default value is taken from `object.put.pool_size_remote` as in earlier versions.
Added `neofs_node_object_*_req_count_success` metrics for tracking successfully executed requests.
`neofs-cli container delete` command now requires given account or session issuer
to match the container owner. Use `--force` (`-f`) flag to bypass this requirement.
Tree service network replication can now be fine-tuned with `tree.replication_timeout` config field.
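A hedged illustration of the flag-based addressing described above (identifiers are placeholders; connection and wallet flags that a real invocation also needs are omitted):

```sh
# CID and OID are passed as flags rather than positional arguments
neofs-cli object head --cid <container-id> --oid <object-id>
```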
## [0.34.0] - 2022-10-31 - Marado (마라도, 馬羅島)
### Added
- `--timeout` flag in `neofs-cli control` commands (#1917)
- Document shard modes of operation (#1909)
- `tree list` CLI command (#1332)
- `TreeService.GetTrees` RPC (#1902)
- All trees synchronization on bootstrap (#1902)
- `--force` flag to `neofs-cli control set-status` command (#1916)
- Logging `SessionService.Create` RPC on the server for debug (#1930)
- Debian packages can now be built with `make debpackage` (#409)
### Changed
- Path to a metabase can now be reloaded with a SIGHUP (#1869)
### Fixed
- `writecache.max_object_size` is now correctly handled (#1925)
- Correctly handle setting ONLINE netmap status after maintenance (#1922)
- Correctly reset shard errors in `ControlService.SetShardMode` RPC (#1931)
- Setting node's network state to `MAINTENANCE` while network settings forbid it (#1916)
- Do not panic during API client creation (#1936)
- Correctly sign new epoch transaction in neofs-adm for a committee of more than 4 nodes (#1949)
- Inability to provide session to NeoFS CLI in a NeoFS-binary format (#1933)
- `neofs-adm` now works correctly with a committee of more than 4 nodes (#1949, #1959)
- Closing a shard now waits until GC background workers stop (#1964)
- Make it possible to use `shard.ContainerSize` in read-only mode (#1975)
- Storage node now starts if at least one gRPC endpoint is available (#1893)
- Panic in API multi client (#1961)
- Blobstor object removal log messages (#1953)
- Missing object relatives in object removal session opened by NeoFS CLI (#1978)
- Bringing a node back online during maintenance (#1900)
### Updated
- `neo-go` to `v0.99.4`
- `protoc` to `v3.21.7`
- `neofs-sdk` to `v1.0.0-rc.7`
### Updating from v0.33.0
Now storage node serves Control API `SetNetmapStatus` request with `MAINTENANCE`
status only if the mode is allowed in the network settings. To force starting the local
maintenance on the node, provide `--force` flag to the `neofs-cli control set-status`
command.
## [0.33.0] - 2022-10-17 - Anmado (안마도, 鞍馬島)
### Added
- Serving `NetmapService.NetmapSnapshot` RPC (#1793)
- `netmap snapshot` command of NeoFS CLI (#1793)
- `apiclient.allow_external` config flag to fallback to node external addresses (#1817)
- Support `MAINTENANCE` state of the storage nodes (#1680, #1681)
- Changelog updates CI step (#1808)
- Validate storage node configuration before node startup (#1805)
- `neofs-node -check` command to check the configuration file (#1805)
- `flush-cache` control service command to flush write-cache (#1806)
- `wallet-address` flag in `neofs-adm morph refill-gas` command (#1820)
- Validate policy before container creation (#1704)
- `--timeout` flag in `neofs-cli` subcommands (#1837)
- `container nodes` command to output list of nodes for container, grouped by replica (#1704)
- Configuration flag to ignore shard in `neofs-node` (#1840)
- Add new RPC `TreeService.Healthcheck`
- Fallback to `GET` if `GET_RANGE` from one storage nodes to another is denied by basic ACL (#1884)
- List of shards and logger level runtime reconfiguration (#1770)
- `neofs-adm morph set-config` now supports well-known `MaintenanceModeAllowed` key (#1892)
- `add`, `get-by-path` and `add-by-path` tree service CLI commands (#1332)
- Tree synchronisation on startup (#1329)
- Morph client returns to the highest priority endpoint after the switch (#1615)
### Changed
- Allow to evacuate shard data with `EvacuateShard` control RPC (#1800)
- Flush write-cache when moving shard to DEGRADED mode (#1825)
- Make `morph.cache_ttl` default value equal to morph block time (#1846)
- Policer marks nodes under maintenance as OK without requests (#1680)
- Unify help messages in CLI (#1854)
- `evacuate`, `set-mode` and `flush-cache` control subcommands now accept a list of shard ids (#1867)
- Reading `object` commands of NeoFS CLI don't open remote sessions (#1865)
- Use hex format to print storage node ID (#1765)
### Fixed
- Description of command `netmap nodeinfo` (#1821)
- Proper status for object.Delete if session token is missing (#1697)
- Fail startup if metabase has an old version (#1809)
- Storage nodes could enter the network with any state (#1796)
- Missing check of new state value in `ControlService.SetNetmapStatus` (#1797)
- Correlation of object session to request (#1420)
- Do not increase error counter in `engine.Inhume` if shard is read-only (#1839)
- `control drop-objects` can remove split objects (#1830)
- Node's status in `neofs-cli netmap nodeinfo` command (#1833)
- Child check in object assembly process of `ObjectService.Get` handler (#1878)
- Shard ID in the object counter metrics (#1863)
- Metabase migration from the first version (#1860)
### Removed
- Remove WIF and NEP2 support in `neofs-cli`'s --wallet flag (#1128)
- Remove --generate-key option in `neofs-cli container delete` (#1692)
- Serving `ControlService.NetmapSnapshot` RPC (#1793)
- `control netmap-snapshot` command of NeoFS CLI (#1793)
### Updated
- `neofs-contract` to `v0.16.0`
- `neofs-api-go` to `v2.14.0`
### Updating from v0.32.0
Replace using the `control netmap-snapshot` command with `netmap snapshot` one in NeoFS CLI.
Node can now specify additional addresses in `ExternalAddr` attribute. To allow a node to dial
other nodes external address, use `apiclient.allow_external` config setting.
Add `--force` option to skip placement validity check for container creation.
Pass `maintenance` state to `neofs-cli control set-status` to enter maintenance mode.
If network allows maintenance state (*), it will be reflected in the network map.
Storage nodes under maintenance are not excluded from the network map, but don't
serve object operations. (*) can be fetched from network configuration via
`neofs-cli netmap netinfo` command.
To allow maintenance mode during neofs-adm deployments, set
`network.maintenance_mode_allowed` parameter in config.
When issuing an object session token for root (virtual, "big") objects,
additionally include all members of the split-chain. If session context
includes root object only, it is not spread to physical ("small") objects.
`neofs-node` configuration now supports `mode: disabled` flag for a shard.
This can be used to temporarily ignore shards without completely removing them
from the config file.
## [0.32.0] - 2022-09-14 - Pungdo (풍도, 楓島)
### Added
- Objects counter metric (#1712)
- `meta` subcommand to `neofs-lens` (#1714)
- Storage node metrics with global and per-shard object counters (#1658)
- Removal of trees on container removal (#1630)
- Logging new epoch events on storage node (#1763)
- Timeout for streaming RPC (#1746)
- `neofs-adm` is now able to dump hashes from a custom zone (#1748)
- Empty filename support in the Tree Service (#1698)
- Flag to `neofs-cli container list-objects` command for attribute printing (#1649)
### Changed
- `neofs-cli object put`'s object ID output has changed from "ID" to "OID" (#1296)
- `neofs-cli container set-eacl` command now pre-checks container ACL's extensibility (#1652)
- Access control in Tree service (#1628)
- Tree service doesn't restrict depth in `rpc GetSubTree` (#1753)
- `neofs-adm` registers contract hashes in both hex and string address formats (#1749)
- Container list cache synchronization with the Sidechain (#1632)
- Blobstor components are unified (#1584, #1686, #1523)
### Fixed
- Panic on write-cache's `Delete` operation (#1664)
- Payload duplication in `neofs-cli storagegroup put` (#1706)
- Contract calls in notary disabled environments (#1743)
- `Blobovnicza.Get` op now iterates over all size buckets (#1707)
- Object expiration time (#1670)
- Parser of the placement policy (#1775)
- Tree service timeout logs (#1759)
- Object flushing on writecache side (#1745)
- Active blobovniczas caching (#1691)
- `neofs-adm` TX waiting (#1738)
- `neofs-adm` registers contracts with a minimal GAS payment (#1683)
- Permissions of the file created by `neofs-cli` (#1719)
- `neofs-adm` creates TX with a high priority attribute (#1702)
- Storage node's restart after a hard reboot (#1647)
### Removed
### Updated
- `neo-go` to `v0.99.2`
- `nspcc-dev/neofs-contract` to `v0.15.5`
- `prometheus/client_golang` to `v1.13.0`
- `google.golang.org/protobuf` to `v1.28.1`
### Updating from v0.31.0
Storage Node now collects object count prometheus metrics: `neofs_node_object_counter`.
Provide `--no-precheck` flag to `neofs-cli container set-eacl` for unconditional sending of a request
(previous default behavior).
## [0.31.0] - 2022-08-04 - Baengnyeongdo (백령도, 白翎島)
### Added
@ -573,15 +243,15 @@ Clean up all metabases and re-sync them using `resync_metabase` config flag.
- Reduced amount of slices with pointers (#1239)
### Updating from v0.28.0-rc.2
Remove `NEOFS_IR_MAINNET_ENDPOINT_NOTIFICATION`,
Remove `NEOFS_IR_MAINNET_ENDPOINT_NOTIFICATION`,
`NEOFS_IR_MORPH_ENDPOINT_NOTIFICATION`, and `NEOFS_MORPH_NOTIFICATION_ENDPOINT`
from Inner Ring and Storage configurations.
from Inner Ring and Storage configurations.
Specify _WebSocket_ endpoints in `NEOFS_IR_MAINNET_ENDPOINT_CLIENT`,
`NEOFS_IR_MORPH_ENDPOINT_CLIENT`, and `NEOFS_MORPH_RPC_ENDPOINT` at Inner Ring
and Storage configurations.
Specify path to persistent session token db in Storage configuration with
Specify path to persistent session token db in Storage configuration with
`NEOFS_NODE_PERSISTENT_SESSIONS_PATH`.
## [0.28.0-rc.2] - 2022-03-24
@ -597,7 +267,7 @@ Specify path to persistent session token db in Storage configuration with
## [0.28.0-rc.1] - 2022-03-18
Native RFC-6979 signatures of messages and tokens, LOCK object types,
Native RFC-6979 signatures of messages and tokens, LOCK object types,
experimental notifications over NATS with NeoFS API v2.12 support
### Fixed
@ -633,8 +303,8 @@ experimental notifications over NATS with NeoFS API v2.12 support
- Deprecated structures from SDK v1.0.0 rc (#1181)
### Updating from neofs-node v0.27.5
Set shard error threshold for read-only mode switch with
`NEOFS_STORAGE_SHARD_RO_ERROR_THRESHOLD` (default: 0, deactivated).
Set shard error threshold for read-only mode switch with
`NEOFS_STORAGE_SHARD_RO_ERROR_THRESHOLD` (default: 0, deactivated).
Set NATS configuration for notifications in `NEOFS_NODE_NOTIFICATION` section.
See example config for more details.
@ -700,7 +370,7 @@ See example config for more details.
Use `--wallet` key in CLI to provide WIF or binary key file instead of `--wif`
and `--binary-key`.
Replace `NEOFS_STORAGE_SHARD_N_USE_WRITE_CACHE` with
Replace `NEOFS_STORAGE_SHARD_N_USE_WRITE_CACHE` with
`NEOFS_STORAGE_SHARD_N_WRITECACHE_ENABLED` in Storage node config.
Specify `password: xxx` in config file for NeoFS CLI to avoid password input.
@ -777,7 +447,7 @@ NeoFS API v2.11.0 support with response status codes and storage subnetworks.
- CLI now opens LOCODE database in read-only mode for listing command (#958)
- Tombstone owner now is always set (#842)
- Node in relay mode does not require shard config anymore (#969)
- Alphabet nodes now ignore notary notifications with non-HALT main tx (#976)
- Alphabet nodes now ignore notary notifications with non-HALT main tx (#976)
- neofs-adm now prints version of NNS contract (#1014)
- Possible NPE in blobovnicza (#1007)
- More precise calculation of blobovnicza size (#915)
@ -794,13 +464,13 @@ NeoFS API v2.11.0 support with response status codes and storage subnetworks.
- Alphabet nodes resign `AddPeer` request if it updates Storage node info (#938)
- All applications now use client from neofs-sdk-go library (#966)
- Some shard configuration records were renamed, see upgrading section (#859)
- `Nonce` and `VUB` values of notary transactions generated from notification
- `Nonce` and `VUB` values of notary transactions generated from notification
hash (#844)
- Non alphabet notary invocations now have 4 witnesses (#975)
- Object replication is now async and continuous (#965)
- NeoFS ADM updated for the neofs-contract v0.13.0 deploy (#984)
- Minimal TLS version is set to v1.2 (#878)
- Alphabet nodes now invoke `netmap.Register` to add node to the network map
- Alphabet nodes now invoke `netmap.Register` to add node to the network map
candidates in notary enabled environment (#1008)
### Upgrading from v0.26.1
@ -830,7 +500,7 @@ with `NEOFS_IR_FEE_NAMED_CONTAINER_REGISTER`.
### Fixed
- Storage Node handles requests before its initialization is finished (#934)
- Release worker pools gracefully (#901)
- Metabase ignored containers of storage group and tombstone objects
- Metabase ignored containers of storage group and tombstone objects
in listing (#945)
- CLI missed endpoint flag in `control netmap-snapshot` command (#942)
- Write cache object persisting (#866)
@ -844,16 +514,16 @@ with `NEOFS_IR_FEE_NAMED_CONTAINER_REGISTER`.
### Changed
- Use FSTree counter in write cache (#821)
- Calculate notary deposit `till` parameter depending on available
- Calculate notary deposit `till` parameter depending on available
deposit (#910)
- Storage node returns session token error if attached token's private key
- Storage node returns session token error if attached token's private key
is not available (#943)
- Refactor of NeoFS API client in inner ring (#946)
- LOCODE generator tries to find the closest continent if there are
- LOCODE generator tries to find the closest continent if there are
no exact match (#955)
### Upgrading from v0.26.0
You can specify default section in storage engine configuration.
You can specify default section in storage engine configuration.
See [example](./config/example/node.yaml) for more details.
## [0.26.0] - 2021-10-19 - Udo (우도, 牛島)
@ -863,7 +533,7 @@ NeoFS API v2.10 support
### Fixed
- Check remote node public key in every response message (#645)
- Do not lose local container size estimations (#872)
- Compressed and uncompressed objects are always available for reading
- Compressed and uncompressed objects are always available for reading
regardless of compression configuration (#868)
- Use request session token in ACL check of object.Put (#881)
- Parse URI in neofs-cli properly (#883)
@ -917,7 +587,7 @@ instead.
### Added
- Support of multiple Neo RPC endpoints in Inner Ring node (#792)
`mainchain` section of storage node config is left unused by the application.
`mainchain` section of storage node config is left unused by the application.
## [0.25.0] - 2021-09-27 - Mungapdo (문갑도, 文甲島)
@ -925,7 +595,7 @@ instead.
- Work of a storage node with one Neo RPC endpoint instead of a list (#746)
- Lack of support for HEAD operation on the object write cache (#762)
- Storage node attribute parsing is stable now (#787)
- Inner Ring node now logs transaction hashes of Deposit and Withdraw events
- Inner Ring node now logs transaction hashes of Deposit and Withdraw events
in LittleEndian encoding (#794)
- Storage node uses public keys of the remote nodes in placement traverser
checks (#645)
@ -933,7 +603,7 @@ instead.
(#816)
- neofs-adm supports update and deploy of neofs-contract v0.11.0 (#834, #836)
- Possible NPE in public key conversion (#848)
- Object assembly routine do not forward existing request instead of creating
- Object assembly routine do not forward existing request instead of creating
new one (#839)
- Shard now returns only physical stored objects for replication (#840)
@ -942,7 +612,7 @@ instead.
- Smart contract address auto negotiation with NNS contract (#736)
- Detailed logs for all data writing operations in storage engine (#790)
- Docker build and release targets in Makefile (#785)
- Metabase restore option in the shard config (#789)
- Metabase restore option in the shard config (#789)
- Write cache used size limit in bytes (#776)
### Changed
@ -977,7 +647,7 @@ Added `NEOFS_STORAGE_SHARD_<N>_WRITECACHE_SIZE_LIMIT` where `<N>` is shard ID.
This is the size limit for the all write cache storages combined in bytes. Default
size limit is 1 GiB.
Added `NEOFS_STORAGE_SHARD_<N>_REFILL_METABASE` bool flag where `<N>` is shard
Added `NEOFS_STORAGE_SHARD_<N>_REFILL_METABASE` bool flag where `<N>` is shard
ID. This flag purges metabase instance at the application start and reinitialize
it with available objects from the blobstor.
@ -986,12 +656,12 @@ Object service pool size now split into `NEOFS_OBJECT_PUT_POOL_SIZE_REMOTE` and
## [0.24.1] - 2021-09-07
### Fixed
### Fixed
- Storage and Inner Ring will not start until Neo RPC node will have the height
of the latest processed block by the nodes (#795)
### Upgrading from v0.24.0
Specify path to the local state DB in Inner Ring node config with
Specify path to the local state DB in Inner Ring node config with
`NEOFS_IR_NODE_PERSISTENT_STATE_PATH`. Specify path to the local state DB in
Storage node config with `NEOFS_NODE_PERSISTENT_STATE_PATH`.
@ -1010,7 +680,7 @@ Storage node config with `NEOFS_NODE_PERSISTENT_STATE_PATH`.
- Contract update support in `neofs-adm` utility (#748)
- Container transferring support in `neofs-adm` utility (#755)
- Storage Node's balance refilling support in `neofs-adm` utility (#758)
- Support `COMMON_PREFIX` filter for object attributes in storage engine and `neofs-cli` (#760)
- Support `COMMON_PREFIX` filter for object attributes in storage engine and `neofs-cli` (#760)
- Node's and IR's notary status debug message on startup (#758)
- Go `1.17` unit tests in CI (#766)
- Supporting all eACL filter fields from the specification (#768)
@ -1072,7 +742,7 @@ Improved stability for notary disabled environment.
- Storage Node configuration example contains usable parameters (#699)
### Fixed
- Do not use side chain RoleManagement contract as source of Inner Ring list
- Do not use side chain RoleManagement contract as source of Inner Ring list
when notary disabled in side chain (#672)
- Alphabet list transition is even more effective (#697)
- Inner Ring node does not require proxy and processing contracts if notary
@ -1139,9 +809,9 @@ Storage nodes with a group of network endpoints.
- Control service with healthcheck RPC in IR and CLI support ([#414](https://github.com/nspcc-dev/neofs-node/issues/414)).
### Fixed
- Approval of objects with duplicate attribute keys or empty values ([#633](https://github.com/nspcc-dev/neofs-node/issues/633)).
- Approval of objects with duplicate attribute keys or empty values ([#633](https://github.com/nspcc-dev/neofs-node/issues/633)).
- Approval of containers with duplicate attribute keys or empty values ([#634](https://github.com/nspcc-dev/neofs-node/issues/634)).
- Default path for CLI config ([#626](https://github.com/nspcc-dev/neofs-node/issues/626)).
- Default path for CLI config ([#626](https://github.com/nspcc-dev/neofs-node/issues/626)).
### Changed
- `version` command replaced with `--version` flag in CLI ([#571](https://github.com/nspcc-dev/neofs-node/issues/571)).
@ -1169,7 +839,7 @@ Storage nodes with a group of network endpoints.
- grpc: [v1.38.0](https://github.com/grpc/grpc-go/releases/tag/v1.38.0).
- cast: [v1.3.1](https://github.com/spf13/cast/releases/tag/v1.3.1).
- cobra: [1.1.3](https://github.com/spf13/cobra/releases/tag/v1.1.3).
- viper: [v1.8.1](https://github.com/spf13/viper/releases/tag/v1.8.1).
- viper: [v1.8.1](https://github.com/spf13/viper/releases/tag/v1.8.1).
## [0.21.1] - 2021-06-10
@ -1189,7 +859,7 @@ Session token support in container service, refactored config in storage node,
TLS support on gRPC servers.
### Fixed
- ACL service traverses over all RequestMetaHeader chain to find
- ACL service traverses over all RequestMetaHeader chain to find
bearer and session tokens (#548).
- Object service correctly resends complete objects without attached
session token (#501).
@ -1197,7 +867,7 @@ TLS support on gRPC servers.
- Client cache now gracefully closes all available connections (#567).
### Added
- Session token support in container service for `container.Put`,
- Session token support in container service for `container.Put`,
`container.Delete` and `container.SetEACL` operations.
- Session token support in container and sign command of NeoFS CLI.
- TLS encryption support of gRPC service in storage node.
@ -1207,8 +877,8 @@ TLS support on gRPC servers.
update earlier.
- Inner ring processes extended ACL changes.
- Inner ring makes signature checks of containers and extended ACLs.
- Refactored config of storage node.
- Static clients from `morph/client` do not process notary invocations
- Refactored config of storage node.
- Static clients from `morph/client` do not process notary invocations
explicitly anymore. Now notary support specified at static client creation.
- Updated neo-go to v0.95.1 release.
- Updated neofs-api-go to v1.27.0 release.
@ -1219,7 +889,7 @@ TLS support on gRPC servers.
## [0.20.0] - 2021-05-21 - Dolsando (돌산도, 突山島)
NeoFS is N3 RC2 compatible.
NeoFS is N3 RC2 compatible.
### Fixed
- Calculations in EigenTrust algorithm (#527).
@ -1232,7 +902,7 @@ NeoFS is N3 RC2 compatible.
- Client for NeoFSID contract.
### Changed
- Reorganized and removed plenty of application configuration records
- Reorganized and removed plenty of application configuration records
(#510, #511, #512, #514).
- Nodes do not resolve remote addresses manually.
- Presets for basic ACL in CLI are `private` ,`public-read` and
@ -1250,11 +920,11 @@ NeoFS is N3 RC2 compatible.
Storage nodes exchange, calculate, aggregate and store reputation information
in reputation contract. Inner ring nodes support workflows with and without
notary subsystem in chains.
notary subsystem in chains.
### Fixed
- Build with go1.16.
- Notary deposits last more blocks.
- Notary deposits last more blocks.
- TX hashes now prints in little endian in logs.
- Metabase deletes graves regardless of the presence of objects.
- SplitInfo error created from all shards instead of first matched shard.
@ -1262,7 +932,7 @@ notary subsystem in chains.
- Storage node does not send rebootstrap messages after it went offline.
### Added
- Reputation subsystem that includes reputation collection, exchange,
- Reputation subsystem that includes reputation collection, exchange,
calculation and storage components.
- Notary and non notary workflows in inner ring.
- Audit fee transfer for inner ring nodes that performed audit.
@ -1272,7 +942,7 @@ calculation and storage components.
### Changed
- Metabase puts data in batches.
- Network related new epoch handlers in storage node executed asynchronously.
- Network related new epoch handlers in storage node executed asynchronously.
- Storage node gets epoch duration from global config.
- Storage node resign and resend Search, Range, Head, Get requests of object
service without modification.
@ -1289,7 +959,7 @@ alphabet keys are synchronized with main chain.
### Fixed
- Metabase does not store object payloads anymore.
- TTLNetCache now always evict data after a timeout.
- NeoFS CLI keyer could misinterpret hex value as base58.
- NeoFS CLI keyer could misinterpret hex value as base58.
### Added
- Local trust controller in storage node.
@ -1301,7 +971,7 @@ alphabet keys are synchronized with main chain.
## [0.17.0] - 2021-03-22 - Jebudo (제부도, 濟扶島)
Notary contract support, updated neofs-api-go with raw client, some performance
Notary contract support, updated neofs-api-go with raw client, some performance
tweaks with extra caches and enhanced metrics.
### Added
@ -1320,7 +990,7 @@ tweaks with extra caches and enhanced metrics.
Garbage collector is now running inside storage engine. It is accessed
via Control API, from `policer` component and through object expiration
scrubbers.
scrubbers.
Inner ring configuration now supports single chain mode with any number of
alphabet contracts.
@ -1351,39 +1021,39 @@ Storage node now supports NetworkInfo method in netmap service.
## [0.15.0] - 2021-02-12 - Seonyudo (선유도, 仙遊島)
NeoFS nodes are now preview5-compatible.
NeoFS nodes are now preview5-compatible.
IR nodes are now engaged in the distribution of funds to the storage nodes:
for the passed audit and for the amount of stored information. All timers
of the IR nodes related to the generation and processing of global system
events are decoupled from astronomical time, and are measured in the number
for the passed audit and for the amount of stored information. All timers
of the IR nodes related to the generation and processing of global system
events are decoupled from astronomical time, and are measured in the number
of blockchain blocks.
For the geographic positioning of storage nodes, a global NeoFS location
database is now used, the key in which is a UN/LOCODE, and the base itself
database is now used, the key in which is a UN/LOCODE, and the base itself
is generated on the basis of the UN/LOCODE and OpenFlights databases.
### Added
- Timers with time in blocks of the chain.
- Subscriptions to new blocks in blockchain event `Listener`.
- Tracking the volume of stored information by containers in the
- Tracking the volume of stored information by containers in the
storage engine and an external interface for obtaining this data.
- `TransferX` operation in sidechain client.
- Calculators of audit and basic settlements.
- Distribution of funds to storage nodes for audit and for the amount
- Distribution of funds to storage nodes for audit and for the amount
of stored information (settlement processors of IR).
- NeoFS API `Container.AnnounceUsedSpace` RPC service.
- Exchange of information about container volumes between storage nodes
- Exchange of information about container volumes between storage nodes
controlled by IR through sidechain notifications.
- Support of new search matchers (`STRING_NOT_EQUAL`, `NOT_PRESENT`).
- Functional for the formation of NeoFS location database.
- CLI commands for generating and reading the location database.
- Checking the locode attribute and generating geographic attributes
- Checking the locode attribute and generating geographic attributes
for candidates for a network map on IR side.
- Verification of the eACL signature when checking Object ACL rules.
### Fixed
- Overwriting the local configuration of node attributes when updating
- Overwriting the local configuration of node attributes when updating
the network map.
- Ignoring the X-headers CLI `storagegroup` commands.
- Inability to attach bearer token in CLI `storagegroup` commands.
@ -1401,7 +1071,7 @@ is generated on the basis of the UN/LOCODE and OpenFlights databases.
### Fixed
- Upload of objects bigger than single gRPC message.
- Inconsistent placement issues (#347, #349).
- Bug when ACL request classifier failed to classify `RoleOthers` in
- Bug when ACL request classifier failed to classify `RoleOthers` in
first epoch.
### Added
@ -1415,13 +1085,13 @@ is generated on the basis of the UN/LOCODE and OpenFlights databases.
Testnet4 related bugfixes.
### Fixed
- Default values for blobovnicza object size limit and blobstor small object
size are not zero.
- Various storage engine log messages.
- Bug when inner ring node ignored bootstrap messages from restarted storage
nodes.
### Added
- Timeout for reading boltDB files at storage node initialization.
@ -1575,11 +1245,8 @@ NeoFS-API v2.0 support and updated brand-new storage node application.
## [0.10.0] - 2020-07-10
First public review release.
[Unreleased]: https://github.com/nspcc-dev/neofs-node/compare/v0.35.0...master
[0.35.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.34.0...v0.35.0
[0.34.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.33.0...v0.34.0
[0.33.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.32.0...v0.33.0
[0.32.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.31.0...v0.32.0
[Unreleased]: https://github.com/nspcc-dev/neofs-node/compare/v0.31.0...master
[0.31.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.30.2...v0.31.0
[0.30.2]: https://github.com/nspcc-dev/neofs-node/compare/v0.30.1...v0.30.2
[0.30.1]: https://github.com/nspcc-dev/neofs-node/compare/v0.30.0...v0.30.1

View file

@ -3,8 +3,8 @@
First, thank you for contributing! We love and encourage pull requests from
everyone. Please follow the guidelines:
- Check the open [issues](https://github.com/TrueCloudLab/frostfs-node/issues) and
[pull requests](https://github.com/TrueCloudLab/frostfs-node/pulls) for existing
- Check the open [issues](https://github.com/nspcc-dev/neofs-node/issues) and
[pull requests](https://github.com/nspcc-dev/neofs-node/pulls) for existing
discussions.
- Open an issue first, to discuss a new feature or enhancement.
@ -23,23 +23,23 @@ everyone. Please follow the guidelines:
## Development Workflow
Start by forking the `frostfs-node` repository, make changes in a branch and then
Start by forking the `neofs-node` repository, make changes in a branch and then
send a pull request. We encourage pull requests to discuss code changes. Here
are the steps in detail:
### Set up your GitHub Repository
Fork [FrostFS node upstream](https://github.com/TrueCloudLab/frostfs-node/fork) source
Fork [NeoFS node upstream](https://github.com/nspcc-dev/neofs-node/fork) source
repository to your own personal repository. Copy the URL of your fork (you will
need it for the `git clone` command below).
```sh
$ git clone https://github.com/TrueCloudLab/frostfs-node
$ git clone https://github.com/nspcc-dev/neofs-node
```
### Set up git remote as ``upstream``
```sh
$ cd frostfs-node
$ git remote add upstream https://github.com/TrueCloudLab/frostfs-node
$ cd neofs-node
$ git remote add upstream https://github.com/nspcc-dev/neofs-node
$ git fetch upstream
$ git merge upstream/master
...
@ -79,7 +79,7 @@ Description
```
```
$ git commit -sam '[#123] Add some feature'
$ git commit -am '[#123] Add some feature'
```
### Push to the branch
@ -106,8 +106,7 @@ contributors".
To sign your work, just add a line like this at the end of your commit message:
```
Signed-off-by: Samii Sakisaka <samii@ivunojikan.co.jp>
Signed-off-by: Samii Sakisaka <samii@nspcc.ru>
```
This can easily be done with the `--signoff` option to `git commit`.

View file

@ -1,7 +1,5 @@
# Credits
FrostFS continues the development of NeoFS.
Initial NeoFS research and development (2018-2020) was done by
[NeoSPCC](https://nspcc.ru) team.
@ -10,7 +8,7 @@ In alphabetical order:
- Alexey Vanin
- Anastasia Prasolova
- Anatoly Bogatyrev
- Evgeny Kulikov
- Evgeny Stratonikov
- Leonard Liubich
- Sergei Liubich

Makefile Executable file → Normal file
View file

@ -2,13 +2,13 @@
SHELL = bash
REPO ?= $(shell go list -m)
VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
VERSION ?= $(shell git describe --tags --dirty --match "v*" --always 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
HUB_IMAGE ?= truecloudlab/frostfs
HUB_IMAGE ?= nspccdev/neofs
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
GO_VERSION ?= 1.19
LINT_VERSION ?= 1.50.0
GO_VERSION ?= 1.17
LINT_VERSION ?= 1.46.2
ARCH = amd64
BIN = bin
@ -16,20 +16,13 @@ RELEASE = release
DIRS = $(BIN) $(RELEASE)
# List of binaries to build.
CMDS = $(notdir $(basename $(wildcard cmd/frostfs-*)))
CMDS = $(notdir $(basename $(wildcard cmd/*)))
BINS = $(addprefix $(BIN)/, $(CMDS))
# .deb package versioning
OS_RELEASE = $(shell lsb_release -cs)
PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \
sed -E "s/(.*)-(g[a-fA-F0-9]{6,8})(.*)/\1\3~\2/" | \
sed "s/-/~/")-${OS_RELEASE}
.PHONY: help all images dep clean fmts fmt imports test lint docker/lint
prepare-release debpackage pre-commit unpre-commit
.PHONY: help all images dep clean fmts fmt imports test lint docker/lint prepare-release
# To build a specific binary, use its name prefixed with bin/ as a target
# For example `make bin/frostfs-node` will build only storage node binary
# For example `make bin/neofs-node` will build only storage node binary
# Just `make` will build all possible binaries
all: $(DIRS) $(BINS)
@ -50,7 +43,7 @@ $(DIRS):
# Prepare binaries and archives for release
.ONESHELL:
prepare-release: docker/all
@for file in `ls -1 $(BIN)/frostfs-*`; do
@for file in `ls -1 $(BIN)/neofs-*`; do
cp $$file $(RELEASE)/`basename $$file`-$(ARCH)
strip $(RELEASE)/`basename $$file`-$(ARCH)
tar -czf $(RELEASE)/`basename $$file`-$(ARCH).tar.gz $(RELEASE)/`basename $$file`-$(ARCH)
@ -67,26 +60,26 @@ dep:
# Regenerate proto files:
protoc:
@GOPRIVATE=github.com/TrueCloudLab go mod vendor
@GOPRIVATE=github.com/nspcc-dev go mod vendor
# Install specific version for protobuf lib
@go list -f '{{.Path}}/...@{{.Version}}' -m github.com/golang/protobuf | xargs go install -v
@GOBIN=$(abspath $(BIN)) go install -mod=mod -v git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/protogen
@GOBIN=$(abspath $(BIN)) go install -mod=mod -v github.com/nspcc-dev/neofs-api-go/v2/util/protogen
# Protoc generate
@for f in `find . -type f -name '*.proto' -not -path './vendor/*'`; do \
echo "⇒ Processing $$f "; \
protoc \
--proto_path=.:./vendor:/usr/local/include \
--plugin=protoc-gen-go-frostfs=$(BIN)/protogen \
--go-frostfs_out=. --go-frostfs_opt=paths=source_relative \
--plugin=protoc-gen-go-neofs=$(BIN)/protogen \
--go-neofs_out=. --go-neofs_opt=paths=source_relative \
--go_out=. --go_opt=paths=source_relative \
--go-grpc_opt=require_unimplemented_servers=false \
--go-grpc_out=. --go-grpc_opt=paths=source_relative $$f; \
done
rm -rf vendor
# Build FrostFS component's docker image
# Build NeoFS component's docker image
image-%:
@echo "⇒ Build FrostFS $* docker image "
@echo "⇒ Build NeoFS $* docker image "
@docker build \
--build-arg REPO=$(REPO) \
--build-arg VERSION=$(VERSION) \
@ -140,33 +133,12 @@ docker/lint:
--env HOME=/src \
golangci/golangci-lint:v$(LINT_VERSION) bash -c 'cd /src/ && make lint'
# Activate pre-commit hooks
pre-commit:
pre-commit install -t pre-commit -t commit-msg
# Deactivate pre-commit hooks
unpre-commit:
pre-commit uninstall -t pre-commit -t commit-msg
# Print version
version:
@echo $(VERSION)
# Delete built artifacts
clean:
rm -rf vendor
rm -rf .cache
rm -rf $(BIN)
rm -rf $(RELEASE)
# Package for Debian
debpackage:
dch -b --package frostfs-node \
--controlmaint \
--newversion $(PKG_VERSION) \
--distribution $(OS_RELEASE) \
"Please see CHANGELOG.md for code changes for $(VERSION)"
dpkg-buildpackage --no-sign -b
debclean:
dh clean

View file

@ -1,40 +1,39 @@
<p align="center">
<img src="./.github/logo.svg" width="500px" alt="FrostFS">
<img src="./.github/logo.svg" width="500px" alt="NeoFS">
</p>
<p align="center">
<a href="https://frostfs.info">FrostFS</a> is a decentralized distributed object storage integrated with the <a href="https://neo.org">NEO Blockchain</a>.
<a href="https://fs.neo.org">NeoFS</a> is a decentralized distributed object storage integrated with the <a href="https://neo.org">NEO Blockchain</a>.
</p>
---
[![Report](https://goreportcard.com/badge/github.com/TrueCloudLab/frostfs-node)](https://goreportcard.com/report/github.com/TrueCloudLab/frostfs-node)
![GitHub release (latest SemVer)](https://img.shields.io/github/v/release/TrueCloudLab/frostfs-node?sort=semver)
![License](https://img.shields.io/github/license/TrueCloudLab/frostfs-node.svg?style=popout)
[![Report](https://goreportcard.com/badge/github.com/nspcc-dev/neofs-node)](https://goreportcard.com/report/github.com/nspcc-dev/neofs-node)
![GitHub release (latest SemVer)](https://img.shields.io/github/v/release/nspcc-dev/neofs-node?sort=semver)
![License](https://img.shields.io/github/license/nspcc-dev/neofs-node.svg?style=popout)
# Overview
FrostFS Nodes are organized in a peer-to-peer network that takes care of storing
NeoFS Nodes are organized in a peer-to-peer network that takes care of storing
and distributing user's data. Any Neo user may participate in the network and
get paid for providing storage resources to other users or store their data in
FrostFS and pay a competitive price for it.
NeoFS and pay a competitive price for it.
Users can reliably store object data in the FrostFS network and have a transparent
Users can reliably store object data in the NeoFS network and have a transparent
data placement process due to a decentralized architecture and flexible storage
policies. Each node is responsible for executing the storage policies that the
users select for geographical location, reliability level, number of nodes, type
of disks, capacity, etc. Thus, FrostFS gives full control over data to users.
of disks, capacity, etc. Thus, NeoFS gives full control over data to users.
Deep [Neo Blockchain](https://neo.org) integration allows FrostFS to be used by
Deep [Neo Blockchain](https://neo.org) integration allows NeoFS to be used by
dApps directly from
[NeoVM](https://docs.neo.org/docs/en-us/basic/technology/neovm.html) on the
[Smart Contract](https://docs.neo.org/docs/en-us/intro/glossary.html)
code level. This way dApps are not limited to on-chain storage and can
manipulate large amounts of data without paying a prohibitive price.
FrostFS has a native [gRPC API](https://git.frostfs.info/TrueCloudLab/frostfs-api) and has
NeoFS has a native [gRPC API](https://github.com/nspcc-dev/neofs-api) and has
protocol gateways for popular protocols such as [AWS
S3](https://github.com/TrueCloudLab/frostfs-s3-gw),
[HTTP](https://github.com/TrueCloudLab/frostfs-http-gw),
S3](https://github.com/nspcc-dev/neofs-s3-gw),
[HTTP](https://github.com/nspcc-dev/neofs-http-gw),
[FUSE](https://wikipedia.org/wiki/Filesystem_in_Userspace) and
[sFTP](https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol) allowing
developers to integrate applications without rewriting their code.
@ -44,12 +43,12 @@ developers to integrate applications without rewriting their code.
Now, we only support GNU/Linux on amd64 CPUs with AVX/AVX2 instructions. More
platforms will be officially supported after release `1.0`.
The latest version of frostfs-node works with frostfs-contract
[v0.16.0](https://github.com/TrueCloudLab/frostfs-contract/releases/tag/v0.16.0).
The latest version of neofs-node works with neofs-contract
[v0.15.3](https://github.com/nspcc-dev/neofs-contract/releases/tag/v0.15.3).
# Building
To make all binaries you need Go 1.18+ and `make`:
To make all binaries you need Go 1.17+ and `make`:
```
make all
```
@ -57,7 +56,7 @@ The resulting binaries will appear in `bin/` folder.
To make a specific binary use:
```
make bin/frostfs-<name>
make bin/neofs-<name>
```
See the list of all available commands in the `cmd` folder.
@ -66,12 +65,12 @@ See the list of all available commands in the `cmd` folder.
Building can also be performed in a container:
```
make docker/all # build all binaries
make docker/bin/frostfs-<name> # build a specific binary
make docker/bin/neofs-<name> # build a specific binary
```
## Docker images
To make docker images suitable for use in [frostfs-dev-env](https://github.com/TrueCloudLab/frostfs-dev-env/) use:
To make docker images suitable for use in [neofs-dev-env](https://github.com/nspcc-dev/neofs-dev-env/) use:
```
make images
```
@ -86,7 +85,7 @@ the feature/topic you are going to implement.
# Credits
FrostFS is maintained by [True Cloud Lab](https://github.com/TrueCloudLab/) with the help and
NeoFS is maintained by [NeoSPCC](https://nspcc.ru) with the help and
contributions from community members.
Please see [CREDITS](CREDITS.md) for details.

View file

@ -1 +1 @@
v0.35.0
v0.31.0

View file

@ -1,14 +0,0 @@
package commonflags
const (
ConfigFlag = "config"
ConfigFlagShorthand = "c"
ConfigFlagUsage = "Config file"
ConfigDirFlag = "config-dir"
ConfigDirFlagUsage = "Config directory"
Verbose = "verbose"
VerboseShorthand = "v"
VerboseUsage = "Verbose output"
)

View file

@ -1,29 +0,0 @@
package config
import (
"github.com/spf13/cobra"
)
const configPathFlag = "path"
var (
// RootCmd is a root command of config section.
RootCmd = &cobra.Command{
Use: "config",
Short: "Section for frostfs-adm config related commands",
}
initCmd = &cobra.Command{
Use: "init",
Short: "Initialize basic frostfs-adm configuration file",
Example: `frostfs-adm config init
frostfs-adm config init --path .config/frostfs-adm.yml`,
RunE: initConfig,
}
)
func init() {
RootCmd.AddCommand(initCmd)
initCmd.Flags().String(configPathFlag, "", "Path to config (default ~/.frostfs/adm/config.yml)")
}

View file

@ -1,251 +0,0 @@
package morph
import (
"bytes"
"errors"
"fmt"
"strings"
"text/tabwriter"
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/emit"
"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
const lastGlagoliticLetter = 41
type contractDumpInfo struct {
hash util.Uint160
name string
version string
}
func dumpContractHashes(cmd *cobra.Command, _ []string) error {
c, err := getN3Client(viper.GetViper())
if err != nil {
return fmt.Errorf("can't create N3 client: %w", err)
}
cs, err := c.GetContractStateByID(1)
if err != nil {
return err
}
zone, _ := cmd.Flags().GetString(customZoneFlag)
if zone != "" {
return dumpCustomZoneHashes(cmd, cs.Hash, zone, c)
}
infos := []contractDumpInfo{{name: nnsContract, hash: cs.Hash}}
irSize := 0
for ; irSize < lastGlagoliticLetter; irSize++ {
ok, err := nnsIsAvailable(c, cs.Hash, getAlphabetNNSDomain(irSize))
if err != nil {
return err
} else if ok {
break
}
}
bw := io.NewBufBinWriter()
if irSize != 0 {
bw.Reset()
for i := 0; i < irSize; i++ {
emit.AppCall(bw.BinWriter, cs.Hash, "resolve", callflag.ReadOnly,
getAlphabetNNSDomain(i),
int64(nns.TXT))
}
alphaRes, err := c.InvokeScript(bw.Bytes(), nil)
if err != nil {
return fmt.Errorf("can't fetch info from NNS: %w", err)
}
for i := 0; i < irSize; i++ {
info := contractDumpInfo{name: fmt.Sprintf("alphabet %d", i)}
if h, err := parseNNSResolveResult(alphaRes.Stack[i]); err == nil {
info.hash = h
}
infos = append(infos, info)
}
}
for _, ctrName := range contractList {
bw.Reset()
emit.AppCall(bw.BinWriter, cs.Hash, "resolve", callflag.ReadOnly,
ctrName+".frostfs", int64(nns.TXT))
res, err := c.InvokeScript(bw.Bytes(), nil)
if err != nil {
return fmt.Errorf("can't fetch info from NNS: %w", err)
}
info := contractDumpInfo{name: ctrName}
if len(res.Stack) != 0 {
if h, err := parseNNSResolveResult(res.Stack[0]); err == nil {
info.hash = h
}
}
infos = append(infos, info)
}
fillContractVersion(cmd, c, infos)
printContractInfo(cmd, infos)
return nil
}
func dumpCustomZoneHashes(cmd *cobra.Command, nnsHash util.Uint160, zone string, c Client) error {
const nnsMaxTokens = 100
inv := invoker.New(c, nil)
if !strings.HasPrefix(zone, ".") {
zone = "." + zone
}
var infos []contractDumpInfo
processItem := func(item stackitem.Item) {
bs, err := item.TryBytes()
if err != nil {
cmd.PrintErrf("Invalid NNS record: %v\n", err)
return
}
if !bytes.HasSuffix(bs, []byte(zone)) || bytes.HasPrefix(bs, []byte(morphClient.NNSGroupKeyName)) {
// Related https://github.com/nspcc-dev/neofs-contract/issues/316.
return
}
h, err := nnsResolveHash(inv, nnsHash, string(bs))
if err != nil {
cmd.PrintErrf("Could not resolve name %s: %v\n", string(bs), err)
return
}
infos = append(infos, contractDumpInfo{
hash: h,
name: strings.TrimSuffix(string(bs), zone),
})
}
sessionID, iter, err := unwrap.SessionIterator(inv.Call(nnsHash, "tokens"))
if err != nil {
if errors.Is(err, unwrap.ErrNoSessionID) {
items, err := unwrap.Array(inv.CallAndExpandIterator(nnsHash, "tokens", nnsMaxTokens))
if err != nil {
return fmt.Errorf("can't get a list of NNS domains: %w", err)
}
if len(items) == nnsMaxTokens {
cmd.PrintErrln("Provided RPC endpoint doesn't support sessions, some hashes might be lost.")
}
for i := range items {
processItem(items[i])
}
} else {
return err
}
} else {
defer func() {
_ = inv.TerminateSession(sessionID)
}()
items, err := inv.TraverseIterator(sessionID, &iter, nnsMaxTokens)
for err == nil && len(items) != 0 {
for i := range items {
processItem(items[i])
}
items, err = inv.TraverseIterator(sessionID, &iter, nnsMaxTokens)
}
if err != nil {
return fmt.Errorf("error during NNS domains iteration: %w", err)
}
}
fillContractVersion(cmd, c, infos)
printContractInfo(cmd, infos)
return nil
}
func parseContractVersion(item stackitem.Item) string {
bi, err := item.TryInteger()
if err != nil || bi.Sign() == 0 || !bi.IsInt64() {
return "unknown"
}
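// Contract versions are packed as major*1_000_000 + minor*1_000 + patch,
// e.g. 3_005_001 corresponds to v3.5.1.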
v := bi.Int64()
major := v / 1_000_000
minor := (v % 1_000_000) / 1000
patch := v % 1_000
return fmt.Sprintf("v%d.%d.%d", major, minor, patch)
}
func printContractInfo(cmd *cobra.Command, infos []contractDumpInfo) {
if len(infos) == 0 {
return
}
buf := bytes.NewBuffer(nil)
tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)
for _, info := range infos {
if info.version == "" {
info.version = "unknown"
}
_, _ = tw.Write([]byte(fmt.Sprintf("%s\t(%s):\t%s\n",
info.name, info.version, info.hash.StringLE())))
}
_ = tw.Flush()
cmd.Print(buf.String())
}
func fillContractVersion(cmd *cobra.Command, c Client, infos []contractDumpInfo) {
bw := io.NewBufBinWriter()
sub := io.NewBufBinWriter()
for i := range infos {
if infos[i].hash.Equals(util.Uint160{}) {
emit.Int(bw.BinWriter, 0)
} else {
sub.Reset()
emit.AppCall(sub.BinWriter, infos[i].hash, "version", callflag.NoneFlag)
if sub.Err != nil {
panic(fmt.Errorf("BUG: can't create version script: %w", sub.Err))
}
script := sub.Bytes()
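// Wrap each version call in TRY/ENDTRY: if the call faults (e.g. the contract
// has no `version` method), execution jumps to the PUSH0 below and 0 is pushed
// instead of aborting the whole script; on success ENDTRY skips that PUSH0.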
emit.Instruction(bw.BinWriter, opcode.TRY, []byte{byte(3 + len(script) + 2), 0})
bw.BinWriter.WriteBytes(script)
emit.Instruction(bw.BinWriter, opcode.ENDTRY, []byte{2 + 1})
emit.Opcodes(bw.BinWriter, opcode.PUSH0)
}
}
emit.Opcodes(bw.BinWriter, opcode.NOP) // for the last ENDTRY target
if bw.Err != nil {
panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err))
}
res, err := c.InvokeScript(bw.Bytes(), nil)
if err != nil {
cmd.Printf("Can't fetch version from NNS: %v\n", err)
return
}
if res.State == vmstate.Halt.String() {
for i := range res.Stack {
infos[i].version = parseContractVersion(res.Stack[i])
}
}
}

View file

@ -1,298 +0,0 @@
package morph
import (
"encoding/hex"
"errors"
"fmt"
"strconv"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/emit"
"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
)
const defaultExpirationTime = 10 * 365 * 24 * time.Hour / time.Second
func (c *initializeContext) setNNS() error {
nnsCs, err := c.Client.GetContractStateByID(1)
if err != nil {
return err
}
ok, err := c.nnsRootRegistered(nnsCs.Hash, "frostfs")
if err != nil {
return err
} else if !ok {
bw := io.NewBufBinWriter()
emit.AppCall(bw.BinWriter, nnsCs.Hash, "register", callflag.All,
"frostfs", c.CommitteeAcc.Contract.ScriptHash(),
"ops@nspcc.ru", int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
emit.Opcodes(bw.BinWriter, opcode.ASSERT)
if err := c.sendCommitteeTx(bw.Bytes(), true); err != nil {
return fmt.Errorf("can't add domain root to NNS: %w", err)
}
if err := c.awaitTx(); err != nil {
return err
}
}
alphaCs := c.getContract(alphabetContract)
for i, acc := range c.Accounts {
alphaCs.Hash = state.CreateContractHash(acc.Contract.ScriptHash(), alphaCs.NEF.Checksum, alphaCs.Manifest.Name)
domain := getAlphabetNNSDomain(i)
if err := c.nnsRegisterDomain(nnsCs.Hash, alphaCs.Hash, domain); err != nil {
return err
}
c.Command.Printf("NNS: Set %s -> %s\n", domain, alphaCs.Hash.StringLE())
}
for _, ctrName := range contractList {
cs := c.getContract(ctrName)
domain := ctrName + ".frostfs"
if err := c.nnsRegisterDomain(nnsCs.Hash, cs.Hash, domain); err != nil {
return err
}
c.Command.Printf("NNS: Set %s -> %s\n", domain, cs.Hash.StringLE())
}
groupKey := c.ContractWallet.Accounts[0].PrivateKey().PublicKey()
err = c.updateNNSGroup(nnsCs.Hash, groupKey)
if err != nil {
return err
}
c.Command.Printf("NNS: Set %s -> %s\n", morphClient.NNSGroupKeyName, hex.EncodeToString(groupKey.Bytes()))
return c.awaitTx()
}
func (c *initializeContext) updateNNSGroup(nnsHash util.Uint160, pub *keys.PublicKey) error {
bw := io.NewBufBinWriter()
keyAlreadyAdded, domainRegCodeEmitted, err := c.emitUpdateNNSGroupScript(bw, nnsHash, pub)
if keyAlreadyAdded || err != nil {
return err
}
script := bw.Bytes()
if domainRegCodeEmitted {
w := io.NewBufBinWriter()
emit.Instruction(w.BinWriter, opcode.INITSSLOT, []byte{1})
wrapRegisterScriptWithPrice(w, nnsHash, script)
script = w.Bytes()
}
return c.sendCommitteeTx(script, true)
}
// emitUpdateNNSGroupScript emits script for updating group key stored in NNS.
// First return value is true iff the key is already there and nothing should be done.
// Second return value is true iff a domain registration code was emitted.
func (c *initializeContext) emitUpdateNNSGroupScript(bw *io.BufBinWriter, nnsHash util.Uint160, pub *keys.PublicKey) (bool, bool, error) {
isAvail, err := nnsIsAvailable(c.Client, nnsHash, morphClient.NNSGroupKeyName)
if err != nil {
return false, false, err
}
if !isAvail {
currentPub, err := nnsResolveKey(c.ReadOnlyInvoker, nnsHash, morphClient.NNSGroupKeyName)
if err != nil {
return false, false, err
}
if pub.Equal(currentPub) {
return true, false, nil
}
}
if isAvail {
emit.AppCall(bw.BinWriter, nnsHash, "register", callflag.All,
morphClient.NNSGroupKeyName, c.CommitteeAcc.Contract.ScriptHash(),
"ops@nspcc.ru", int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
emit.Opcodes(bw.BinWriter, opcode.ASSERT)
}
emit.AppCall(bw.BinWriter, nnsHash, "deleteRecords", callflag.All, "group.frostfs", int64(nns.TXT))
emit.AppCall(bw.BinWriter, nnsHash, "addRecord", callflag.All,
"group.frostfs", int64(nns.TXT), hex.EncodeToString(pub.Bytes()))
return false, isAvail, nil
}
func getAlphabetNNSDomain(i int) string {
return alphabetContract + strconv.FormatUint(uint64(i), 10) + ".frostfs"
}
// wrapRegisterScriptWithPrice wraps a given script with `getPrice`/`setPrice` calls for NNS.
// It is intended to be used for a single transaction, and not as a part of other scripts.
// It is assumed that the script already contains static slot initialization code;
// the first slot (with index 0) is used to store the price.
func wrapRegisterScriptWithPrice(w *io.BufBinWriter, nnsHash util.Uint160, s []byte) {
if len(s) == 0 {
return
}
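// Save the current registration price to static slot 0, temporarily set it
// to 1, run the wrapped script, then restore the saved price.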
emit.AppCall(w.BinWriter, nnsHash, "getPrice", callflag.All)
emit.Opcodes(w.BinWriter, opcode.STSFLD0)
emit.AppCall(w.BinWriter, nnsHash, "setPrice", callflag.All, 1)
w.WriteBytes(s)
emit.Opcodes(w.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK)
emit.AppCallNoArgs(w.BinWriter, nnsHash, "setPrice", callflag.All)
if w.Err != nil {
panic(fmt.Errorf("BUG: can't wrap register script: %w", w.Err))
}
}
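// nnsRegisterDomainScript returns a script that registers the domain if it is
// still available. The second return value is true iff the domain is already
// taken and resolves to expectedHash, so no registration is needed.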
func (c *initializeContext) nnsRegisterDomainScript(nnsHash, expectedHash util.Uint160, domain string) ([]byte, bool, error) {
ok, err := nnsIsAvailable(c.Client, nnsHash, domain)
if err != nil {
return nil, false, err
}
if ok {
bw := io.NewBufBinWriter()
emit.AppCall(bw.BinWriter, nnsHash, "register", callflag.All,
domain, c.CommitteeAcc.Contract.ScriptHash(),
"ops@nspcc.ru", int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
emit.Opcodes(bw.BinWriter, opcode.ASSERT)
if bw.Err != nil {
panic(bw.Err)
}
return bw.Bytes(), false, nil
}
s, err := nnsResolveHash(c.ReadOnlyInvoker, nnsHash, domain)
if err != nil {
return nil, false, err
}
return nil, s == expectedHash, nil
}
func (c *initializeContext) nnsRegisterDomain(nnsHash, expectedHash util.Uint160, domain string) error {
script, ok, err := c.nnsRegisterDomainScript(nnsHash, expectedHash, domain)
if ok || err != nil {
return err
}
w := io.NewBufBinWriter()
emit.Instruction(w.BinWriter, opcode.INITSSLOT, []byte{1})
wrapRegisterScriptWithPrice(w, nnsHash, script)
emit.AppCall(w.BinWriter, nnsHash, "deleteRecords", callflag.All, domain, int64(nns.TXT))
emit.AppCall(w.BinWriter, nnsHash, "addRecord", callflag.All,
domain, int64(nns.TXT), expectedHash.StringLE())
emit.AppCall(w.BinWriter, nnsHash, "addRecord", callflag.All,
domain, int64(nns.TXT), address.Uint160ToString(expectedHash))
return c.sendCommitteeTx(w.Bytes(), true)
}
func (c *initializeContext) nnsRootRegistered(nnsHash util.Uint160, zone string) (bool, error) {
res, err := c.CommitteeAct.Call(nnsHash, "isAvailable", "name."+zone)
if err != nil {
return false, err
}
return res.State == vmstate.Halt.String(), nil
}
var errMissingNNSRecord = errors.New("missing NNS record")
// Returns errMissingNNSRecord if invocation fault exception contains "token not found".
func nnsResolveHash(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (util.Uint160, error) {
item, err := nnsResolve(inv, nnsHash, domain)
if err != nil {
return util.Uint160{}, err
}
return parseNNSResolveResult(item)
}
func nnsResolve(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (stackitem.Item, error) {
return unwrap.Item(inv.Call(nnsHash, "resolve", domain, int64(nns.TXT)))
}
func nnsResolveKey(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (*keys.PublicKey, error) {
res, err := nnsResolve(inv, nnsHash, domain)
if err != nil {
return nil, err
}
if _, ok := res.Value().(stackitem.Null); ok {
return nil, errors.New("NNS record is missing")
}
arr, ok := res.Value().([]stackitem.Item)
if !ok {
return nil, errors.New("API of the NNS contract method `resolve` has changed")
}
for i := range arr {
var bs []byte
bs, err = arr[i].TryBytes()
if err != nil {
continue
}
return keys.NewPublicKeyFromString(string(bs))
}
return nil, errors.New("no valid keys are found")
}
// parseNNSResolveResult parses the result of resolving NNS record.
// It works with multiple formats (corresponding to multiple NNS versions).
// If array of hashes is provided, it returns only the first one.
func parseNNSResolveResult(res stackitem.Item) (util.Uint160, error) {
arr, ok := res.Value().([]stackitem.Item)
if !ok {
arr = []stackitem.Item{res}
}
if _, ok := res.Value().(stackitem.Null); ok || len(arr) == 0 {
return util.Uint160{}, errors.New("NNS record is missing")
}
for i := range arr {
bs, err := arr[i].TryBytes()
if err != nil {
continue
}
// We support several formats for hash encoding, this logic should be maintained in sync
// with nnsResolve from pkg/morph/client/nns.go
h, err := util.Uint160DecodeStringLE(string(bs))
if err == nil {
return h, nil
}
h, err = address.StringToUint160(string(bs))
if err == nil {
return h, nil
}
}
return util.Uint160{}, errors.New("no valid hashes are found")
}
func nnsIsAvailable(c Client, nnsHash util.Uint160, name string) (bool, error) {
switch ct := c.(type) {
case *rpcclient.Client:
return ct.NNSIsAvailable(nnsHash, name)
default:
b, err := unwrap.Bool(invokeFunction(c, nnsHash, "isAvailable", []any{name}, nil))
if err != nil {
return false, fmt.Errorf("`isAvailable`: invalid response: %w", err)
}
return b, nil
}
}

View file

@ -1,121 +0,0 @@
package morph
import (
"encoding/hex"
"os"
"path/filepath"
"strconv"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
"github.com/nspcc-dev/neo-go/pkg/config"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/vm"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/viper"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
)
const (
contractsPath = "../../../../../../frostfs-contract/frostfs-contract-v0.16.0.tar.gz"
protoFileName = "proto.yml"
)
func TestInitialize(t *testing.T) {
// This test needs frostfs-contract tarball, so it is skipped by default.
// It is here for performing local testing after the changes.
t.Skip()
t.Run("1 nodes", func(t *testing.T) {
testInitialize(t, 1)
})
t.Run("4 nodes", func(t *testing.T) {
testInitialize(t, 4)
})
t.Run("7 nodes", func(t *testing.T) {
testInitialize(t, 7)
})
}
func testInitialize(t *testing.T, committeeSize int) {
testdataDir := t.TempDir()
v := viper.GetViper()
generateTestData(t, testdataDir, committeeSize)
v.Set(protoConfigPath, filepath.Join(testdataDir, protoFileName))
// Set to the path or remove the next statement to download from the network.
require.NoError(t, initCmd.Flags().Set(contractsInitFlag, contractsPath))
v.Set(localDumpFlag, filepath.Join(testdataDir, "out"))
v.Set(alphabetWalletsFlag, testdataDir)
v.Set(epochDurationInitFlag, 1)
v.Set(maxObjectSizeInitFlag, 1024)
setTestCredentials(v, committeeSize)
require.NoError(t, initializeSideChainCmd(initCmd, nil))
t.Run("force-new-epoch", func(t *testing.T) {
require.NoError(t, forceNewEpochCmd(forceNewEpoch, nil))
})
t.Run("set-config", func(t *testing.T) {
require.NoError(t, setConfigCmd(setConfig, []string{"MaintenanceModeAllowed=true"}))
})
t.Run("set-policy", func(t *testing.T) {
require.NoError(t, setPolicyCmd(setPolicy, []string{"ExecFeeFactor=1"}))
})
t.Run("remove-node", func(t *testing.T) {
pk, err := keys.NewPrivateKey()
require.NoError(t, err)
pub := hex.EncodeToString(pk.PublicKey().Bytes())
require.NoError(t, removeNodesCmd(removeNodes, []string{pub}))
})
}
func generateTestData(t *testing.T, dir string, size int) {
v := viper.GetViper()
v.Set(alphabetWalletsFlag, dir)
sizeStr := strconv.FormatUint(uint64(size), 10)
require.NoError(t, generateAlphabetCmd.Flags().Set(alphabetSizeFlag, sizeStr))
setTestCredentials(v, size)
require.NoError(t, generateAlphabetCreds(generateAlphabetCmd, nil))
var pubs []string
for i := 0; i < size; i++ {
p := filepath.Join(dir, innerring.GlagoliticLetter(i).String()+".json")
w, err := wallet.NewWalletFromFile(p)
require.NoError(t, err, "wallet doesn't exist")
for _, acc := range w.Accounts {
if acc.Label == singleAccountName {
pub, ok := vm.ParseSignatureContract(acc.Contract.Script)
require.True(t, ok)
pubs = append(pubs, hex.EncodeToString(pub))
continue
}
}
}
cfg := config.Config{}
cfg.ProtocolConfiguration.Magic = 12345
cfg.ProtocolConfiguration.ValidatorsCount = size
cfg.ProtocolConfiguration.SecondsPerBlock = 1
cfg.ProtocolConfiguration.StandbyCommittee = pubs // sorted by Glagolitic letters
cfg.ProtocolConfiguration.P2PSigExtensions = true
cfg.ProtocolConfiguration.VerifyTransactions = true
cfg.ProtocolConfiguration.VerifyBlocks = true
data, err := yaml.Marshal(cfg)
require.NoError(t, err)
protoPath := filepath.Join(dir, protoFileName)
require.NoError(t, os.WriteFile(protoPath, data, os.ModePerm))
}
func setTestCredentials(v *viper.Viper, size int) {
for i := 0; i < size; i++ {
v.Set("credentials."+innerring.GlagoliticLetter(i).String(), strconv.FormatUint(uint64(i), 10))
}
v.Set("credentials.contract", testContractPassword)
}

View file

@ -1,29 +0,0 @@
package morph
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
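// listNetmapCandidatesNodes resolves the netmap contract hash via NNS and
// pretty-prints the current list of netmap candidates.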
func listNetmapCandidatesNodes(cmd *cobra.Command, _ []string) {
c, err := getN3Client(viper.GetViper())
commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err)
inv := invoker.New(c, nil)
cs, err := c.GetContractStateByID(1)
commonCmd.ExitOnErr(cmd, "can't get NNS contract info: %w", err)
nmHash, err := nnsResolveHash(inv, cs.Hash, netmapContract+".frostfs")
commonCmd.ExitOnErr(cmd, "can't get netmap contract hash: %w", err)
res, err := inv.Call(nmHash, "netmapCandidates")
commonCmd.ExitOnErr(cmd, "can't fetch netmap candidates from the netmap contract: %w", err)
nm, err := netmap.DecodeNetMap(res.Stack)
commonCmd.ExitOnErr(cmd, "unable to decode netmap: %w", err)
commonCmd.PrettyPrintNetMap(cmd, *nm, !viper.GetBool(commonflags.Verbose))
}

View file

@ -1,21 +0,0 @@
package morph
import (
"fmt"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
func updateContracts(cmd *cobra.Command, _ []string) error {
wCtx, err := newInitializeContext(cmd, viper.GetViper())
if err != nil {
return fmt.Errorf("initialization error: %w", err)
}
if err := wCtx.deployNNS(updateMethodName); err != nil {
return err
}
return wCtx.updateContracts()
}

View file

@ -1,85 +0,0 @@
package modules
import (
"os"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/storagecfg"
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/autocomplete"
utilConfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/gendoc"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var (
rootCmd = &cobra.Command{
Use: "frostfs-adm",
Short: "FrostFS Administrative Tool",
Long: `FrostFS Administrative Tool provides functions to setup and
manage FrostFS network deployment.`,
RunE: entryPoint,
SilenceUsage: true,
}
)
func init() {
cobra.OnInitialize(func() { initConfig(rootCmd) })
// we need to init viper config to bind viper and cobra configurations for
// rpc endpoint, alphabet wallet dir, key credentials, etc.
// use stdout as default output for cmd.Print()
rootCmd.SetOut(os.Stdout)
rootCmd.PersistentFlags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
rootCmd.PersistentFlags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
rootCmd.PersistentFlags().BoolP(commonflags.Verbose, commonflags.VerboseShorthand, false, commonflags.VerboseUsage)
_ = viper.BindPFlag(commonflags.Verbose, rootCmd.PersistentFlags().Lookup(commonflags.Verbose))
rootCmd.Flags().Bool("version", false, "Application version")
rootCmd.AddCommand(config.RootCmd)
rootCmd.AddCommand(morph.RootCmd)
rootCmd.AddCommand(storagecfg.RootCmd)
rootCmd.AddCommand(autocomplete.Command("frostfs-adm"))
rootCmd.AddCommand(gendoc.Command(rootCmd))
}
func Execute() error {
return rootCmd.Execute()
}
func entryPoint(cmd *cobra.Command, args []string) error {
printVersion, _ := cmd.Flags().GetBool("version")
if printVersion {
cmd.Print(misc.BuildInfo("FrostFS Adm"))
return nil
}
return cmd.Usage()
}
func initConfig(cmd *cobra.Command) {
configFile, err := cmd.Flags().GetString(commonflags.ConfigFlag)
if err != nil {
return
}
if configFile != "" {
viper.SetConfigType("yml")
viper.SetConfigFile(configFile)
_ = viper.ReadInConfig() // if config file is set but unavailable, ignore it
}
configDir, err := cmd.Flags().GetString(commonflags.ConfigDirFlag)
if err != nil {
return
}
if configDir != "" {
_ = utilConfig.ReadConfigDir(viper.GetViper(), configDir) // if config files cannot be read, ignore it
}
}

View file

@ -1,75 +0,0 @@
# How FrostFS CLI uses session mechanism of the FrostFS
## Overview
FrostFS sessions implement a mechanism for issuing a power of attorney by one
party to another. A trusted party can provide a so-called session token as
proof of the right to act on behalf of another member of the network. The
client of operations carried out with such a token is the user who opened
the session. The token contains information that limits the power of attorney,
such as the action context or lifetime.
The client confirms trust in a third party by signing its public (session) key
with their private key. Any operation signed with the private session key and
an attached session token is treated as performed by the original client.
## Types
FrostFS CLI supports two ways to execute an operation within a session, depending on
whether the user of the command application is an original user (1) or a trusted
one (2).
### Dynamic
For case (1), the CLI user can only open dynamic sessions. The protocol call
`SessionService.Create` is used for this purpose. As a result of the call, a
private session key will be generated on the server, thus making the remote
server trusted. This type of session is useful when the client needs to
transfer part of the responsibility for the formation of strict system elements
to the trusted server. At the moment, the approach is applicable only to
creating objects.
```shell
$ frostfs-cli session create --rpc-endpoint <server_ip> --out ./blank_token
```
After this example command, the remote node holds the session private key, while
its public part is written into the session token encoded into the output file.
Later this token can be attached to operations which support dynamic
sessions; the token is then finalized and signed by the CLI itself.
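For illustration, such a blank token can be attached to `object put` via the
`--session` flag. This is only a sketch: the endpoint, wallet and container ID
are placeholders, and the flag spellings follow the other examples in this
document.
```shell
$ frostfs-cli object put --rpc-endpoint <server_ip> --wallet <client_wallet> \
    --cid <container_id> --file <path_to_file> \
    --session ./blank_token
```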
### Static
For case (2), the CLI user acts on behalf of the person who issued the session
token. Unlike (1), the token must be fully prepared on the side of the
original client, and the CLI only reads it. A ready token MUST have:
- correct context (object, container, etc.)
- valid lifetime
- public session key corresponding to the CLI key
- valid client signature
To sign the session token, exec:
```shell
$ frostfs-cli --wallet <client_wallet> util sign session-token --from ./blank_token --to ./token
```
Once the token is signed, it MUST NOT be modified.
## Commands
### Object
Here are sub-commands of `object` command which support only dynamic sessions (1):
- `put`
- `delete`
- `lock`
These commands accept a blank token of the dynamically opened session or open
a session internally if one has not been opened yet.
All other `object` sub-commands support only static sessions (2).
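A signed static token is attached the same way, for example to read an object
header on behalf of the issuer. The sketch below assumes the usual `--oid` flag
for the object ID; the other identifiers are placeholders.
```shell
$ frostfs-cli object head --rpc-endpoint <server_ip> --wallet <trusted_wallet> \
    --cid <container_id> --oid <object_id> \
    --session ./token
```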
### Container
List of commands supporting sessions (static only):
- `create`
- `delete`
- `set-eacl`

View file

@ -1,34 +0,0 @@
# Extended headers
## Overview
Extended headers are used in requests and responses. They may contain any
user-defined headers to be interpreted at the application level. A key name must be a
unique valid UTF-8 string, and a value can't be empty. Requests or responses with
duplicated header names or with empty header values are considered invalid.
## Existing headers
There are some "well-known" headers starting with `__FROSTFS__` prefix that
affect system behaviour. For backward compatibility, the same set of
"well-known" headers may also use `__NEOFS__` prefix:
* `__FROSTFS__NETMAP_EPOCH` - netmap epoch to use for object placement calculation. The `value` is string
encoded `uint64` in decimal presentation. If set to '0' or omitted, only the
current epoch is used.
* `__FROSTFS__NETMAP_LOOKUP_DEPTH` - if object can't be found using current epoch's netmap, this header limits
how many past epochs the node can look up through. Depth is applied to a current epoch or the value
of `__FROSTFS__NETMAP_EPOCH` attribute. The `value` is string encoded `uint64` in decimal presentation.
If set to '0' or not set, only the current epoch is used.
## `frostfs-cli` commands with `--xhdr`
List of commands with support of extended headers:
* `container list-objects`
* `object delete/get/hash/head/lock/put/range/search`
* `storagegroup delete/get/list/put`
Example:
```shell
$ frostfs-cli object put -r s01.frostfs.devenv:8080 -w wallet.json --cid CID --file FILE --xhdr "__FROSTFS__NETMAP_EPOCH=777"
```
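A similar call can pin an older epoch and let the node look a couple of epochs
back if the object is not found in that epoch's netmap. The sketch below uses
illustrative values and assumes `--xhdr` may be repeated:
```shell
$ frostfs-cli object head -r s01.frostfs.devenv:8080 -w wallet.json --cid CID --oid OID --xhdr "__FROSTFS__NETMAP_EPOCH=776" --xhdr "__FROSTFS__NETMAP_LOOKUP_DEPTH=2"
```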

View file

@ -1,15 +0,0 @@
// Package internal provides functionality for FrostFS CLI application
// communication with FrostFS network.
//
// The base client for accessing remote nodes via FrostFS API is a FrostFS SDK
// Go API client. However, although it encapsulates a useful piece of business
// logic (e.g. the signature mechanism), the FrostFS CLI application does not
// fully use the client's flexible interface.
//
// For this reason, this package provides the functions over the base API client
// that the application needs. This concentrates all use of the client in one
// place, which is convenient both when updating the base client and when
// evaluating the UX of the SDK library. All application packages are therefore
// expected to use this package for any functionality requiring FrostFS API
// communication.
package internal

View file

@ -1,67 +0,0 @@
package common
import (
"encoding/json"
"errors"
"fmt"
"os"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"github.com/spf13/cobra"
)
// ReadBearerToken reads bearer token from the path provided in a specified flag.
func ReadBearerToken(cmd *cobra.Command, flagname string) *bearer.Token {
path, err := cmd.Flags().GetString(flagname)
commonCmd.ExitOnErr(cmd, "", err)
if len(path) == 0 {
return nil
}
PrintVerbose(cmd, "Reading bearer token from file [%s]...", path)
var tok bearer.Token
err = ReadBinaryOrJSON(cmd, &tok, path)
commonCmd.ExitOnErr(cmd, "invalid bearer token: %v", err)
return &tok
}
// BinaryOrJSON is an interface of entities which provide json.Unmarshaler
// and FrostFS binary decoder.
type BinaryOrJSON interface {
Unmarshal([]byte) error
json.Unmarshaler
}
// ReadBinaryOrJSON reads file data using provided path and decodes
// BinaryOrJSON from the data.
func ReadBinaryOrJSON(cmd *cobra.Command, dst BinaryOrJSON, fPath string) error {
PrintVerbose(cmd, "Reading file [%s]...", fPath)
// read the raw file data
data, err := os.ReadFile(fPath)
if err != nil {
return fmt.Errorf("read file <%s>: %w", fPath, err)
}
PrintVerbose(cmd, "Trying to decode binary...")
err = dst.Unmarshal(data)
if err != nil {
PrintVerbose(cmd, "Failed to decode binary: %v", err)
PrintVerbose(cmd, "Trying to decode JSON...")
err = dst.UnmarshalJSON(data)
if err != nil {
PrintVerbose(cmd, "Failed to decode JSON: %v", err)
return errors.New("invalid format")
}
}
return nil
}

View file

@ -1,19 +0,0 @@
package commonflags
import (
"fmt"
"github.com/spf13/cobra"
)
const SessionToken = "session"
// InitSession registers SessionToken flag representing file path to the token of
// the session with the given name. Supports FrostFS-binary and JSON files.
func InitSession(cmd *cobra.Command, name string) {
cmd.Flags().String(
SessionToken,
"",
fmt.Sprintf("Filepath to a JSON- or binary-encoded token of the %s session", name),
)
}

View file

@ -1,62 +0,0 @@
package key
import (
"crypto/ecdsa"
"errors"
"fmt"
"os"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var errCantGenerateKey = errors.New("can't generate new private key")
// Get returns private key from wallet or binary file.
// Ideally we want to touch the file system only at the last step.
// This function assumes that all flags were bound to viper in a `PersistentPreRun`.
func Get(cmd *cobra.Command) *ecdsa.PrivateKey {
pk, err := get(cmd)
commonCmd.ExitOnErr(cmd, "can't fetch private key: %w", err)
return pk
}
func get(cmd *cobra.Command) (*ecdsa.PrivateKey, error) {
keyDesc := viper.GetString(commonflags.WalletPath)
data, err := os.ReadFile(keyDesc)
if err != nil {
return nil, fmt.Errorf("%w: %v", ErrFs, err)
}
priv, err := keys.NewPrivateKeyFromBytes(data)
if err != nil {
w, err := wallet.NewWalletFromFile(keyDesc)
if err == nil {
return FromWallet(cmd, w, viper.GetString(commonflags.Account))
}
return nil, fmt.Errorf("%w: %v", ErrInvalidKey, err)
}
return &priv.PrivateKey, nil
}
// GetOrGenerate is similar to get but generates a new key if commonflags.GenerateKey is set.
func GetOrGenerate(cmd *cobra.Command) *ecdsa.PrivateKey {
pk, err := getOrGenerate(cmd)
commonCmd.ExitOnErr(cmd, "can't fetch private key: %w", err)
return pk
}
func getOrGenerate(cmd *cobra.Command) (*ecdsa.PrivateKey, error) {
if viper.GetBool(commonflags.GenerateKey) {
priv, err := keys.NewPrivateKey()
if err != nil {
return nil, fmt.Errorf("%w: %v", errCantGenerateKey, err)
}
return &priv.PrivateKey, nil
}
return get(cmd)
}

View file

@ -1,7 +0,0 @@
package main
import cmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules"
func main() {
cmd.Execute()
}

View file

@ -1,28 +0,0 @@
package basic
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
"github.com/spf13/cobra"
)
var printACLCmd = &cobra.Command{
Use: "print",
Short: "Pretty print basic ACL from the HEX representation",
Example: `frostfs-cli acl basic print 0x1C8C8CCC`,
Long: `Pretty print basic ACL from the HEX representation.
A few roles have exclusive default access to a set of operations, even if a particular bit denies it.
Containers have access to the operations of the data replication mechanism:
Get, Head, Put, Search, Hash.
InnerRing members are allowed to perform data audit operations only:
Get, Head, Hash, Search.`,
Run: printACL,
Args: cobra.ExactArgs(1),
}
func printACL(cmd *cobra.Command, args []string) {
var bacl acl.Basic
commonCmd.ExitOnErr(cmd, "unable to parse basic acl: %w", bacl.DecodeString(args[0]))
util.PrettyPrintTableBACL(cmd, &bacl)
}

View file

@ -1,14 +0,0 @@
package basic
import (
"github.com/spf13/cobra"
)
var Cmd = &cobra.Command{
Use: "basic",
Short: "Operations with Basic Access Control Lists",
}
func init() {
Cmd.AddCommand(printACLCmd)
}

View file

@ -1,127 +0,0 @@
package extended
import (
"bytes"
"encoding/json"
"os"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"github.com/spf13/cobra"
)
var createCmd = &cobra.Command{
Use: "create",
Short: "Create extended ACL from the text representation",
Long: `Create extended ACL from the text representation.
A rule consists of these blocks: <action> <operation> [<filter1> ...] [<target1> ...]
Action is 'allow' or 'deny'.
Operation is an object service verb: 'get', 'head', 'put', 'search', 'delete', 'getrange', or 'getrangehash'.
Filter consists of <typ>:<key><match><value>
Typ is 'obj' for an object-applied filter or 'req' for a request-applied filter.
Key is a valid unicode string corresponding to object or request header key.
Well-known system object headers start with '$Object:' prefix.
User defined headers start without prefix.
Read more about filter keys at git.frostfs.info/TrueCloudLab/frostfs-api/src/branch/master/proto-docs/acl.md#message-eaclrecordfilter
Match is '=' for matching and '!=' for non-matching filter.
Value is a valid unicode string corresponding to object or request header value.
Target is
'user' for container owner,
'system' for Storage nodes in container and Inner Ring nodes,
'others' for all other request senders,
'pubkey:<key1>,<key2>,...' for exact request sender, where <key> is a hex-encoded 33-byte public key.
When both '--rule' and '--file' arguments are used, '--rule' records will be placed higher in resulting extended ACL table.
`,
Example: `frostfs-cli acl extended create --cid EutHBsdT1YCzHxjCfQHnLPL1vFrkSyLSio4vkphfnEk -f rules.txt --out table.json
frostfs-cli acl extended create --cid EutHBsdT1YCzHxjCfQHnLPL1vFrkSyLSio4vkphfnEk -r 'allow get obj:Key=Value others' -r 'deny put others'`,
Run: createEACL,
}
func init() {
createCmd.Flags().StringArrayP("rule", "r", nil, "Extended ACL table record to apply")
createCmd.Flags().StringP("file", "f", "", "Read list of extended ACL table records from text file")
createCmd.Flags().StringP("out", "o", "", "Save JSON formatted extended ACL table in file")
createCmd.Flags().StringP(commonflags.CIDFlag, "", "", commonflags.CIDFlagUsage)
_ = cobra.MarkFlagFilename(createCmd.Flags(), "file")
_ = cobra.MarkFlagFilename(createCmd.Flags(), "out")
}
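// createEACL assembles an extended ACL table from --rule arguments and/or a
// rules file, optionally binds it to the given container ID, and prints the
// result or saves it to a file as indented JSON.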
func createEACL(cmd *cobra.Command, _ []string) {
rules, _ := cmd.Flags().GetStringArray("rule")
fileArg, _ := cmd.Flags().GetString("file")
outArg, _ := cmd.Flags().GetString("out")
cidArg, _ := cmd.Flags().GetString(commonflags.CIDFlag)
var containerID cid.ID
if cidArg != "" {
if err := containerID.DecodeString(cidArg); err != nil {
cmd.PrintErrf("invalid container ID: %v\n", err)
os.Exit(1)
}
}
rulesFile, err := getRulesFromFile(fileArg)
if err != nil {
cmd.PrintErrf("can't read rules from file: %v\n", err)
os.Exit(1)
}
rules = append(rules, rulesFile...)
if len(rules) == 0 {
cmd.PrintErrln("no extended ACL rules have been provided")
os.Exit(1)
}
tb := eacl.NewTable()
commonCmd.ExitOnErr(cmd, "unable to parse provided rules: %w", util.ParseEACLRules(tb, rules))
tb.SetCID(containerID)
data, err := tb.MarshalJSON()
if err != nil {
cmd.PrintErrln(err)
os.Exit(1)
}
buf := new(bytes.Buffer)
err = json.Indent(buf, data, "", " ")
if err != nil {
cmd.PrintErrln(err)
os.Exit(1)
}
if len(outArg) == 0 {
cmd.Println(buf)
return
}
err = os.WriteFile(outArg, buf.Bytes(), 0644)
if err != nil {
cmd.PrintErrln(err)
os.Exit(1)
}
}
func getRulesFromFile(filename string) ([]string, error) {
if len(filename) == 0 {
return nil, nil
}
data, err := os.ReadFile(filename)
if err != nil {
return nil, err
}
return strings.Split(strings.TrimSpace(string(data)), "\n"), nil
}

View file

@ -1,38 +0,0 @@
package extended
import (
"os"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"github.com/spf13/cobra"
)
var printEACLCmd = &cobra.Command{
Use: "print",
Short: "Pretty print extended ACL from the file (in text or JSON format) or for the given container.",
Run: printEACL,
}
func init() {
flags := printEACLCmd.Flags()
flags.StringP("file", "f", "",
"Read list of extended ACL table records from text or json file")
_ = printEACLCmd.MarkFlagRequired("file")
}
func printEACL(cmd *cobra.Command, _ []string) {
file, _ := cmd.Flags().GetString("file")
eaclTable := new(eacl.Table)
data, err := os.ReadFile(file)
commonCmd.ExitOnErr(cmd, "can't read file with EACL: %w", err)
if strings.HasSuffix(file, ".json") {
commonCmd.ExitOnErr(cmd, "unable to parse json: %w", eaclTable.UnmarshalJSON(data))
} else {
rules := strings.Split(strings.TrimSpace(string(data)), "\n")
commonCmd.ExitOnErr(cmd, "can't parse file with EACL: %w", util.ParseEACLRules(eaclTable, rules))
}
util.PrettyPrintTableEACL(cmd, eaclTable)
}

View file

@ -1,17 +0,0 @@
package acl
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/acl/basic"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/acl/extended"
"github.com/spf13/cobra"
)
var Cmd = &cobra.Command{
Use: "acl",
Short: "Operations with Access Control Lists",
}
func init() {
Cmd.AddCommand(extended.Cmd)
Cmd.AddCommand(basic.Cmd)
}

View file

@ -1,9 +0,0 @@
package cmd
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/autocomplete"
)
func init() {
rootCmd.AddCommand(autocomplete.Command("frostfs-cli"))
}

View file

@ -1,229 +0,0 @@
package container
import (
"errors"
"fmt"
"os"
"strings"
"time"
containerApi "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
subnetid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/spf13/cobra"
)
var (
containerACL string
containerPolicy string
containerAttributes []string
containerAwait bool
containerName string
containerNnsName string
containerNnsZone string
containerNoTimestamp bool
containerSubnet string
force bool
)
var createContainerCmd = &cobra.Command{
Use: "create",
Short: "Create new container",
Long: `Create new container and register it in the FrostFS.
It will be stored in the sidechain when the Inner Ring accepts it.`,
Run: func(cmd *cobra.Command, args []string) {
placementPolicy, err := parseContainerPolicy(cmd, containerPolicy)
commonCmd.ExitOnErr(cmd, "", err)
key := key.Get(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, key, commonflags.RPC)
if !force {
var prm internalclient.NetMapSnapshotPrm
prm.SetClient(cli)
resmap, err := internalclient.NetMapSnapshot(prm)
commonCmd.ExitOnErr(cmd, "unable to get netmap snapshot to validate container placement, "+
"use --force option to skip this check: %w", err)
nodesByRep, err := resmap.NetMap().ContainerNodes(*placementPolicy, nil)
commonCmd.ExitOnErr(cmd, "could not build container nodes based on given placement policy, "+
"use --force option to skip this check: %w", err)
for i, nodes := range nodesByRep {
if placementPolicy.ReplicaNumberByIndex(i) > uint32(len(nodes)) {
commonCmd.ExitOnErr(cmd, "", fmt.Errorf(
"the number of nodes '%d' in selector is not enough for the number of replicas '%d', "+
"use --force option to skip this check",
len(nodes),
placementPolicy.ReplicaNumberByIndex(i),
))
}
}
}
if containerSubnet != "" {
var subnetID subnetid.ID
err = subnetID.DecodeString(containerSubnet)
commonCmd.ExitOnErr(cmd, "could not parse subnetID: %w", err)
placementPolicy.RestrictSubnet(subnetID)
}
var cnr container.Container
cnr.Init()
err = parseAttributes(&cnr, containerAttributes)
commonCmd.ExitOnErr(cmd, "", err)
var basicACL acl.Basic
commonCmd.ExitOnErr(cmd, "decode basic ACL string: %w", basicACL.DecodeString(containerACL))
tok := getSession(cmd)
if tok != nil {
issuer := tok.Issuer()
cnr.SetOwner(issuer)
} else {
var idOwner user.ID
user.IDFromKey(&idOwner, key.PublicKey)
cnr.SetOwner(idOwner)
}
cnr.SetPlacementPolicy(*placementPolicy)
cnr.SetBasicACL(basicACL)
var syncContainerPrm internalclient.SyncContainerPrm
syncContainerPrm.SetClient(cli)
syncContainerPrm.SetContainer(&cnr)
_, err = internalclient.SyncContainerSettings(syncContainerPrm)
commonCmd.ExitOnErr(cmd, "syncing container's settings rpc error: %w", err)
var putPrm internalclient.PutContainerPrm
putPrm.SetClient(cli)
putPrm.SetContainer(cnr)
if tok != nil {
putPrm.WithinSession(*tok)
}
res, err := internalclient.PutContainer(putPrm)
commonCmd.ExitOnErr(cmd, "put container rpc error: %w", err)
id := res.ID()
cmd.Println("container ID:", id)
if containerAwait {
cmd.Println("awaiting...")
var getPrm internalclient.GetContainerPrm
getPrm.SetClient(cli)
getPrm.SetContainer(id)
for i := 0; i < awaitTimeout; i++ {
time.Sleep(1 * time.Second)
_, err := internalclient.GetContainer(getPrm)
if err == nil {
cmd.Println("container has been persisted on sidechain")
return
}
}
commonCmd.ExitOnErr(cmd, "", errCreateTimeout)
}
},
}
func initContainerCreateCmd() {
flags := createContainerCmd.Flags()
// Init common flags
flags.StringP(commonflags.RPC, commonflags.RPCShorthand, commonflags.RPCDefault, commonflags.RPCUsage)
flags.DurationP(commonflags.Timeout, commonflags.TimeoutShorthand, commonflags.TimeoutDefault, commonflags.TimeoutUsage)
flags.StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, commonflags.WalletPathDefault, commonflags.WalletPathUsage)
flags.StringP(commonflags.Account, commonflags.AccountShorthand, commonflags.AccountDefault, commonflags.AccountUsage)
flags.StringVar(&containerACL, "basic-acl", acl.NamePrivate, fmt.Sprintf("HEX encoded basic ACL value or keywords like '%s', '%s', '%s'",
acl.NamePublicRW, acl.NamePrivate, acl.NamePublicROExtended,
))
flags.StringVarP(&containerPolicy, "policy", "p", "", "QL-encoded or JSON-encoded placement policy or path to file with it")
flags.StringSliceVarP(&containerAttributes, "attributes", "a", nil, "Comma separated pairs of container attributes in form of Key1=Value1,Key2=Value2")
flags.BoolVar(&containerAwait, "await", false, "Block execution until container is persisted")
flags.StringVar(&containerName, "name", "", "Container name attribute")
flags.StringVar(&containerNnsName, "nns-name", "", "Container nns name attribute")
flags.StringVar(&containerNnsZone, "nns-zone", "", "Container nns zone attribute")
flags.BoolVar(&containerNoTimestamp, "disable-timestamp", false, "Disable timestamp container attribute")
flags.StringVar(&containerSubnet, "subnet", "", "String representation of container subnetwork")
flags.BoolVarP(&force, commonflags.ForceFlag, commonflags.ForceFlagShorthand, false,
"Skip placement validity check")
}
func parseContainerPolicy(cmd *cobra.Command, policyString string) (*netmap.PlacementPolicy, error) {
_, err := os.Stat(policyString) // check if `policyString` is a path to file with placement policy
if err == nil {
common.PrintVerbose(cmd, "Reading placement policy from file: %s", policyString)
data, err := os.ReadFile(policyString)
if err != nil {
return nil, fmt.Errorf("can't read file with placement policy: %w", err)
}
policyString = string(data)
}
var result netmap.PlacementPolicy
err = result.DecodeString(policyString)
if err == nil {
common.PrintVerbose(cmd, "Parsed QL encoded policy")
return &result, nil
}
if err = result.UnmarshalJSON([]byte(policyString)); err == nil {
common.PrintVerbose(cmd, "Parsed JSON encoded policy")
return &result, nil
}
return nil, errors.New("can't parse placement policy")
}
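For reference, here is a minimal standalone sketch of the same fallback order, using only the DecodeString and UnmarshalJSON methods already called above; the sample policy string is illustrative, not taken from the source:

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)

func main() {
	in := "REP 3" // sample QL policy; a JSON document would take the second branch
	var p netmap.PlacementPolicy
	// Try the QL string form first, then fall back to JSON.
	if err := p.DecodeString(in); err == nil {
		fmt.Println("parsed QL encoded policy")
		return
	}
	if err := p.UnmarshalJSON([]byte(in)); err == nil {
		fmt.Println("parsed JSON encoded policy")
		return
	}
	fmt.Println("can't parse placement policy")
}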
func parseAttributes(dst *container.Container, attributes []string) error {
for i := range attributes {
k, v, found := strings.Cut(attributes[i], attributeDelimiter)
if !found {
return errors.New("invalid container attribute")
}
dst.SetAttribute(k, v)
}
if !containerNoTimestamp {
container.SetCreationTime(dst, time.Now())
}
if containerName != "" {
container.SetName(dst, containerName)
}
if containerNnsName != "" {
dst.SetAttribute(containerApi.SysAttributeName, containerNnsName)
}
if containerNnsZone != "" {
dst.SetAttribute(containerApi.SysAttributeZone, containerNnsZone)
}
return nil
}
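The --attributes flag handled above expects Key=Value pairs split on the first '='. A standard-library-only sketch of that split, with made-up attribute names for illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	attrs := []string{"Color=red", "Note=a=b"} // the second value keeps its own '='
	for _, a := range attrs {
		k, v, found := strings.Cut(a, "=")
		if !found {
			fmt.Println("invalid container attribute:", a)
			continue
		}
		fmt.Printf("key=%q value=%q\n", k, v) // key="Note" value="a=b" for the second pair
	}
}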


@ -1,131 +0,0 @@
package container
import (
"fmt"
"time"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/spf13/cobra"
)
var deleteContainerCmd = &cobra.Command{
Use: "delete",
Short: "Delete existing container",
Long: `Delete existing container.
Only the owner of the container has permission to remove it.`,
Run: func(cmd *cobra.Command, args []string) {
id := parseContainerID(cmd)
tok := getSession(cmd)
pk := key.Get(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
if force, _ := cmd.Flags().GetBool(commonflags.ForceFlag); !force {
common.PrintVerbose(cmd, "Reading the container to check ownership...")
var getPrm internalclient.GetContainerPrm
getPrm.SetClient(cli)
getPrm.SetContainer(id)
resGet, err := internalclient.GetContainer(getPrm)
commonCmd.ExitOnErr(cmd, "can't get the container: %w", err)
owner := resGet.Container().Owner()
if tok != nil {
common.PrintVerbose(cmd, "Checking session issuer...")
if !tok.Issuer().Equals(owner) {
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("session issuer differs with the container owner: expected %s, has %s", owner, tok.Issuer()))
}
} else {
common.PrintVerbose(cmd, "Checking provided account...")
var acc user.ID
user.IDFromKey(&acc, pk.PublicKey)
if !acc.Equals(owner) {
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("provided account differs with the container owner: expected %s, has %s", owner, acc))
}
}
common.PrintVerbose(cmd, "Account matches the container owner.")
if tok != nil {
common.PrintVerbose(cmd, "Skip searching for LOCK objects - session provided.")
} else {
fs := objectSDK.NewSearchFilters()
fs.AddTypeFilter(objectSDK.MatchStringEqual, objectSDK.TypeLock)
var searchPrm internalclient.SearchObjectsPrm
searchPrm.SetClient(cli)
searchPrm.SetContainerID(id)
searchPrm.SetFilters(fs)
searchPrm.SetTTL(2)
common.PrintVerbose(cmd, "Searching for LOCK objects...")
res, err := internalclient.SearchObjects(searchPrm)
commonCmd.ExitOnErr(cmd, "can't search for LOCK objects: %w", err)
if len(res.IDList()) != 0 {
commonCmd.ExitOnErr(cmd, "",
fmt.Errorf("Container wasn't removed because LOCK objects were found.\n"+
"Use --%s flag to remove anyway.", commonflags.ForceFlag))
}
}
}
var delPrm internalclient.DeleteContainerPrm
delPrm.SetClient(cli)
delPrm.SetContainer(id)
if tok != nil {
delPrm.WithinSession(*tok)
}
_, err := internalclient.DeleteContainer(delPrm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
cmd.Println("container delete method invoked")
if containerAwait {
cmd.Println("awaiting...")
var getPrm internalclient.GetContainerPrm
getPrm.SetClient(cli)
getPrm.SetContainer(id)
for i := 0; i < awaitTimeout; i++ {
time.Sleep(1 * time.Second)
_, err := internalclient.GetContainer(getPrm)
if err != nil {
cmd.Println("container has been removed:", containerID)
return
}
}
commonCmd.ExitOnErr(cmd, "", errDeleteTimeout)
}
},
}
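The pre-check above refuses to delete a container that still holds LOCK objects. A small sketch of just the filter construction it relies on, reusing the SDK calls shown in the command; printing the filter count assumes SearchFilters stays a slice type, as in the SDK version used here:

package main

import (
	"fmt"

	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)

func main() {
	// Match only objects whose type is LOCK, exactly as the delete pre-check does.
	fs := objectSDK.NewSearchFilters()
	fs.AddTypeFilter(objectSDK.MatchStringEqual, objectSDK.TypeLock)
	fmt.Printf("%d search filter(s) prepared\n", len(fs))
}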
func initContainerDeleteCmd() {
flags := deleteContainerCmd.Flags()
flags.StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, commonflags.WalletPathDefault, commonflags.WalletPathUsage)
flags.StringP(commonflags.Account, commonflags.AccountShorthand, commonflags.AccountDefault, commonflags.AccountUsage)
flags.StringP(commonflags.RPC, commonflags.RPCShorthand, commonflags.RPCDefault, commonflags.RPCUsage)
flags.StringVar(&containerID, commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
flags.BoolVar(&containerAwait, "await", false, "Block execution until container is removed")
flags.BoolP(commonflags.ForceFlag, commonflags.ForceFlagShorthand, false, "Skip validation checks (ownership, presence of LOCK objects)")
}


@ -1,64 +0,0 @@
package container
import (
"crypto/sha256"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
containerAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/spf13/cobra"
)
var short bool
var containerNodesCmd = &cobra.Command{
Use: "nodes",
Short: "Show nodes for container",
Long: "Show nodes taking part in a container at the current epoch.",
Run: func(cmd *cobra.Command, args []string) {
var cnr, pkey = getContainer(cmd)
if pkey == nil {
pkey = key.GetOrGenerate(cmd)
}
cli := internalclient.GetSDKClientByFlag(cmd, pkey, commonflags.RPC)
var prm internalclient.NetMapSnapshotPrm
prm.SetClient(cli)
resmap, err := internalclient.NetMapSnapshot(prm)
commonCmd.ExitOnErr(cmd, "unable to get netmap snapshot", err)
var id cid.ID
containerAPI.CalculateID(&id, cnr)
binCnr := make([]byte, sha256.Size)
id.Encode(binCnr)
policy := cnr.PlacementPolicy()
var cnrNodes [][]netmap.NodeInfo
cnrNodes, err = resmap.NetMap().ContainerNodes(policy, binCnr)
commonCmd.ExitOnErr(cmd, "could not build container nodes for given container: %w", err)
for i := range cnrNodes {
cmd.Printf("Descriptor #%d, REP %d:\n", i+1, policy.ReplicaNumberByIndex(i))
for j := range cnrNodes[i] {
commonCmd.PrettyPrintNodeInfo(cmd, cnrNodes[i][j], j, "\t", short)
}
}
},
}
func initContainerNodesCmd() {
commonflags.Init(containerNodesCmd)
flags := containerNodesCmd.Flags()
flags.StringVar(&containerID, commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
flags.StringVar(&containerPathFrom, fromFlag, "", fromFlagUsage)
flags.BoolVar(&short, "short", false, "Shortens output of node info")
}


@ -1,104 +0,0 @@
package container
import (
"bytes"
"errors"
"time"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"github.com/spf13/cobra"
)
var flagVarsSetEACL struct {
noPreCheck bool
srcPath string
}
var setExtendedACLCmd = &cobra.Command{
Use: "set-eacl",
Short: "Set new extended ACL table for container",
Long: `Set new extended ACL table for container.
Container ID in EACL table will be substituted with ID from the CLI.`,
Run: func(cmd *cobra.Command, args []string) {
id := parseContainerID(cmd)
eaclTable := common.ReadEACL(cmd, flagVarsSetEACL.srcPath)
tok := getSession(cmd)
eaclTable.SetCID(id)
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
if !flagVarsSetEACL.noPreCheck {
cmd.Println("Checking the ability to modify access rights in the container...")
extendable, err := internalclient.IsACLExtendable(cli, id)
commonCmd.ExitOnErr(cmd, "Extensibility check failure: %w", err)
if !extendable {
commonCmd.ExitOnErr(cmd, "", errors.New("container ACL is immutable"))
}
cmd.Println("ACL extension is enabled in the container, continue processing.")
}
var setEACLPrm internalclient.SetEACLPrm
setEACLPrm.SetClient(cli)
setEACLPrm.SetTable(*eaclTable)
if tok != nil {
setEACLPrm.WithinSession(*tok)
}
_, err := internalclient.SetEACL(setEACLPrm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
if containerAwait {
exp, err := eaclTable.Marshal()
commonCmd.ExitOnErr(cmd, "broken EACL table: %w", err)
cmd.Println("awaiting...")
var getEACLPrm internalclient.EACLPrm
getEACLPrm.SetClient(cli)
getEACLPrm.SetContainer(id)
for i := 0; i < awaitTimeout; i++ {
time.Sleep(1 * time.Second)
res, err := internalclient.EACL(getEACLPrm)
if err == nil {
// compare binary values because EACL could have been set already
table := res.EACL()
got, err := table.Marshal()
if err != nil {
continue
}
if bytes.Equal(exp, got) {
cmd.Println("EACL has been persisted on sidechain")
return
}
}
}
commonCmd.ExitOnErr(cmd, "", errSetEACLTimeout)
}
},
}
func initContainerSetEACLCmd() {
commonflags.Init(setExtendedACLCmd)
flags := setExtendedACLCmd.Flags()
flags.StringVar(&containerID, commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
flags.StringVar(&flagVarsSetEACL.srcPath, "table", "", "path to file with JSON or binary encoded EACL table")
flags.BoolVar(&containerAwait, "await", false, "block execution until EACL is persisted")
flags.BoolVar(&flagVarsSetEACL.noPreCheck, "no-precheck", false, "do not pre-check the extensibility of the container ACL")
}


@ -1,58 +0,0 @@
package container
import (
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"github.com/spf13/cobra"
)
const (
attributeDelimiter = "="
awaitTimeout = 120 // in seconds
)
var (
errCreateTimeout = errors.New("timeout: container has not been persisted on sidechain")
errDeleteTimeout = errors.New("timeout: container has not been removed from sidechain")
errSetEACLTimeout = errors.New("timeout: EACL has not been persisted on sidechain")
)
func parseContainerID(cmd *cobra.Command) cid.ID {
if containerID == "" {
commonCmd.ExitOnErr(cmd, "", errors.New("container ID is not set"))
}
var id cid.ID
err := id.DecodeString(containerID)
commonCmd.ExitOnErr(cmd, "can't decode container ID value: %w", err)
return id
}
// decodes session.Container from the file by path provided in
// commonflags.SessionToken flag. Returns nil if the path is not specified.
func getSession(cmd *cobra.Command) *session.Container {
common.PrintVerbose(cmd, "Reading container session...")
path, _ := cmd.Flags().GetString(commonflags.SessionToken)
if path == "" {
common.PrintVerbose(cmd, "Session not provided.")
return nil
}
common.PrintVerbose(cmd, "Reading container session from the file [%s]...", path)
var res session.Container
err := common.ReadBinaryOrJSON(cmd, &res, path)
commonCmd.ExitOnErr(cmd, "read container session: %v", err)
common.PrintVerbose(cmd, "Session successfully read.")
return &res
}


@ -1,53 +0,0 @@
package control
import (
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
"github.com/spf13/cobra"
)
var evacuateShardCmd = &cobra.Command{
Use: "evacuate",
Short: "Evacuate objects from shard",
Long: "Evacuate objects from shard to other shards",
Run: evacuateShard,
}
func evacuateShard(cmd *cobra.Command, _ []string) {
pk := key.Get(cmd)
req := &control.EvacuateShardRequest{Body: new(control.EvacuateShardRequest_Body)}
req.Body.Shard_ID = getShardIDList(cmd)
req.Body.IgnoreErrors, _ = cmd.Flags().GetBool(dumpIgnoreErrorsFlag)
signRequest(cmd, pk, req)
cli := getClient(cmd, pk)
var resp *control.EvacuateShardResponse
var err error
err = cli.ExecRaw(func(client *client.Client) error {
resp, err = control.EvacuateShard(client, req)
return err
})
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
cmd.Printf("Objects moved: %d\n", resp.GetBody().GetCount())
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
cmd.Println("Shard has successfully been evacuated.")
}
func initControlEvacuateShardCmd() {
initControlFlags(evacuateShardCmd)
flags := evacuateShardCmd.Flags()
flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
flags.Bool(shardAllFlag, false, "Process all shards")
flags.Bool(dumpIgnoreErrorsFlag, false, "Skip invalid/unreadable objects")
evacuateShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
}


@ -1,49 +0,0 @@
package control
import (
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
"github.com/spf13/cobra"
)
var flushCacheCmd = &cobra.Command{
Use: "flush-cache",
Short: "Flush objects from the write-cache to the main storage",
Long: "Flush objects from the write-cache to the main storage",
Run: flushCache,
}
func flushCache(cmd *cobra.Command, _ []string) {
pk := key.Get(cmd)
req := &control.FlushCacheRequest{Body: new(control.FlushCacheRequest_Body)}
req.Body.Shard_ID = getShardIDList(cmd)
signRequest(cmd, pk, req)
cli := getClient(cmd, pk)
var resp *control.FlushCacheResponse
var err error
err = cli.ExecRaw(func(client *client.Client) error {
resp, err = control.FlushCache(client, req)
return err
})
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
cmd.Println("Write-cache has been flushed.")
}
func initControlFlushCacheCmd() {
initControlFlags(flushCacheCmd)
ff := flushCacheCmd.Flags()
ff.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
ff.Bool(shardAllFlag, false, "Process all shards")
flushCacheCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
}


@ -1,95 +0,0 @@
package control
import (
"fmt"
rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
"github.com/spf13/cobra"
)
const (
netmapStatusFlag = "status"
netmapStatusOnline = "online"
netmapStatusOffline = "offline"
netmapStatusMaintenance = "maintenance"
)
var setNetmapStatusCmd = &cobra.Command{
Use: "set-status",
Short: "Set status of the storage node in FrostFS network map",
Long: "Set status of the storage node in FrostFS network map",
Run: setNetmapStatus,
}
func initControlSetNetmapStatusCmd() {
initControlFlags(setNetmapStatusCmd)
flags := setNetmapStatusCmd.Flags()
flags.String(netmapStatusFlag, "",
fmt.Sprintf("New netmap status keyword ('%s', '%s', '%s')",
netmapStatusOnline,
netmapStatusOffline,
netmapStatusMaintenance,
),
)
_ = setNetmapStatusCmd.MarkFlagRequired(netmapStatusFlag)
flags.BoolP(commonflags.ForceFlag, commonflags.ForceFlagShorthand, false,
"Force turning to local maintenance")
}
func setNetmapStatus(cmd *cobra.Command, _ []string) {
pk := key.Get(cmd)
body := new(control.SetNetmapStatusRequest_Body)
force, _ := cmd.Flags().GetBool(commonflags.ForceFlag)
printIgnoreForce := func(st control.NetmapStatus) {
if force {
common.PrintVerbose(cmd, "Ignore --%s flag for %s state.", commonflags.ForceFlag, st)
}
}
switch st, _ := cmd.Flags().GetString(netmapStatusFlag); st {
default:
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("unsupported status %s", st))
case netmapStatusOnline:
body.SetStatus(control.NetmapStatus_ONLINE)
printIgnoreForce(control.NetmapStatus_ONLINE)
case netmapStatusOffline:
body.SetStatus(control.NetmapStatus_OFFLINE)
printIgnoreForce(control.NetmapStatus_OFFLINE)
case netmapStatusMaintenance:
body.SetStatus(control.NetmapStatus_MAINTENANCE)
if force {
body.SetForceMaintenance()
common.PrintVerbose(cmd, "Local maintenance will be forced.")
}
}
req := new(control.SetNetmapStatusRequest)
req.SetBody(body)
signRequest(cmd, pk, req)
cli := getClient(cmd, pk)
var resp *control.SetNetmapStatusResponse
var err error
err = cli.ExecRaw(func(client *rawclient.Client) error {
resp, err = control.SetNetmapStatus(client, req)
return err
})
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
cmd.Println("Network status update request successfully sent.")
}


@ -1,178 +0,0 @@
package control
import (
"fmt"
"strings"
rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
"github.com/mr-tron/base58"
"github.com/spf13/cobra"
)
const (
shardModeFlag = "mode"
shardIDFlag = "id"
shardAllFlag = "all"
shardClearErrorsFlag = "clear-errors"
)
// maps string command input to control.ShardMode. To support new mode, it's
// enough to add the map entry. Modes are automatically printed in command help
// messages.
var mShardModes = map[string]struct {
val control.ShardMode
// marks modes that are supported but hidden from command help messages. Such
// values are not expected to be set by users, yet remain available to
// developers.
unsafe bool
}{
"read-only": {val: control.ShardMode_READ_ONLY},
"read-write": {val: control.ShardMode_READ_WRITE},
"degraded-read-write": {val: control.ShardMode_DEGRADED, unsafe: true},
"degraded-read-only": {val: control.ShardMode_DEGRADED_READ_ONLY},
}
// iterates over string representations of safe supported shard modes. Safe means
// modes which are expected to be used by any user. All other supported modes
// are for developers only.
func iterateSafeShardModes(f func(string)) {
for strMode, mode := range mShardModes {
if !mode.unsafe {
f(strMode)
}
}
}
// looks up the supported control.ShardMode represented by the given string.
// Returns false if no corresponding mode exists.
func lookUpShardModeFromString(str string) (control.ShardMode, bool) {
mode, ok := mShardModes[str]
if !ok {
return control.ShardMode_SHARD_MODE_UNDEFINED, false
}
return mode.val, true
}
// looks up the string representation of the supported shard mode. Returns false
// if the mode is not supported.
func lookUpShardModeString(m control.ShardMode) (string, bool) {
for strMode, mode := range mShardModes {
if mode.val == m {
return strMode, true
}
}
return "", false
}
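The three helpers above all read from the single mShardModes registry, so supporting a new mode is one map entry. A self-contained sketch of the same pattern, with placeholder names and plain ints standing in for the control.ShardMode enum (the "debug-only" mode is invented for illustration):

package main

import "fmt"

// Same registration pattern as mShardModes: adding a mode means adding one map entry.
var modes = map[string]struct {
	val    int
	unsafe bool
}{
	"read-only":  {val: 1},
	"read-write": {val: 2},
	"debug-only": {val: 3, unsafe: true}, // hidden from help output
}

func main() {
	// Help messages list only "safe" modes.
	for name, m := range modes {
		if !m.unsafe {
			fmt.Println("advertised mode:", name)
		}
	}
	// Lookup mirrors lookUpShardModeFromString.
	if m, ok := modes["read-only"]; ok {
		fmt.Println("read-only resolves to", m.val)
	}
}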
var setShardModeCmd = &cobra.Command{
Use: "set-mode",
Short: "Set work mode of the shard",
Long: "Set work mode of the shard",
Run: setShardMode,
}
func initControlSetShardModeCmd() {
initControlFlags(setShardModeCmd)
flags := setShardModeCmd.Flags()
flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
flags.Bool(shardAllFlag, false, "Process all shards")
modes := make([]string, 0)
iterateSafeShardModes(func(strMode string) {
modes = append(modes, "'"+strMode+"'")
})
flags.String(shardModeFlag, "",
fmt.Sprintf("New shard mode (%s)", strings.Join(modes, ", ")),
)
_ = setShardModeCmd.MarkFlagRequired(shardModeFlag)
flags.Bool(shardClearErrorsFlag, false, "Set shard error count to 0")
setShardModeCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
}
func setShardMode(cmd *cobra.Command, _ []string) {
pk := key.Get(cmd)
strMode, _ := cmd.Flags().GetString(shardModeFlag)
mode, ok := lookUpShardModeFromString(strMode)
if !ok {
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("unsupported mode %s", strMode))
}
req := new(control.SetShardModeRequest)
body := new(control.SetShardModeRequest_Body)
req.SetBody(body)
body.SetMode(mode)
body.SetShardIDList(getShardIDList(cmd))
reset, _ := cmd.Flags().GetBool(shardClearErrorsFlag)
body.ClearErrorCounter(reset)
signRequest(cmd, pk, req)
cli := getClient(cmd, pk)
var resp *control.SetShardModeResponse
var err error
err = cli.ExecRaw(func(client *rawclient.Client) error {
resp, err = control.SetShardMode(client, req)
return err
})
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
cmd.Println("Shard mode update request successfully sent.")
}
func getShardID(cmd *cobra.Command) []byte {
sid, _ := cmd.Flags().GetString(shardIDFlag)
raw, err := base58.Decode(sid)
commonCmd.ExitOnErr(cmd, "incorrect shard ID encoding: %w", err)
return raw
}
func getShardIDList(cmd *cobra.Command) [][]byte {
all, _ := cmd.Flags().GetBool(shardAllFlag)
if all {
return nil
}
sidList, _ := cmd.Flags().GetStringSlice(shardIDFlag)
if len(sidList) == 0 {
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("either --%s or --%s flag must be provided", shardIDFlag, shardAllFlag))
}
// We can sort the ID list and perform this check without additional allocations,
// but preserving the user order is a nice thing to have.
// Also, this is a CLI, we don't care too much about this.
seen := make(map[string]struct{})
for i := range sidList {
if _, ok := seen[sidList[i]]; ok {
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("duplicated shard IDs: %s", sidList[i]))
}
seen[sidList[i]] = struct{}{}
}
res := make([][]byte, 0, len(sidList))
for i := range sidList {
raw, err := base58.Decode(sidList[i])
commonCmd.ExitOnErr(cmd, "incorrect shard ID encoding: %w", err)
res = append(res, raw)
}
return res
}
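A standalone sketch of the same ID handling as getShardIDList: keep the user's order, reject duplicates, then base58-decode each ID. The sample IDs are arbitrary valid base58 strings, not real shard IDs:

package main

import (
	"fmt"

	"github.com/mr-tron/base58"
)

func main() {
	sidList := []string{"2cJs", "3mJr"} // arbitrary base58 strings
	// Preserve the user's order; a repeated ID would be rejected here.
	seen := make(map[string]struct{})
	for _, sid := range sidList {
		if _, ok := seen[sid]; ok {
			fmt.Println("duplicated shard IDs:", sid)
			return
		}
		seen[sid] = struct{}{}
	}
	for _, sid := range sidList {
		raw, err := base58.Decode(sid)
		if err != nil {
			fmt.Println("incorrect shard ID encoding:", err)
			return
		}
		fmt.Printf("%s -> %x\n", sid, raw)
	}
}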


@ -1,59 +0,0 @@
package control
import (
"crypto/ecdsa"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
controlSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
"github.com/spf13/cobra"
)
func initControlFlags(cmd *cobra.Command) {
ff := cmd.Flags()
ff.StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, commonflags.WalletPathDefault, commonflags.WalletPathUsage)
ff.StringP(commonflags.Account, commonflags.AccountShorthand, commonflags.AccountDefault, commonflags.AccountUsage)
ff.String(controlRPC, controlRPCDefault, controlRPCUsage)
ff.DurationP(commonflags.Timeout, commonflags.TimeoutShorthand, commonflags.TimeoutDefault, commonflags.TimeoutUsage)
}
func signRequest(cmd *cobra.Command, pk *ecdsa.PrivateKey, req controlSvc.SignedMessage) {
err := controlSvc.SignMessage(pk, req)
commonCmd.ExitOnErr(cmd, "could not sign request: %w", err)
}
func verifyResponse(cmd *cobra.Command,
sigControl interface {
GetKey() []byte
GetSign() []byte
},
body interface {
StableMarshal([]byte) []byte
},
) {
if sigControl == nil {
commonCmd.ExitOnErr(cmd, "", errors.New("missing response signature"))
}
// TODO(@cthulhu-rider): #1387 use Signature message from NeoFS API to avoid conversion
var sigV2 refs.Signature
sigV2.SetScheme(refs.ECDSA_SHA512)
sigV2.SetKey(sigControl.GetKey())
sigV2.SetSign(sigControl.GetSign())
var sig frostfscrypto.Signature
commonCmd.ExitOnErr(cmd, "can't read signature: %w", sig.ReadFromV2(sigV2))
if !sig.Verify(body.StableMarshal(nil)) {
commonCmd.ExitOnErr(cmd, "", errors.New("invalid response signature"))
}
}
func getClient(cmd *cobra.Command, pk *ecdsa.PrivateKey) *client.Client {
return internalclient.GetSDKClientByFlag(cmd, pk, controlRPC)
}


@ -1,32 +0,0 @@
package netmap
import (
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"github.com/spf13/cobra"
)
var snapshotCmd = &cobra.Command{
Use: "snapshot",
Short: "Request current local snapshot of the network map",
Long: `Request current local snapshot of the network map`,
Run: func(cmd *cobra.Command, args []string) {
p := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, p, commonflags.RPC)
var prm internalclient.NetMapSnapshotPrm
prm.SetClient(cli)
res, err := internalclient.NetMapSnapshot(prm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
commonCmd.PrettyPrintNetMap(cmd, res.NetMap(), false)
},
}
func initSnapshotCmd() {
commonflags.Init(snapshotCmd)
commonflags.InitAPI(snapshotCmd)
}


@ -1,75 +0,0 @@
package object
import (
"fmt"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
)
var objectDelCmd = &cobra.Command{
Use: "delete",
Aliases: []string{"del"},
Short: "Delete object from FrostFS",
Long: "Delete object from FrostFS",
Run: deleteObject,
}
func initObjectDeleteCmd() {
commonflags.Init(objectDelCmd)
initFlagSession(objectDelCmd, "DELETE")
flags := objectDelCmd.Flags()
flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
flags.Bool(binaryFlag, false, "Deserialize object structure from given file.")
flags.String(fileFlag, "", "File with object payload")
}
func deleteObject(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
var objAddr oid.Address
binary, _ := cmd.Flags().GetBool(binaryFlag)
if binary {
filename, _ := cmd.Flags().GetString(fileFlag)
if filename == "" {
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("required flag \"%s\" not set", fileFlag))
}
objAddr = readObjectAddressBin(cmd, &cnr, &obj, filename)
} else {
cidVal, _ := cmd.Flags().GetString(commonflags.CIDFlag)
if cidVal == "" {
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("required flag \"%s\" not set", commonflags.CIDFlag))
}
oidVal, _ := cmd.Flags().GetString(commonflags.OIDFlag)
if oidVal == "" {
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("required flag \"%s\" not set", commonflags.OIDFlag))
}
objAddr = readObjectAddress(cmd, &cnr, &obj)
}
pk := key.GetOrGenerate(cmd)
var prm internalclient.DeleteObjectPrm
ReadOrOpenSession(cmd, &prm, pk, cnr, &obj)
Prepare(cmd, &prm)
prm.SetAddress(objAddr)
res, err := internalclient.DeleteObject(prm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
tomb := res.Tombstone()
cmd.Println("Object removed successfully.")
cmd.Printf(" ID: %s\n CID: %s\n", tomb, cnr)
}


@ -1,142 +0,0 @@
package object
import (
"bytes"
"fmt"
"io"
"os"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/cheggaaa/pb"
"github.com/spf13/cobra"
)
var objectGetCmd = &cobra.Command{
Use: "get",
Short: "Get object from FrostFS",
Long: "Get object from FrostFS",
Run: getObject,
}
func initObjectGetCmd() {
commonflags.Init(objectGetCmd)
initFlagSession(objectGetCmd, "GET")
flags := objectGetCmd.Flags()
flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
_ = objectGetCmd.MarkFlagRequired(commonflags.CIDFlag)
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = objectGetCmd.MarkFlagRequired(commonflags.OIDFlag)
flags.String(fileFlag, "", "File to write object payload to(with -b together with signature and header). Default: stdout.")
flags.Bool(rawFlag, false, rawFlagDesc)
flags.Bool(noProgressFlag, false, "Do not show progress bar")
flags.Bool(binaryFlag, false, "Serialize whole object structure into given file(id + signature + header + payload).")
}
// nolint: funlen
func getObject(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
objAddr := readObjectAddress(cmd, &cnr, &obj)
var out io.Writer
filename := cmd.Flag(fileFlag).Value.String()
if filename == "" {
out = os.Stdout
} else {
f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
if err != nil {
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("can't open file '%s': %w", filename, err))
}
defer f.Close()
out = f
}
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
var prm internalclient.GetObjectPrm
prm.SetClient(cli)
Prepare(cmd, &prm)
readSession(cmd, &prm, pk, cnr, obj)
raw, _ := cmd.Flags().GetBool(rawFlag)
prm.SetRawFlag(raw)
prm.SetAddress(objAddr)
var p *pb.ProgressBar
noProgress, _ := cmd.Flags().GetBool(noProgressFlag)
var payloadWriter io.Writer
var payloadBuffer *bytes.Buffer
binary, _ := cmd.Flags().GetBool(binaryFlag)
if binary {
payloadBuffer = new(bytes.Buffer)
payloadWriter = payloadBuffer
} else {
payloadWriter = out
}
if filename == "" || noProgress {
prm.SetPayloadWriter(payloadWriter)
} else {
p = pb.New64(0)
p.Output = cmd.OutOrStdout()
prm.SetPayloadWriter(p.NewProxyWriter(payloadWriter))
prm.SetHeaderCallback(func(o *object.Object) {
p.SetTotal64(int64(o.PayloadSize()))
p.Start()
})
}
res, err := internalclient.GetObject(prm)
if p != nil {
p.Finish()
}
if err != nil {
if ok := printSplitInfoErr(cmd, err); ok {
return
}
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
}
if binary {
objToStore := res.Header()
// TODO(@acid-ant): #1932 Use streams to marshal/unmarshal payload
objToStore.SetPayload(payloadBuffer.Bytes())
objBytes, err := objToStore.Marshal()
commonCmd.ExitOnErr(cmd, "", err)
_, err = out.Write(objBytes)
commonCmd.ExitOnErr(cmd, "unable to write binary object in out: %w ", err)
}
if filename != "" && !strictOutput(cmd) {
cmd.Printf("[%s] Object successfully saved\n", filename)
}
// Print header only if file is not streamed to stdout.
if filename != "" {
err = printHeader(cmd, res.Header())
commonCmd.ExitOnErr(cmd, "", err)
}
}
func strictOutput(cmd *cobra.Command) bool {
toJSON, _ := cmd.Flags().GetBool(commonflags.JSON)
toProto, _ := cmd.Flags().GetBool("proto")
return toJSON || toProto
}


@ -1,112 +0,0 @@
package object
import (
"context"
"errors"
"fmt"
"strconv"
"time"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/spf13/cobra"
)
// object lock command.
var objectLockCmd = &cobra.Command{
Use: "lock",
Short: "Lock object in container",
Long: "Lock object in container",
Run: func(cmd *cobra.Command, _ []string) {
cidRaw, _ := cmd.Flags().GetString(commonflags.CIDFlag)
var cnr cid.ID
err := cnr.DecodeString(cidRaw)
commonCmd.ExitOnErr(cmd, "Incorrect container arg: %v", err)
oidsRaw, _ := cmd.Flags().GetStringSlice(commonflags.OIDFlag)
lockList := make([]oid.ID, len(oidsRaw))
for i := range oidsRaw {
err = lockList[i].DecodeString(oidsRaw[i])
commonCmd.ExitOnErr(cmd, fmt.Sprintf("Incorrect object arg #%d: %%v", i+1), err)
}
key := key.GetOrGenerate(cmd)
var idOwner user.ID
user.IDFromKey(&idOwner, key.PublicKey)
var lock objectSDK.Lock
lock.WriteMembers(lockList)
exp, _ := cmd.Flags().GetUint64(commonflags.ExpireAt)
lifetime, _ := cmd.Flags().GetUint64(commonflags.Lifetime)
if exp == 0 && lifetime == 0 { // mutual exclusion is ensured by cobra
commonCmd.ExitOnErr(cmd, "", errors.New("either expiration epoch of a lifetime is required"))
}
if lifetime != 0 {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel()
endpoint, _ := cmd.Flags().GetString(commonflags.RPC)
currEpoch, err := internalclient.GetCurrentEpoch(ctx, cmd, endpoint)
commonCmd.ExitOnErr(cmd, "Request current epoch: %w", err)
exp = currEpoch + lifetime
}
common.PrintVerbose(cmd, "Lock object will expire after %d epoch", exp)
var expirationAttr objectSDK.Attribute
expirationAttr.SetKey(objectV2.SysAttributeExpEpoch)
expirationAttr.SetValue(strconv.FormatUint(exp, 10))
obj := objectSDK.New()
obj.SetContainerID(cnr)
obj.SetOwnerID(&idOwner)
obj.SetType(objectSDK.TypeLock)
obj.SetAttributes(expirationAttr)
obj.SetPayload(lock.Marshal())
var prm internalclient.PutObjectPrm
ReadOrOpenSession(cmd, &prm, key, cnr, nil)
Prepare(cmd, &prm)
prm.SetHeader(obj)
res, err := internalclient.PutObject(prm)
commonCmd.ExitOnErr(cmd, "Store lock object in FrostFS: %w", err)
cmd.Printf("Lock object ID: %s\n", res.ID())
cmd.Println("Objects successfully locked.")
},
}
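The expiration arithmetic above is simple: when --lifetime is given, the last active epoch is the current epoch plus the lifetime, and the result is stored as a decimal string in the expiration attribute. A tiny worked example with made-up numbers:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Illustrative values: --lifetime 10 requested at current epoch 42.
	var currEpoch, lifetime uint64 = 42, 10
	exp := currEpoch + lifetime
	// The lock object carries this value in its expiration attribute.
	fmt.Println("last active epoch:", strconv.FormatUint(exp, 10)) // "52"
}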
func initCommandObjectLock() {
commonflags.Init(objectLockCmd)
initFlagSession(objectLockCmd, "PUT")
ff := objectLockCmd.Flags()
ff.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
_ = objectLockCmd.MarkFlagRequired(commonflags.CIDFlag)
ff.StringSlice(commonflags.OIDFlag, nil, commonflags.OIDFlagUsage)
_ = objectLockCmd.MarkFlagRequired(commonflags.OIDFlag)
ff.Uint64P(commonflags.ExpireAt, "e", 0, "The last active epoch for the lock")
ff.Uint64(commonflags.Lifetime, 0, "Lock lifetime")
objectLockCmd.MarkFlagsMutuallyExclusive(commonflags.ExpireAt, commonflags.Lifetime)
}


@ -1,481 +0,0 @@
package object
import (
"crypto/ecdsa"
"errors"
"fmt"
"os"
"strings"
internal "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
sessionCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/session"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
const (
bearerTokenFlag = "bearer"
rawFlag = "raw"
rawFlagDesc = "Set raw request option"
fileFlag = "file"
binaryFlag = "binary"
)
type RPCParameters interface {
SetBearerToken(prm *bearer.Token)
SetTTL(uint32)
SetXHeaders([]string)
}
// InitBearer adds bearer token flag to a command.
func InitBearer(cmd *cobra.Command) {
flags := cmd.Flags()
flags.String(bearerTokenFlag, "", "File with signed JSON or binary encoded bearer token")
}
// Prepare prepares object-related parameters for a command.
func Prepare(cmd *cobra.Command, prms ...RPCParameters) {
ttl := viper.GetUint32(commonflags.TTL)
common.PrintVerbose(cmd, "TTL: %d", ttl)
for i := range prms {
btok := common.ReadBearerToken(cmd, bearerTokenFlag)
prms[i].SetBearerToken(btok)
prms[i].SetTTL(ttl)
prms[i].SetXHeaders(parseXHeaders(cmd))
}
}
func parseXHeaders(cmd *cobra.Command) []string {
xHeaders, _ := cmd.Flags().GetStringSlice(commonflags.XHeadersKey)
xs := make([]string, 0, 2*len(xHeaders))
for i := range xHeaders {
k, v, found := strings.Cut(xHeaders[i], "=")
if !found {
panic(fmt.Errorf("invalid X-Header format: %s", xHeaders[i]))
}
xs = append(xs, k, v)
}
return xs
}
func readObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address {
readCID(cmd, cnr)
readOID(cmd, obj)
var addr oid.Address
addr.SetContainer(*cnr)
addr.SetObject(*obj)
return addr
}
func readObjectAddressBin(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID, filename string) oid.Address {
buf, err := os.ReadFile(filename)
commonCmd.ExitOnErr(cmd, "unable to read given file: %w", err)
objTemp := object.New()
commonCmd.ExitOnErr(cmd, "can't unmarshal object from given file: %w", objTemp.Unmarshal(buf))
var addr oid.Address
*cnr, _ = objTemp.ContainerID()
*obj, _ = objTemp.ID()
addr.SetContainer(*cnr)
addr.SetObject(*obj)
return addr
}
func readCID(cmd *cobra.Command, id *cid.ID) {
err := id.DecodeString(cmd.Flag(commonflags.CIDFlag).Value.String())
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
}
func readOID(cmd *cobra.Command, id *oid.ID) {
err := id.DecodeString(cmd.Flag(commonflags.OIDFlag).Value.String())
commonCmd.ExitOnErr(cmd, "decode object ID string: %w", err)
}
// SessionPrm is a common interface of object operation's input which supports
// sessions.
type SessionPrm interface {
SetSessionToken(*session.Object)
SetClient(*client.Client)
}
// forwards all parameters to _readVerifiedSession, passing nil for the object.
func readSessionGlobal(cmd *cobra.Command, dst SessionPrm, key *ecdsa.PrivateKey, cnr cid.ID) {
_readVerifiedSession(cmd, dst, key, cnr, nil)
}
// forwards all parameters to _readVerifiedSession.
func readSession(cmd *cobra.Command, dst SessionPrm, key *ecdsa.PrivateKey, cnr cid.ID, obj oid.ID) {
_readVerifiedSession(cmd, dst, key, cnr, &obj)
}
// decodes session.Object from the file by path specified in the
// commonflags.SessionToken flag. Returns nil if flag is not set.
func getSession(cmd *cobra.Command) *session.Object {
common.PrintVerbose(cmd, "Trying to read session from the file...")
path, _ := cmd.Flags().GetString(commonflags.SessionToken)
if path == "" {
common.PrintVerbose(cmd, "File with session token is not provided.")
return nil
}
common.PrintVerbose(cmd, "Reading session from the file [%s]...", path)
var tok session.Object
err := common.ReadBinaryOrJSON(cmd, &tok, path)
commonCmd.ExitOnErr(cmd, "read session: %v", err)
return &tok
}
// decodes an object session from the file specified in the commonflags.SessionToken
// command flag, if provided, and writes the resulting session into the provided
// SessionPrm. Checks:
//
// - if session verb corresponds to given SessionPrm according to its type
// - relation to the given container
// - relation to the given object if non-nil
// - relation to the given private key used within the command
// - session signature
//
// SessionPrm MUST be one of:
//
// *internal.GetObjectPrm
// *internal.HeadObjectPrm
// *internal.SearchObjectsPrm
// *internal.PayloadRangePrm
// *internal.HashPayloadRangesPrm
func _readVerifiedSession(cmd *cobra.Command, dst SessionPrm, key *ecdsa.PrivateKey, cnr cid.ID, obj *oid.ID) {
var cmdVerb session.ObjectVerb
switch dst.(type) {
default:
panic(fmt.Sprintf("unsupported op parameters %T", dst))
case *internal.GetObjectPrm:
cmdVerb = session.VerbObjectGet
case *internal.HeadObjectPrm:
cmdVerb = session.VerbObjectHead
case *internal.SearchObjectsPrm:
cmdVerb = session.VerbObjectSearch
case *internal.PayloadRangePrm:
cmdVerb = session.VerbObjectRange
case *internal.HashPayloadRangesPrm:
cmdVerb = session.VerbObjectRangeHash
}
tok := getSession(cmd)
if tok == nil {
return
}
common.PrintVerbose(cmd, "Checking session correctness...")
switch false {
case tok.AssertContainer(cnr):
commonCmd.ExitOnErr(cmd, "", errors.New("unrelated container in the session"))
case obj == nil || tok.AssertObject(*obj):
commonCmd.ExitOnErr(cmd, "", errors.New("unrelated object in the session"))
case tok.AssertVerb(cmdVerb):
commonCmd.ExitOnErr(cmd, "", errors.New("wrong verb of the session"))
case tok.AssertAuthKey((*frostfsecdsa.PublicKey)(&key.PublicKey)):
commonCmd.ExitOnErr(cmd, "", errors.New("unrelated key in the session"))
case tok.VerifySignature():
commonCmd.ExitOnErr(cmd, "", errors.New("invalid signature of the session data"))
}
common.PrintVerbose(cmd, "Session is correct.")
dst.SetSessionToken(tok)
}
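The switch false block above is a compact Go idiom: case expressions are evaluated top to bottom and the first one equal to false selects its branch, so the first failing assertion reports its own error. A standard-library-only sketch of the idiom with placeholder booleans:

package main

import "fmt"

func main() {
	container, object, signature := true, false, true

	// The first expression that evaluates to false picks its case,
	// i.e. the first failing assertion wins.
	switch false {
	case container:
		fmt.Println("unrelated container in the session")
	case object:
		fmt.Println("unrelated object in the session") // printed here
	case signature:
		fmt.Println("invalid signature of the session data")
	default:
		fmt.Println("session is correct")
	}
}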
// ReadOrOpenSession opens client connection and calls ReadOrOpenSessionViaClient with it.
func ReadOrOpenSession(cmd *cobra.Command, dst SessionPrm, key *ecdsa.PrivateKey, cnr cid.ID, obj *oid.ID) {
cli := internal.GetSDKClientByFlag(cmd, key, commonflags.RPC)
ReadOrOpenSessionViaClient(cmd, dst, cli, key, cnr, obj)
}
// ReadOrOpenSessionViaClient tries to read the session from the file specified in
// the commonflags.SessionToken flag, finalizes the structure of the decoded token
// and writes the result into the provided SessionPrm. If the file is missing,
// ReadOrOpenSessionViaClient calls OpenSessionViaClient.
func ReadOrOpenSessionViaClient(cmd *cobra.Command, dst SessionPrm, cli *client.Client, key *ecdsa.PrivateKey, cnr cid.ID, obj *oid.ID) {
tok := getSession(cmd)
if tok == nil {
OpenSessionViaClient(cmd, dst, cli, key, cnr, obj)
return
}
var objs []oid.ID
if obj != nil {
objs = []oid.ID{*obj}
if _, ok := dst.(*internal.DeleteObjectPrm); ok {
common.PrintVerbose(cmd, "Collecting relatives of the removal object...")
objs = append(objs, collectObjectRelatives(cmd, cli, cnr, *obj)...)
}
}
finalizeSession(cmd, dst, tok, key, cnr, objs...)
dst.SetClient(cli)
}
// OpenSession opens client connection and calls OpenSessionViaClient with it.
func OpenSession(cmd *cobra.Command, dst SessionPrm, key *ecdsa.PrivateKey, cnr cid.ID, obj *oid.ID) {
cli := internal.GetSDKClientByFlag(cmd, key, commonflags.RPC)
OpenSessionViaClient(cmd, dst, cli, key, cnr, obj)
}
// OpenSessionViaClient opens object session with the remote node, finalizes
// structure of the session token and writes the result into the provided
// SessionPrm. Also writes provided client connection to the SessionPrm.
//
// SessionPrm MUST be one of:
//
// *internal.PutObjectPrm
// *internal.DeleteObjectPrm
//
// If provided SessionPrm is of type internal.DeleteObjectPrm, OpenSessionViaClient
// spreads the session to all object's relatives.
func OpenSessionViaClient(cmd *cobra.Command, dst SessionPrm, cli *client.Client, key *ecdsa.PrivateKey, cnr cid.ID, obj *oid.ID) {
var objs []oid.ID
if obj != nil {
if _, ok := dst.(*internal.DeleteObjectPrm); ok {
common.PrintVerbose(cmd, "Collecting relatives of the removal object...")
rels := collectObjectRelatives(cmd, cli, cnr, *obj)
if len(rels) == 0 {
objs = []oid.ID{*obj}
} else {
objs = append(rels, *obj)
}
}
}
var tok session.Object
const sessionLifetime = 10 // in FrostFS epochs
common.PrintVerbose(cmd, "Opening remote session with the node...")
err := sessionCli.CreateSession(&tok, cli, sessionLifetime)
commonCmd.ExitOnErr(cmd, "open remote session: %w", err)
common.PrintVerbose(cmd, "Session successfully opened.")
finalizeSession(cmd, dst, &tok, key, cnr, objs...)
dst.SetClient(cli)
}
// specifies session verb, binds the session to the given container and limits
// the session by the given objects (if specified). After all data is written,
// signs session using provided private key and writes the session into the
// given SessionPrm.
//
// SessionPrm MUST be one of:
//
// *internal.PutObjectPrm
// *internal.DeleteObjectPrm
func finalizeSession(cmd *cobra.Command, dst SessionPrm, tok *session.Object, key *ecdsa.PrivateKey, cnr cid.ID, objs ...oid.ID) {
common.PrintVerbose(cmd, "Finalizing session token...")
switch dst.(type) {
default:
panic(fmt.Sprintf("unsupported op parameters %T", dst))
case *internal.PutObjectPrm:
common.PrintVerbose(cmd, "Binding session to object PUT...")
tok.ForVerb(session.VerbObjectPut)
case *internal.DeleteObjectPrm:
common.PrintVerbose(cmd, "Binding session to object DELETE...")
tok.ForVerb(session.VerbObjectDelete)
}
common.PrintVerbose(cmd, "Binding session to container %s...", cnr)
tok.BindContainer(cnr)
if len(objs) > 0 {
common.PrintVerbose(cmd, "Limiting session by the objects %v...", objs)
tok.LimitByObjects(objs...)
}
common.PrintVerbose(cmd, "Signing session...")
err := tok.Sign(*key)
commonCmd.ExitOnErr(cmd, "sign session: %w", err)
common.PrintVerbose(cmd, "Session token successfully formed and attached to the request.")
dst.SetSessionToken(tok)
}
// calls commonflags.InitSession with "object <verb>" name.
func initFlagSession(cmd *cobra.Command, verb string) {
commonflags.InitSession(cmd, "object "+verb)
}
// collects and returns all relatives of the given object stored in the specified
// container. An empty result without an error means the object has no relatives
// in the container.
//
// The object itself is not included in the result.
//
// nolint: funlen
func collectObjectRelatives(cmd *cobra.Command, cli *client.Client, cnr cid.ID, obj oid.ID) []oid.ID {
common.PrintVerbose(cmd, "Fetching raw object header...")
// request raw header first
var addrObj oid.Address
addrObj.SetContainer(cnr)
addrObj.SetObject(obj)
var prmHead internal.HeadObjectPrm
prmHead.SetClient(cli)
prmHead.SetAddress(addrObj)
prmHead.SetRawFlag(true)
Prepare(cmd, &prmHead)
_, err := internal.HeadObject(prmHead)
var errSplit *object.SplitInfoError
switch {
default:
commonCmd.ExitOnErr(cmd, "failed to get raw object header: %w", err)
case err == nil:
common.PrintVerbose(cmd, "Raw header received - object is singular.")
return nil
case errors.As(err, &errSplit):
common.PrintVerbose(cmd, "Split information received - object is virtual.")
}
splitInfo := errSplit.SplitInfo()
// collect split chain by the descending ease of operations (ease is evaluated heuristically).
// If any approach fails, we don't try the next since we assume that it will fail too.
if idLinking, ok := splitInfo.Link(); ok {
common.PrintVerbose(cmd, "Collecting split members using linking object %s...", idLinking)
addrObj.SetObject(idLinking)
prmHead.SetAddress(addrObj)
prmHead.SetRawFlag(false)
// client is already set
res, err := internal.HeadObject(prmHead)
if err == nil {
children := res.Header().Children()
common.PrintVerbose(cmd, "Received split members from the linking object: %v", children)
// include linking object
return append(children, idLinking)
}
// linking object is not required for
// object collecting
common.PrintVerbose(cmd, "failed to get linking object's header: %w", err)
}
if idSplit := splitInfo.SplitID(); idSplit != nil {
common.PrintVerbose(cmd, "Collecting split members by split ID...")
var query object.SearchFilters
query.AddSplitIDFilter(object.MatchStringEqual, idSplit)
var prm internal.SearchObjectsPrm
prm.SetContainerID(cnr)
prm.SetClient(cli)
prm.SetFilters(query)
res, err := internal.SearchObjects(prm)
commonCmd.ExitOnErr(cmd, "failed to search objects by split ID: %w", err)
members := res.IDList()
common.PrintVerbose(cmd, "Found objects by split ID: %v", res.IDList())
return members
}
idMember, ok := splitInfo.LastPart()
if !ok {
commonCmd.ExitOnErr(cmd, "", errors.New("missing any data in received object split information"))
}
common.PrintVerbose(cmd, "Traverse the object split chain in reverse...", idMember)
var res *internal.HeadObjectRes
chain := []oid.ID{idMember}
chainSet := map[oid.ID]struct{}{idMember: {}}
prmHead.SetRawFlag(false)
// split members are almost definitely singular, but don't get hung up on it
for {
common.PrintVerbose(cmd, "Reading previous element of the split chain member %s...", idMember)
addrObj.SetObject(idMember)
res, err = internal.HeadObject(prmHead)
commonCmd.ExitOnErr(cmd, "failed to read split chain member's header: %w", err)
idMember, ok = res.Header().PreviousID()
if !ok {
common.PrintVerbose(cmd, "Chain ended.")
break
}
if _, ok = chainSet[idMember]; ok {
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("duplicated member in the split chain %s", idMember))
}
chain = append(chain, idMember)
chainSet[idMember] = struct{}{}
}
common.PrintVerbose(cmd, "Looking for a linking object...")
var query object.SearchFilters
query.AddParentIDFilter(object.MatchStringEqual, obj)
var prmSearch internal.SearchObjectsPrm
prmSearch.SetClient(cli)
prmSearch.SetContainerID(cnr)
prmSearch.SetFilters(query)
resSearch, err := internal.SearchObjects(prmSearch)
commonCmd.ExitOnErr(cmd, "failed to find object children: %w", err)
list := resSearch.IDList()
for i := range list {
if _, ok = chainSet[list[i]]; !ok {
common.PrintVerbose(cmd, "Found one more related object %s.", list[i])
chain = append(chain, list[i])
}
}
return chain
}
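For readers unfamiliar with the reverse walk above, here is a self-contained sketch of the same traversal with plain strings instead of object IDs and a map instead of HEAD requests; it collects members by following "previous" links and rejects duplicates, which would indicate a corrupted split chain:

package main

import "fmt"

func main() {
	prev := map[string]string{"c": "b", "b": "a"} // child -> previous part
	member := "c"                                 // start from the last part
	chain := []string{member}
	seen := map[string]bool{member: true}
	for {
		p, ok := prev[member]
		if !ok {
			break // chain ended
		}
		if seen[p] {
			fmt.Println("duplicated member in the split chain:", p)
			return
		}
		chain = append(chain, p)
		seen[p] = true
		member = p
	}
	fmt.Println("collected split chain:", chain) // [c b a]
}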


@ -1,133 +0,0 @@
package cmd
import (
"os"
"path/filepath"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
accountingCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/accounting"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/acl"
bearerCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/bearer"
containerCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/container"
controlCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/control"
netmapCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/netmap"
objectCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object"
sessionCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/session"
sgCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/storagegroup"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/tree"
utilCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/gendoc"
"github.com/mitchellh/go-homedir"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
const (
envPrefix = "FROSTFS_CLI"
)
// Global scope flags.
var (
cfgFile string
cfgDir string
)
// rootCmd represents the base command when called without any subcommands.
var rootCmd = &cobra.Command{
Use: "frostfs-cli",
Short: "Command Line Tool to work with FrostFS",
Long: `FrostFS CLI provides all basic interactions with FrostFS and it's services.
It contains commands for interaction with FrostFS nodes using different versions
of frostfs-api and some useful utilities for compiling ACL rules from JSON
notation, managing container access through protocol gates, querying network map
and much more!`,
Run: entryPoint,
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
err := rootCmd.Execute()
commonCmd.ExitOnErr(rootCmd, "", err)
}
func init() {
cobra.OnInitialize(initConfig)
// use stdout as default output for cmd.Print()
rootCmd.SetOut(os.Stdout)
// Here you will define your flags and configuration settings.
// Cobra supports persistent flags, which, if defined here,
// will be global for your application.
rootCmd.PersistentFlags().StringVarP(&cfgFile, "config", "c", "", "Config file (default is $HOME/.config/frostfs-cli/config.yaml)")
rootCmd.PersistentFlags().StringVar(&cfgDir, "config-dir", "", "Config directory")
rootCmd.PersistentFlags().BoolP(commonflags.Verbose, commonflags.VerboseShorthand,
false, commonflags.VerboseUsage)
_ = viper.BindPFlag(commonflags.Verbose, rootCmd.PersistentFlags().Lookup(commonflags.Verbose))
// Cobra also supports local flags, which will only run
// when this action is called directly.
rootCmd.Flags().Bool("version", false, "Application version and FrostFS API compatibility")
rootCmd.AddCommand(acl.Cmd)
rootCmd.AddCommand(bearerCli.Cmd)
rootCmd.AddCommand(sessionCli.Cmd)
rootCmd.AddCommand(accountingCli.Cmd)
rootCmd.AddCommand(controlCli.Cmd)
rootCmd.AddCommand(utilCli.Cmd)
rootCmd.AddCommand(netmapCli.Cmd)
rootCmd.AddCommand(objectCli.Cmd)
rootCmd.AddCommand(sgCli.Cmd)
rootCmd.AddCommand(containerCli.Cmd)
rootCmd.AddCommand(tree.Cmd)
rootCmd.AddCommand(gendoc.Command(rootCmd))
}
func entryPoint(cmd *cobra.Command, _ []string) {
printVersion, _ := cmd.Flags().GetBool("version")
if printVersion {
cmd.Print(misc.BuildInfo("FrostFS CLI"))
return
}
_ = cmd.Usage()
}
// initConfig reads in config file and ENV variables if set.
func initConfig() {
if cfgFile != "" {
// Use config file from the flag.
viper.SetConfigFile(cfgFile)
} else {
// Find home directory.
home, err := homedir.Dir()
commonCmd.ExitOnErr(rootCmd, "", err)
// Search config in `$HOME/.config/frostfs-cli/` with name "config.yaml"
viper.AddConfigPath(filepath.Join(home, ".config", "frostfs-cli"))
viper.SetConfigName("config")
viper.SetConfigType("yaml")
}
viper.SetEnvPrefix(envPrefix)
viper.AutomaticEnv() // read in environment variables that match
// If a config file is found, read it in.
if err := viper.ReadInConfig(); err == nil {
common.PrintVerbose(rootCmd, "Using config file: %s", viper.ConfigFileUsed())
}
if cfgDir != "" {
if err := config.ReadConfigDir(viper.GetViper(), cfgDir); err != nil {
commonCmd.ExitOnErr(rootCmd, "failed to read config dir: %w", err)
}
}
}
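Because of the FROSTFS_CLI env prefix and AutomaticEnv, any config key can also be supplied through the environment. A minimal sketch of the assumed mapping; the "verbose" key is just an example, and in the real CLI it is additionally bound to a flag:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/viper"
)

func main() {
	// With the prefix set, the config key "verbose" is read from FROSTFS_CLI_VERBOSE.
	os.Setenv("FROSTFS_CLI_VERBOSE", "true")
	viper.SetEnvPrefix("FROSTFS_CLI")
	viper.AutomaticEnv()
	fmt.Println(viper.GetBool("verbose")) // true
}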


@ -1,53 +0,0 @@
package storagegroup
import (
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
objectCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
)
var sgDelCmd = &cobra.Command{
Use: "delete",
Short: "Delete storage group from FrostFS",
Long: "Delete storage group from FrostFS",
Run: delSG,
}
func initSGDeleteCmd() {
commonflags.Init(sgDelCmd)
flags := sgDelCmd.Flags()
flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
_ = sgDelCmd.MarkFlagRequired(commonflags.CIDFlag)
flags.StringVarP(&sgID, sgIDFlag, "", "", "Storage group identifier")
_ = sgDelCmd.MarkFlagRequired(sgIDFlag)
}
func delSG(cmd *cobra.Command, _ []string) {
pk := key.GetOrGenerate(cmd)
var cnr cid.ID
var obj oid.ID
addr := readObjectAddress(cmd, &cnr, &obj)
var prm internalclient.DeleteObjectPrm
objectCli.OpenSession(cmd, &prm, pk, cnr, &obj)
objectCli.Prepare(cmd, &prm)
prm.SetAddress(addr)
res, err := internalclient.DeleteObject(prm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
tombstone := res.Tombstone()
cmd.Println("Storage group removed successfully.")
cmd.Printf(" Tombstone: %s\n", tombstone)
}


@ -1,82 +0,0 @@
package storagegroup
import (
"bytes"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
objectCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
storagegroupSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/storagegroup"
"github.com/spf13/cobra"
)
var sgID string
var sgGetCmd = &cobra.Command{
Use: "get",
Short: "Get storage group from FrostFS",
Long: "Get storage group from FrostFS",
Run: getSG,
}
func initSGGetCmd() {
commonflags.Init(sgGetCmd)
flags := sgGetCmd.Flags()
flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
_ = sgGetCmd.MarkFlagRequired(commonflags.CIDFlag)
flags.StringVarP(&sgID, sgIDFlag, "", "", "Storage group identifier")
_ = sgGetCmd.MarkFlagRequired(sgIDFlag)
flags.Bool(sgRawFlag, false, "Set raw request option")
}
func getSG(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
addr := readObjectAddress(cmd, &cnr, &obj)
pk := key.GetOrGenerate(cmd)
buf := bytes.NewBuffer(nil)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
var prm internalclient.GetObjectPrm
objectCli.Prepare(cmd, &prm)
prm.SetClient(cli)
raw, _ := cmd.Flags().GetBool(sgRawFlag)
prm.SetRawFlag(raw)
prm.SetAddress(addr)
prm.SetPayloadWriter(buf)
res, err := internalclient.GetObject(prm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
rawObj := res.Header()
rawObj.SetPayload(buf.Bytes())
var sg storagegroupSDK.StorageGroup
err = storagegroupSDK.ReadFromObject(&sg, *rawObj)
commonCmd.ExitOnErr(cmd, "could not read storage group from the obj: %w", err)
cmd.Printf("The last active epoch: %d\n", sg.ExpirationEpoch())
cmd.Printf("Group size: %d\n", sg.ValidationDataSize())
common.PrintChecksum(cmd, "Group hash", sg.ValidationDataHash)
if members := sg.Members(); len(members) > 0 {
cmd.Println("Members:")
for i := range members {
cmd.Printf("\t%s\n", members[i].String())
}
}
}


@ -1,52 +0,0 @@
package storagegroup
import (
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
objectCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/storagegroup"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/spf13/cobra"
)
var sgListCmd = &cobra.Command{
Use: "list",
Short: "List storage groups in FrostFS container",
Long: "List storage groups in FrostFS container",
Run: listSG,
}
func initSGListCmd() {
commonflags.Init(sgListCmd)
sgListCmd.Flags().String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
_ = sgListCmd.MarkFlagRequired(commonflags.CIDFlag)
}
func listSG(cmd *cobra.Command, _ []string) {
var cnr cid.ID
readCID(cmd, &cnr)
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
var prm internalclient.SearchObjectsPrm
objectCli.Prepare(cmd, &prm)
prm.SetClient(cli)
prm.SetContainerID(cnr)
prm.SetFilters(storagegroup.SearchQuery())
res, err := internalclient.SearchObjects(prm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
ids := res.IDList()
cmd.Printf("Found %d storage groups.\n", len(ids))
for i := range ids {
cmd.Println(ids[i].String())
}
}


@ -1,45 +0,0 @@
package storagegroup
import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
)
func readObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address {
readCID(cmd, cnr)
readSGID(cmd, obj)
var addr oid.Address
addr.SetContainer(*cnr)
addr.SetObject(*obj)
return addr
}
func readCID(cmd *cobra.Command, id *cid.ID) {
f := cmd.Flag(commonflags.CIDFlag)
if f == nil {
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("missing container flag (%s)", commonflags.CIDFlag))
return
}
err := id.DecodeString(f.Value.String())
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
}
func readSGID(cmd *cobra.Command, id *oid.ID) {
const flag = "id"
f := cmd.Flag(flag)
if f == nil {
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("missing storage group flag (%s)", flag))
return
}
err := id.DecodeString(f.Value.String())
commonCmd.ExitOnErr(cmd, "decode storage group ID string: %w", err)
}


@ -1,95 +0,0 @@
package tree
import (
"crypto/sha256"
"fmt"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/spf13/cobra"
)
var addCmd = &cobra.Command{
Use: "add",
Short: "Add a node to the tree service",
Run: add,
PersistentPreRun: func(cmd *cobra.Command, _ []string) {
commonflags.Bind(cmd)
},
}
func initAddCmd() {
commonflags.Init(addCmd)
initCTID(addCmd)
ff := addCmd.Flags()
ff.StringSlice(metaFlagKey, nil, "Meta pairs in the form of Key1=[0x]Value1,Key2=[0x]Value2")
ff.Uint64(parentIDFlagKey, 0, "Parent node ID")
_ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func add(cmd *cobra.Command, _ []string) {
pk := key.GetOrGenerate(cmd)
var cnr cid.ID
err := cnr.DecodeString(cmd.Flag(commonflags.CIDFlag).Value.String())
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
tid, _ := cmd.Flags().GetString(treeIDFlagKey)
pid, _ := cmd.Flags().GetUint64(parentIDFlagKey)
meta, err := parseMeta(cmd)
commonCmd.ExitOnErr(cmd, "meta data parsing: %w", err)
ctx := cmd.Context()
cli, err := _client(ctx)
commonCmd.ExitOnErr(cmd, "client: %w", err)
rawCID := make([]byte, sha256.Size)
cnr.Encode(rawCID)
req := new(tree.AddRequest)
req.Body = &tree.AddRequest_Body{
ContainerId: rawCID,
TreeId: tid,
ParentId: pid,
Meta: meta,
BearerToken: nil, // TODO: #1891 add token handling
}
commonCmd.ExitOnErr(cmd, "message signing: %w", tree.SignMessage(req, pk))
resp, err := cli.Add(ctx, req)
commonCmd.ExitOnErr(cmd, "rpc call: %w", err)
cmd.Println("Node ID: ", resp.Body.NodeId)
}
func parseMeta(cmd *cobra.Command) ([]*tree.KeyValue, error) {
raws, _ := cmd.Flags().GetStringSlice(metaFlagKey)
if len(raws) == 0 {
return nil, nil
}
pairs := make([]*tree.KeyValue, 0, len(raws))
for i := range raws {
k, v, found := strings.Cut(raws[i], "=")
if !found {
return nil, fmt.Errorf("invalid meta pair format: %s", raws[i])
}
var pair tree.KeyValue
pair.Key = k
pair.Value = []byte(v)
pairs = append(pairs, &pair)
}
return pairs, nil
}


@ -1,95 +0,0 @@
package tree
import (
"crypto/sha256"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/spf13/cobra"
)
var addByPathCmd = &cobra.Command{
Use: "add-by-path",
Short: "Add a node by the path",
Run: addByPath,
PersistentPreRun: func(cmd *cobra.Command, _ []string) {
commonflags.Bind(cmd)
},
}
func initAddByPathCmd() {
commonflags.Init(addByPathCmd)
initCTID(addByPathCmd)
ff := addByPathCmd.Flags()
// tree service does not allow any attribute except
// the 'FileName' but that's a limitation of the
// current implementation, not the rule
// ff.String(pathAttributeFlagKey, "", "Path attribute")
ff.String(pathFlagKey, "", "Path to a node")
ff.StringSlice(metaFlagKey, nil, "Meta pairs in the form of Key1=[0x]Value1,Key2=[0x]Value2")
_ = cobra.MarkFlagRequired(ff, commonflags.RPC)
_ = cobra.MarkFlagRequired(ff, pathFlagKey)
}
func addByPath(cmd *cobra.Command, _ []string) {
pk := key.GetOrGenerate(cmd)
cidRaw, _ := cmd.Flags().GetString(commonflags.CIDFlag)
var cnr cid.ID
err := cnr.DecodeString(cidRaw)
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
tid, _ := cmd.Flags().GetString(treeIDFlagKey)
ctx := cmd.Context()
cli, err := _client(ctx)
commonCmd.ExitOnErr(cmd, "client: %w", err)
rawCID := make([]byte, sha256.Size)
cnr.Encode(rawCID)
meta, err := parseMeta(cmd)
commonCmd.ExitOnErr(cmd, "meta data parsing: %w", err)
path, _ := cmd.Flags().GetString(pathFlagKey)
// pAttr, _ := cmd.Flags().GetString(pathAttributeFlagKey)
req := new(tree.AddByPathRequest)
req.Body = &tree.AddByPathRequest_Body{
ContainerId: rawCID,
TreeId: tid,
PathAttribute: object.AttributeFileName,
// PathAttribute: pAttr,
Path: strings.Split(path, "/"),
Meta: meta,
BearerToken: nil, // TODO: #1891 add token handling
}
commonCmd.ExitOnErr(cmd, "message signing: %w", tree.SignMessage(req, pk))
resp, err := cli.AddByPath(ctx, req)
commonCmd.ExitOnErr(cmd, "rpc call: %w", err)
cmd.Printf("Parent ID: %d\n", resp.GetBody().GetParentId())
nn := resp.GetBody().GetNodes()
if len(nn) == 0 {
common.PrintVerbose(cmd, "No new nodes were created")
return
}
cmd.Println("Created nodes:")
for _, node := range resp.GetBody().GetNodes() {
cmd.Printf("\t%d\n", node)
}
}


@ -1,40 +0,0 @@
package tree
import (
"context"
"strings"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
"github.com/spf13/viper"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)
// _client returns grpc Tree service client. Should be removed
// after making Tree API public.
func _client(ctx context.Context) (tree.TreeServiceClient, error) {
var netAddr network.Address
err := netAddr.FromString(viper.GetString(commonflags.RPC))
if err != nil {
return nil, err
}
opts := make([]grpc.DialOption, 1, 2)
opts[0] = grpc.WithBlock()
if !strings.HasPrefix(netAddr.URIAddr(), "grpcs:") {
opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
}
// a default connection establishing timeout
const defaultClientConnectTimeout = time.Second * 2
ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout)
cc, err := grpc.DialContext(ctx, netAddr.URIAddr(), opts...)
cancel()
return tree.NewTreeServiceClient(cc), err
}


@ -1,99 +0,0 @@
package tree
import (
"crypto/sha256"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/spf13/cobra"
)
var getByPathCmd = &cobra.Command{
Use: "get-by-path",
Short: "Get a node by its path",
Run: getByPath,
PersistentPreRun: func(cmd *cobra.Command, _ []string) {
commonflags.Bind(cmd)
},
}
func initGetByPathCmd() {
commonflags.Init(getByPathCmd)
initCTID(getByPathCmd)
ff := getByPathCmd.Flags()
// tree service does not allow any attribute except
// the 'FileName' but that's a limitation of the
// current implementation, not the rule
// ff.String(pathAttributeFlagKey, "", "Path attribute")
ff.String(pathFlagKey, "", "Path to a node")
ff.Bool(latestOnlyFlagKey, false, "Look only for the latest version of a node")
_ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func getByPath(cmd *cobra.Command, _ []string) {
pk := key.GetOrGenerate(cmd)
cidRaw, _ := cmd.Flags().GetString(commonflags.CIDFlag)
var cnr cid.ID
err := cnr.DecodeString(cidRaw)
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
tid, _ := cmd.Flags().GetString(treeIDFlagKey)
ctx := cmd.Context()
cli, err := _client(ctx)
commonCmd.ExitOnErr(cmd, "client: %w", err)
rawCID := make([]byte, sha256.Size)
cnr.Encode(rawCID)
latestOnly, _ := cmd.Flags().GetBool(latestOnlyFlagKey)
path, _ := cmd.Flags().GetString(pathFlagKey)
// pAttr, _ := cmd.Flags().GetString(pathAttributeFlagKey)
req := new(tree.GetNodeByPathRequest)
req.Body = &tree.GetNodeByPathRequest_Body{
ContainerId: rawCID,
TreeId: tid,
PathAttribute: object.AttributeFileName,
// PathAttribute: pAttr,
Path: strings.Split(path, "/"),
LatestOnly: latestOnly,
AllAttributes: true,
BearerToken: nil, // TODO: #1891 add token handling
}
commonCmd.ExitOnErr(cmd, "message signing: %w", tree.SignMessage(req, pk))
resp, err := cli.GetNodeByPath(ctx, req)
commonCmd.ExitOnErr(cmd, "rpc call: %w", err)
nn := resp.GetBody().GetNodes()
if len(nn) == 0 {
common.PrintVerbose(cmd, "The node is not found")
return
}
for _, n := range nn {
cmd.Printf("%d:\n", n.GetNodeId())
cmd.Println("\tParent ID: ", n.GetParentId())
cmd.Println("\tTimestamp: ", n.GetTimestamp())
cmd.Println("\tMeta pairs: ")
for _, kv := range n.GetMeta() {
cmd.Printf("\t\t%s: %s\n", kv.GetKey(), string(kv.GetValue()))
}
}
}


@ -1,63 +0,0 @@
package tree
import (
"crypto/sha256"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/spf13/cobra"
)
var listCmd = &cobra.Command{
Use: "list",
Short: "Get tree IDs",
Run: list,
PersistentPreRun: func(cmd *cobra.Command, _ []string) {
commonflags.Bind(cmd)
},
}
func initListCmd() {
commonflags.Init(listCmd)
ff := listCmd.Flags()
ff.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
_ = listCmd.MarkFlagRequired(commonflags.CIDFlag)
_ = cobra.MarkFlagRequired(ff, commonflags.RPC)
}
func list(cmd *cobra.Command, _ []string) {
pk := key.GetOrGenerate(cmd)
cidString, _ := cmd.Flags().GetString(commonflags.CIDFlag)
var cnr cid.ID
err := cnr.DecodeString(cidString)
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
ctx := cmd.Context()
cli, err := _client(ctx)
commonCmd.ExitOnErr(cmd, "client: %w", err)
rawCID := make([]byte, sha256.Size)
cnr.Encode(rawCID)
req := &tree.TreeListRequest{
Body: &tree.TreeListRequest_Body{
ContainerId: rawCID,
},
}
commonCmd.ExitOnErr(cmd, "message signing: %w", tree.SignMessage(req, pk))
resp, err := cli.TreeList(ctx, req)
commonCmd.ExitOnErr(cmd, "rpc call: %w", err)
for _, treeID := range resp.GetBody().GetIds() {
cmd.Println(treeID)
}
}


@ -1,45 +0,0 @@
package tree
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"github.com/spf13/cobra"
)
var Cmd = &cobra.Command{
Use: "tree",
Short: "Operations with the Tree service",
}
func init() {
Cmd.AddCommand(addCmd)
Cmd.AddCommand(getByPathCmd)
Cmd.AddCommand(addByPathCmd)
Cmd.AddCommand(listCmd)
initAddCmd()
initGetByPathCmd()
initAddByPathCmd()
initListCmd()
}
const (
treeIDFlagKey = "tid"
parentIDFlagKey = "pid"
metaFlagKey = "meta"
pathFlagKey = "path"
pathAttributeFlagKey = "pattr"
latestOnlyFlagKey = "latest"
)
func initCTID(cmd *cobra.Command) {
ff := cmd.Flags()
ff.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
_ = cmd.MarkFlagRequired(commonflags.CIDFlag)
ff.String(treeIDFlagKey, "", "Tree ID")
_ = cmd.MarkFlagRequired(treeIDFlagKey)
}


@ -1,327 +0,0 @@
package util
import (
"bytes"
"crypto/ecdsa"
"encoding/hex"
"errors"
"fmt"
"strings"
"text/tabwriter"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"github.com/flynn-archive/go-shlex"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/olekukonko/tablewriter"
"github.com/spf13/cobra"
)
// PrettyPrintTableBACL print basic ACL in table format.
func PrettyPrintTableBACL(cmd *cobra.Command, bacl *acl.Basic) {
// Header
w := tabwriter.NewWriter(cmd.OutOrStdout(), 1, 4, 4, ' ', 0)
fmt.Fprintln(w, "\tRangeHASH\tRange\tSearch\tDelete\tPut\tHead\tGet")
// Bits
bits := []string{
boolToString(bacl.Sticky()) + " " + boolToString(!bacl.Extendable()),
getRoleBitsForOperation(bacl, acl.OpObjectHash), getRoleBitsForOperation(bacl, acl.OpObjectRange),
getRoleBitsForOperation(bacl, acl.OpObjectSearch), getRoleBitsForOperation(bacl, acl.OpObjectDelete),
getRoleBitsForOperation(bacl, acl.OpObjectPut), getRoleBitsForOperation(bacl, acl.OpObjectHead),
getRoleBitsForOperation(bacl, acl.OpObjectGet),
}
fmt.Fprintln(w, strings.Join(bits, "\t"))
// Footer
footer := []string{"X F"}
for i := 0; i < 7; i++ {
footer = append(footer, "U S O B")
}
fmt.Fprintln(w, strings.Join(footer, "\t"))
w.Flush()
cmd.Println(" X-Sticky F-Final U-User S-System O-Others B-Bearer")
}
func getRoleBitsForOperation(bacl *acl.Basic, op acl.Op) string {
return boolToString(bacl.IsOpAllowed(op, acl.RoleOwner)) + " " +
boolToString(bacl.IsOpAllowed(op, acl.RoleContainer)) + " " +
boolToString(bacl.IsOpAllowed(op, acl.RoleOthers)) + " " +
boolToString(bacl.AllowedBearerRules(op))
}
func boolToString(b bool) string {
if b {
return "1"
}
return "0"
}
// PrettyPrintTableEACL print extended ACL in table format.
func PrettyPrintTableEACL(cmd *cobra.Command, table *eacl.Table) {
out := tablewriter.NewWriter(cmd.OutOrStdout())
out.SetHeader([]string{"Operation", "Action", "Filters", "Targets"})
out.SetAlignment(tablewriter.ALIGN_CENTER)
out.SetRowLine(true)
out.SetAutoWrapText(false)
for _, r := range table.Records() {
out.Append([]string{
r.Operation().String(),
r.Action().String(),
eaclFiltersToString(r.Filters()),
eaclTargetsToString(r.Targets()),
})
}
out.Render()
}
func eaclTargetsToString(ts []eacl.Target) string {
b := bytes.NewBuffer(nil)
for _, t := range ts {
keysExists := len(t.BinaryKeys()) > 0
switch t.Role() {
case eacl.RoleUser:
b.WriteString("User")
if keysExists {
b.WriteString(": ")
}
case eacl.RoleSystem:
b.WriteString("System")
if keysExists {
b.WriteString(": ")
}
case eacl.RoleOthers:
b.WriteString("Others")
if keysExists {
b.WriteString(": ")
}
default:
b.WriteString("Unknown")
if keysExists {
b.WriteString(": ")
}
}
for i, pub := range t.BinaryKeys() {
if i != 0 {
b.WriteString(" ")
}
b.WriteString(hex.EncodeToString(pub))
b.WriteString("\n")
}
}
return b.String()
}
func eaclFiltersToString(fs []eacl.Filter) string {
b := bytes.NewBuffer(nil)
tw := tabwriter.NewWriter(b, 0, 0, 1, ' ', 0)
for _, f := range fs {
switch f.From() {
case eacl.HeaderFromObject:
_, _ = tw.Write([]byte("O:\t"))
case eacl.HeaderFromRequest:
_, _ = tw.Write([]byte("R:\t"))
case eacl.HeaderFromService:
_, _ = tw.Write([]byte("S:\t"))
default:
_, _ = tw.Write([]byte(" \t"))
}
_, _ = tw.Write([]byte(f.Key()))
switch f.Matcher() {
case eacl.MatchStringEqual:
_, _ = tw.Write([]byte("\t==\t"))
case eacl.MatchStringNotEqual:
_, _ = tw.Write([]byte("\t!=\t"))
case eacl.MatchUnknown:
}
_, _ = tw.Write([]byte(f.Value() + "\t"))
_, _ = tw.Write([]byte("\n"))
}
_ = tw.Flush()
// To have nice output with tabwriter, we must append newline
// after the last line. Here we strip it to delete empty line
// in the final output.
s := b.String()
if len(s) > 0 {
s = s[:len(s)-1]
}
return s
}
// ParseEACLRules parses eACL table.
// Uses ParseEACLRule.
//
//nolint:godot
func ParseEACLRules(table *eacl.Table, rules []string) error {
if len(rules) == 0 {
return errors.New("no extended ACL rules have been provided")
}
for _, ruleStr := range rules {
err := ParseEACLRule(table, ruleStr)
if err != nil {
return fmt.Errorf("can't create extended acl record from rule '%s': %v", ruleStr, err)
}
}
return nil
}
// ParseEACLRule parses eACL table from the following form:
// <action> <operation> [<filter1> ...] [<target1> ...]
//
// Examples:
// allow get req:X-Header=123 obj:Attr=value others:0xkey1,key2 system:key3 user:key4
//
//nolint:godot
func ParseEACLRule(table *eacl.Table, rule string) error {
r, err := shlex.Split(rule)
if err != nil {
return fmt.Errorf("can't parse rule '%s': %v", rule, err)
}
return parseEACLTable(table, r)
}
func parseEACLTable(tb *eacl.Table, args []string) error {
if len(args) < 2 {
return errors.New("at least 2 arguments must be provided")
}
var action eacl.Action
if !action.FromString(strings.ToUpper(args[0])) {
return errors.New("invalid action (expected 'allow' or 'deny')")
}
ops, err := eaclOperationsFromString(args[1])
if err != nil {
return err
}
r, err := parseEACLRecord(args[2:])
if err != nil {
return err
}
r.SetAction(action)
for _, op := range ops {
r := *r
r.SetOperation(op)
tb.AddRecord(&r)
}
return nil
}
func parseEACLRecord(args []string) (*eacl.Record, error) {
r := new(eacl.Record)
for _, arg := range args {
before, after, found := strings.Cut(arg, ":")
switch prefix := strings.ToLower(before); prefix {
case "req", "obj": // filters
if !found {
return nil, fmt.Errorf("invalid filter or target: %s", arg)
}
var key, value string
var op eacl.Match
var f bool
key, value, f = strings.Cut(after, "!=")
if f {
op = eacl.MatchStringNotEqual
} else {
key, value, f = strings.Cut(after, "=")
if !f {
return nil, fmt.Errorf("invalid filter key-value pair: %s", after)
}
op = eacl.MatchStringEqual
}
typ := eacl.HeaderFromRequest
if before == "obj" {
typ = eacl.HeaderFromObject
}
r.AddFilter(typ, op, key, value)
case "others", "system", "user", "pubkey": // targets
var err error
var pubs []ecdsa.PublicKey
if found {
pubs, err = parseKeyList(after)
if err != nil {
return nil, err
}
}
var role eacl.Role
if prefix != "pubkey" {
role, err = eaclRoleFromString(prefix)
if err != nil {
return nil, err
}
}
eacl.AddFormedTarget(r, role, pubs...)
default:
return nil, fmt.Errorf("invalid prefix: %s", before)
}
}
return r, nil
}
// eaclRoleFromString parses eacl.Role from string.
func eaclRoleFromString(s string) (eacl.Role, error) {
var r eacl.Role
if !r.FromString(strings.ToUpper(s)) {
return r, fmt.Errorf("unexpected role %s", s)
}
return r, nil
}
// parseKeyList parses list of hex-encoded public keys separated by comma.
func parseKeyList(s string) ([]ecdsa.PublicKey, error) {
ss := strings.Split(s, ",")
pubs := make([]ecdsa.PublicKey, len(ss))
for i := range ss {
st := strings.TrimPrefix(ss[i], "0x")
pub, err := keys.NewPublicKeyFromString(st)
if err != nil {
return nil, fmt.Errorf("invalid public key '%s': %w", ss[i], err)
}
pubs[i] = ecdsa.PublicKey(*pub)
}
return pubs, nil
}
// eaclOperationsFromString parses list of eacl.Operation separated by comma.
func eaclOperationsFromString(s string) ([]eacl.Operation, error) {
ss := strings.Split(s, ",")
ops := make([]eacl.Operation, len(ss))
for i := range ss {
if !ops[i].FromString(strings.ToUpper(ss[i])) {
return nil, fmt.Errorf("invalid operation: %s", ss[i])
}
}
return ops, nil
}
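
Illustration only, not part of the diff: a minimal sketch of how the helpers above could be combined to parse textual rules and render the resulting table. It assumes the SDK exposes an eacl.NewTable constructor and that this util package is importable under the shown module path; both are assumptions to verify against the actual code base.

package main

import (
	"os"

	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
	"github.com/spf13/cobra"
)

func main() {
	// Rules in the documented form: <action> <operation> [<filter> ...] [<target> ...]
	rules := []string{
		"deny put others",
		"allow get obj:Color=Red others",
	}

	table := eacl.NewTable() // assumed SDK constructor
	if err := util.ParseEACLRules(table, rules); err != nil {
		panic(err)
	}

	// A throwaway cobra command supplies the output writer for rendering.
	cmd := &cobra.Command{}
	cmd.SetOut(os.Stdout)
	util.PrettyPrintTableEACL(cmd, table)
}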


@ -1,48 +0,0 @@
package blobovnicza
import (
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
)
var inspectCMD = &cobra.Command{
Use: "inspect",
Short: "Object inspection",
Long: `Inspect specific object in a blobovnicza.`,
Run: inspectFunc,
}
func init() {
common.AddAddressFlag(inspectCMD, &vAddress)
common.AddComponentPathFlag(inspectCMD, &vPath)
common.AddOutputFileFlag(inspectCMD, &vOut)
}
func inspectFunc(cmd *cobra.Command, _ []string) {
var addr oid.Address
err := addr.DecodeString(vAddress)
common.ExitOnErr(cmd, common.Errf("invalid address argument: %w", err))
blz := openBlobovnicza(cmd)
defer blz.Close()
var prm blobovnicza.GetPrm
prm.SetAddress(addr)
res, err := blz.Get(prm)
common.ExitOnErr(cmd, common.Errf("could not fetch object: %w", err))
data := res.Object()
var o object.Object
common.ExitOnErr(cmd, common.Errf("could not unmarshal object: %w",
o.Unmarshal(data)),
)
common.PrintObjectHeader(cmd, o)
common.WriteObjectToFile(cmd, vOut, data)
}


@ -1,38 +0,0 @@
package blobovnicza
import (
"fmt"
"io"
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
)
var listCMD = &cobra.Command{
Use: "list",
Short: "Object listing",
Long: `List all objects stored in a blobovnicza.`,
Run: listFunc,
}
func init() {
common.AddComponentPathFlag(listCMD, &vPath)
}
func listFunc(cmd *cobra.Command, _ []string) {
// other targets can be supported
w := cmd.OutOrStderr()
wAddr := func(addr oid.Address) error {
_, err := io.WriteString(w, fmt.Sprintf("%s\n", addr))
return err
}
blz := openBlobovnicza(cmd)
defer blz.Close()
err := blobovnicza.IterateAddresses(blz, wAddr)
common.ExitOnErr(cmd, common.Errf("blobovnicza iterator failure: %w", err))
}


@ -1,33 +0,0 @@
package blobovnicza
import (
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"github.com/spf13/cobra"
)
var (
vAddress string
vPath string
vOut string
)
// Root contains `blobovnicza` command definition.
var Root = &cobra.Command{
Use: "blobovnicza",
Short: "Operations with a blobovnicza",
}
func init() {
Root.AddCommand(listCMD, inspectCMD)
}
func openBlobovnicza(cmd *cobra.Command) *blobovnicza.Blobovnicza {
blz := blobovnicza.New(
blobovnicza.WithPath(vPath),
blobovnicza.WithReadOnly(true),
)
common.ExitOnErr(cmd, blz.Open())
return blz
}


@ -1,35 +0,0 @@
package common
import (
"github.com/spf13/cobra"
)
const (
flagAddress = "address"
flagEnginePath = "path"
flagOutFile = "out"
)
// AddAddressFlag adds the address flag to the passed cobra command.
func AddAddressFlag(cmd *cobra.Command, v *string) {
cmd.Flags().StringVar(v, flagAddress, "", "Object address")
_ = cmd.MarkFlagRequired(flagAddress)
}
// AddComponentPathFlag adds the path-to-component flag to the
// passed cobra command.
func AddComponentPathFlag(cmd *cobra.Command, v *string) {
cmd.Flags().StringVar(v, flagEnginePath, "",
"Path to storage engine component",
)
_ = cmd.MarkFlagFilename(flagEnginePath)
_ = cmd.MarkFlagRequired(flagEnginePath)
}
// AddOutputFileFlag adds the output file flag to the passed cobra
// command.
func AddOutputFileFlag(cmd *cobra.Command, v *string) {
cmd.Flags().StringVar(v, flagOutFile, "",
"File to save object payload")
_ = cmd.MarkFlagFilename(flagOutFile)
}


@ -1,74 +0,0 @@
package meta
import (
"errors"
"fmt"
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
)
var inspectCMD = &cobra.Command{
Use: "inspect",
Short: "Object inspection",
Long: `Inspect specific object in a metabase.`,
Run: inspectFunc,
}
func init() {
common.AddAddressFlag(inspectCMD, &vAddress)
common.AddComponentPathFlag(inspectCMD, &vPath)
}
func inspectFunc(cmd *cobra.Command, _ []string) {
var addr oid.Address
err := addr.DecodeString(vAddress)
common.ExitOnErr(cmd, common.Errf("invalid address argument: %w", err))
db := openMeta(cmd)
defer db.Close()
storageID := meta.StorageIDPrm{}
storageID.SetAddress(addr)
resStorageID, err := db.StorageID(storageID)
common.ExitOnErr(cmd, common.Errf("could not check if the obj is small: %w", err))
if id := resStorageID.StorageID(); id != nil {
cmd.Printf("Object storageID: %s\n\n", blobovnicza.NewIDFromBytes(id).String())
} else {
cmd.Printf("Object does not contain storageID\n\n")
}
prm := meta.GetPrm{}
prm.SetAddress(addr)
prm.SetRaw(true)
siErr := new(object.SplitInfoError)
res, err := db.Get(prm)
if errors.As(err, &siErr) {
link, linkSet := siErr.SplitInfo().Link()
last, lastSet := siErr.SplitInfo().LastPart()
fmt.Println("Object is split")
cmd.Println("\tSplitID:", siErr.SplitInfo().SplitID().String())
if linkSet {
cmd.Println("\tLink:", link)
}
if lastSet {
cmd.Println("\tLast:", last)
}
return
}
common.ExitOnErr(cmd, common.Errf("could not get object: %w", err))
common.PrintObjectHeader(cmd, *res.Header())
}
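
An aside (illustration only): the split-object branch above uses the standard errors.As idiom to pull a typed error out of a wrapped chain. A self-contained sketch of the same pattern with invented types:

package main

import (
	"errors"
	"fmt"
)

// splitError stands in for object.SplitInfoError: an error type that
// carries structured data the caller may want to inspect.
type splitError struct {
	lastPart string
}

func (e *splitError) Error() string { return "object is split" }

func fetch() error {
	// The interesting error is usually wrapped by an upper layer.
	return fmt.Errorf("metabase get: %w", &splitError{lastPart: "part-42"})
}

func main() {
	err := fetch()

	var se *splitError
	if errors.As(err, &se) {
		// The typed value is accessible even though the error was wrapped.
		fmt.Println("split object, last part:", se.lastPart)
		return
	}
	fmt.Println("other error:", err)
}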


@ -1,33 +0,0 @@
package meta
import (
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"github.com/spf13/cobra"
)
var listGarbageCMD = &cobra.Command{
Use: "list-garbage",
Short: "Garbage listing",
Long: `List all the objects that have received GC Mark.`,
Run: listGarbageFunc,
}
func init() {
common.AddComponentPathFlag(listGarbageCMD, &vPath)
}
func listGarbageFunc(cmd *cobra.Command, _ []string) {
db := openMeta(cmd)
defer db.Close()
var garbPrm meta.GarbageIterationPrm
garbPrm.SetHandler(
func(garbageObject meta.GarbageObject) error {
cmd.Println(garbageObject.Address().EncodeToString())
return nil
})
err := db.IterateOverGarbage(garbPrm)
common.ExitOnErr(cmd, common.Errf("could not iterate over garbage bucket: %w", err))
}


@ -1,38 +0,0 @@
package meta
import (
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"github.com/spf13/cobra"
)
var listGraveyardCMD = &cobra.Command{
Use: "list-graveyard",
Short: "Graveyard listing",
Long: `List all the objects that have been covered with a Tomb Stone.`,
Run: listGraveyardFunc,
}
func init() {
common.AddComponentPathFlag(listGraveyardCMD, &vPath)
}
func listGraveyardFunc(cmd *cobra.Command, _ []string) {
db := openMeta(cmd)
defer db.Close()
var gravePrm meta.GraveyardIterationPrm
gravePrm.SetHandler(
func(tsObj meta.TombstonedObject) error {
cmd.Printf(
"Object: %s\nTS: %s\n",
tsObj.Address().EncodeToString(),
tsObj.Tombstone().EncodeToString(),
)
return nil
})
err := db.IterateOverGraveyard(gravePrm)
common.ExitOnErr(cmd, common.Errf("could not iterate over graveyard bucket: %w", err))
}


@ -1,49 +0,0 @@
package meta
import (
"time"
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"github.com/spf13/cobra"
"go.etcd.io/bbolt"
)
var (
vAddress string
vPath string
)
type epochState struct{}
func (s epochState) CurrentEpoch() uint64 {
return 0
}
// Root contains `meta` command definition.
var Root = &cobra.Command{
Use: "meta",
Short: "Operations with a metabase",
}
func init() {
Root.AddCommand(
inspectCMD,
listGraveyardCMD,
listGarbageCMD,
)
}
func openMeta(cmd *cobra.Command) *meta.DB {
db := meta.New(
meta.WithPath(vPath),
meta.WithBoltDBOptions(&bbolt.Options{
ReadOnly: true,
Timeout: 100 * time.Millisecond,
}),
meta.WithEpochState(epochState{}),
)
common.ExitOnErr(cmd, common.Errf("could not open metabase: %w", db.Open(true)))
return db
}


@ -1,65 +0,0 @@
package common
import (
"os"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
)
// PrintObjectHeader prints passed object's header fields via
// the passed cobra command. Does nothing with the payload.
func PrintObjectHeader(cmd *cobra.Command, h object.Object) {
cmd.Println("Version:", h.Version())
cmd.Println("Type:", h.Type())
printContainerID(cmd, h.ContainerID)
printObjectID(cmd, h.ID)
cmd.Println("Owner:", h.OwnerID())
cmd.Println("CreatedAt:", h.CreationEpoch())
cmd.Println("PayloadSize:", h.PayloadSize())
cmd.Println("Attributes:")
for _, attr := range h.Attributes() {
cmd.Printf(" %s: %s\n", attr.Key(), attr.Value())
}
}
func printContainerID(cmd *cobra.Command, recv func() (cid.ID, bool)) {
var val string
id, ok := recv()
if ok {
val = id.String()
} else {
val = "<empty>"
}
cmd.Println("CID:", val)
}
func printObjectID(cmd *cobra.Command, recv func() (oid.ID, bool)) {
var val string
id, ok := recv()
if ok {
val = id.String()
} else {
val = "<empty>"
}
cmd.Println("ID:", val)
}
// WriteObjectToFile writes object to the provided path. Does nothing if
// the path is empty.
func WriteObjectToFile(cmd *cobra.Command, path string, data []byte) {
if path == "" {
return
}
ExitOnErr(cmd, Errf("could not write file: %w",
os.WriteFile(path, data, 0644)))
cmd.Printf("\nSaved payload to '%s' file\n", path)
}
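
Illustration only: printContainerID and printObjectID above take the header's ContainerID/ID methods as method values, i.e. ordinary func() (T, bool) arguments. A tiny standalone sketch of that idiom with invented types:

package main

import "fmt"

type header struct {
	id    string
	idSet bool
}

// ID reports the identifier and whether it has been set, mirroring
// the (value, bool) getters used above.
func (h header) ID() (string, bool) { return h.id, h.idSet }

// printID accepts any getter with the right signature; a method value
// such as h.ID satisfies it without extra adapters.
func printID(label string, recv func() (string, bool)) {
	if v, ok := recv(); ok {
		fmt.Println(label+":", v)
		return
	}
	fmt.Println(label + ": <empty>")
}

func main() {
	h := header{id: "abc123", idSet: true}
	printID("ID", h.ID) // set value

	var empty header
	printID("CID", empty.ID) // unset: prints <empty>
}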


@ -1,35 +0,0 @@
package writecache
import (
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/spf13/cobra"
)
var inspectCMD = &cobra.Command{
Use: "inspect",
Short: "Object inspection",
Long: `Inspect specific object in a write-cache.`,
Run: inspectFunc,
}
func init() {
common.AddAddressFlag(inspectCMD, &vAddress)
common.AddComponentPathFlag(inspectCMD, &vPath)
common.AddOutputFileFlag(inspectCMD, &vOut)
}
func inspectFunc(cmd *cobra.Command, _ []string) {
db := openWC(cmd)
defer db.Close()
data, err := writecache.Get(db, []byte(vAddress))
common.ExitOnErr(cmd, common.Errf("could not fetch object: %w", err))
var o object.Object
common.ExitOnErr(cmd, common.Errf("could not unmarshal object: %w", o.Unmarshal(data)))
common.PrintObjectHeader(cmd, o)
common.WriteObjectToFile(cmd, vOut, data)
}


@ -1,38 +0,0 @@
package writecache
import (
"fmt"
"io"
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
)
var listCMD = &cobra.Command{
Use: "inspect",
Short: "Object inspection",
Long: `Inspect specific object in a write-cache.`,
Run: listFunc,
}
func init() {
common.AddComponentPathFlag(listCMD, &vPath)
}
func listFunc(cmd *cobra.Command, _ []string) {
// other targets can be supported
w := cmd.OutOrStderr()
wAddr := func(addr oid.Address) error {
_, err := io.WriteString(w, fmt.Sprintf("%s\n", addr))
return err
}
db := openWC(cmd)
defer db.Close()
err := writecache.IterateDB(db, wAddr)
common.ExitOnErr(cmd, common.Errf("write-cache iterator failure: %w", err))
}


@ -1,31 +0,0 @@
package writecache
import (
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
"github.com/spf13/cobra"
"go.etcd.io/bbolt"
)
var (
vAddress string
vPath string
vOut string
)
// Root contains `write-cache` command definition.
var Root = &cobra.Command{
Use: "write-cache",
Short: "Operations with write-cache",
}
func init() {
Root.AddCommand(listCMD, inspectCMD)
}
func openWC(cmd *cobra.Command) *bbolt.DB {
db, err := writecache.OpenDB(vPath, true)
common.ExitOnErr(cmd, common.Errf("could not open write-cache db: %w", err))
return db
}


@ -1,50 +0,0 @@
package main
import (
"os"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/meta"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/writecache"
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/gendoc"
"github.com/spf13/cobra"
)
var command = &cobra.Command{
Use: "frostfs-lens",
Short: "FrostFS Storage Engine Lens",
Long: `FrostFS Storage Engine Lens provides tools to browse the contents of the FrostFS storage engine.`,
RunE: entryPoint,
SilenceUsage: true,
}
func entryPoint(cmd *cobra.Command, _ []string) error {
printVersion, _ := cmd.Flags().GetBool("version")
if printVersion {
cmd.Print(misc.BuildInfo("FrostFS Lens"))
return nil
}
return cmd.Usage()
}
func init() {
// use stdout as default output for cmd.Print()
command.SetOut(os.Stdout)
command.Flags().Bool("version", false, "Application version")
command.AddCommand(
blobovnicza.Root,
meta.Root,
writecache.Root,
gendoc.Command(command),
)
}
func main() {
err := command.Execute()
if err != nil {
os.Exit(1)
}
}


@ -1,399 +0,0 @@
package main
import (
"sync"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
lru "github.com/hashicorp/golang-lru/v2"
)
type netValueReader[K any, V any] func(K) (V, error)
type valueWithTime[V any] struct {
v V
t time.Time
// cached error so that a failed request is not repeated for some time
e error
}
// entity that provides TTL cache interface.
type ttlNetCache[K comparable, V any] struct {
ttl time.Duration
sz int
cache *lru.Cache[K, *valueWithTime[V]]
netRdr netValueReader[K, V]
}
// complicates netValueReader with TTL caching mechanism.
func newNetworkTTLCache[K comparable, V any](sz int, ttl time.Duration, netRdr netValueReader[K, V]) *ttlNetCache[K, V] {
cache, err := lru.New[K, *valueWithTime[V]](sz)
fatalOnErr(err)
return &ttlNetCache[K, V]{
ttl: ttl,
sz: sz,
cache: cache,
netRdr: netRdr,
}
}
// reads value by the key.
//
// updates the value from the network on cache miss or by TTL.
//
// returned value should not be modified.
func (c *ttlNetCache[K, V]) get(key K) (V, error) {
val, ok := c.cache.Peek(key)
if ok {
if time.Since(val.t) < c.ttl {
return val.v, val.e
}
c.cache.Remove(key)
}
v, err := c.netRdr(key)
c.set(key, v, err)
return v, err
}
func (c *ttlNetCache[K, V]) set(k K, v V, e error) {
c.cache.Add(k, &valueWithTime[V]{
v: v,
t: time.Now(),
e: e,
})
}
func (c *ttlNetCache[K, V]) remove(key K) {
c.cache.Remove(key)
}
// entity that provides LRU cache interface.
type lruNetCache struct {
cache *lru.Cache[uint64, *netmapSDK.NetMap]
netRdr netValueReader[uint64, *netmapSDK.NetMap]
}
// newNetworkLRUCache returns wrapper over netValueReader with LRU cache.
func newNetworkLRUCache(sz int, netRdr netValueReader[uint64, *netmapSDK.NetMap]) *lruNetCache {
cache, err := lru.New[uint64, *netmapSDK.NetMap](sz)
fatalOnErr(err)
return &lruNetCache{
cache: cache,
netRdr: netRdr,
}
}
// reads value by the key.
//
// updates the value from the network on cache miss.
//
// returned value should not be modified.
func (c *lruNetCache) get(key uint64) (*netmapSDK.NetMap, error) {
val, ok := c.cache.Get(key)
if ok {
return val, nil
}
val, err := c.netRdr(key)
if err != nil {
return nil, err
}
c.cache.Add(key, val)
return val, nil
}
// wrapper over TTL cache of values read from the network
// that implements container storage.
type ttlContainerStorage struct {
*ttlNetCache[cid.ID, *container.Container]
}
func newCachedContainerStorage(v container.Source, ttl time.Duration) ttlContainerStorage {
const containerCacheSize = 100
lruCnrCache := newNetworkTTLCache[cid.ID, *container.Container](containerCacheSize, ttl, func(id cid.ID) (*container.Container, error) {
return v.Get(id)
})
return ttlContainerStorage{lruCnrCache}
}
func (s ttlContainerStorage) handleRemoval(cnr cid.ID) {
s.set(cnr, nil, apistatus.ContainerNotFound{})
}
// Get returns container value from the cache. If value is missing in the cache
// or expired, then it returns value from side chain and updates the cache.
func (s ttlContainerStorage) Get(cnr cid.ID) (*container.Container, error) {
return s.get(cnr)
}
type ttlEACLStorage struct {
*ttlNetCache[cid.ID, *container.EACL]
}
func newCachedEACLStorage(v container.EACLSource, ttl time.Duration) ttlEACLStorage {
const eaclCacheSize = 100
lruCnrCache := newNetworkTTLCache(eaclCacheSize, ttl, func(id cid.ID) (*container.EACL, error) {
return v.GetEACL(id)
})
return ttlEACLStorage{lruCnrCache}
}
// GetEACL returns eACL value from the cache. If value is missing in the cache
// or expired, then it returns value from side chain and updates cache.
func (s ttlEACLStorage) GetEACL(cnr cid.ID) (*container.EACL, error) {
return s.get(cnr)
}
// InvalidateEACL removes cached eACL value.
func (s ttlEACLStorage) InvalidateEACL(cnr cid.ID) {
s.remove(cnr)
}
type lruNetmapSource struct {
netState netmap.State
cache *lruNetCache
}
func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source {
const netmapCacheSize = 10
lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(key uint64) (*netmapSDK.NetMap, error) {
return v.GetNetMapByEpoch(key)
})
return &lruNetmapSource{
netState: s,
cache: lruNetmapCache,
}
}
func (s *lruNetmapSource) GetNetMap(diff uint64) (*netmapSDK.NetMap, error) {
return s.getNetMapByEpoch(s.netState.CurrentEpoch() - diff)
}
func (s *lruNetmapSource) GetNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
return s.getNetMapByEpoch(epoch)
}
func (s *lruNetmapSource) getNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
val, err := s.cache.get(epoch)
if err != nil {
return nil, err
}
return val, nil
}
func (s *lruNetmapSource) Epoch() (uint64, error) {
return s.netState.CurrentEpoch(), nil
}
// wrapper over TTL cache of values read from the network
// that implements container lister.
type ttlContainerLister struct {
inner *ttlNetCache[string, *cacheItemContainerList]
client *cntClient.Client
}
// value type for ttlNetCache used by ttlContainerLister.
type cacheItemContainerList struct {
// protects list from concurrent add/remove ops
mtx sync.RWMutex
// actual list of containers owned by the particular user
list []cid.ID
}
func newCachedContainerLister(c *cntClient.Client, ttl time.Duration) ttlContainerLister {
const containerListerCacheSize = 100
lruCnrListerCache := newNetworkTTLCache(containerListerCacheSize, ttl, func(strID string) (*cacheItemContainerList, error) {
var id *user.ID
if strID != "" {
id = new(user.ID)
err := id.DecodeString(strID)
if err != nil {
return nil, err
}
}
list, err := c.List(id)
if err != nil {
return nil, err
}
return &cacheItemContainerList{
list: list,
}, nil
})
return ttlContainerLister{inner: lruCnrListerCache, client: c}
}
// List returns list of container IDs from the cache. If list is missing in the
// cache or expired, then it returns container IDs from side chain and updates
// the cache.
func (s ttlContainerLister) List(id *user.ID) ([]cid.ID, error) {
if id == nil {
return s.client.List(nil)
}
item, err := s.inner.get(id.EncodeToString())
if err != nil {
return nil, err
}
item.mtx.RLock()
res := make([]cid.ID, len(item.list))
copy(res, item.list)
item.mtx.RUnlock()
return res, nil
}
// updates cached list of owner's containers: cnr is added if flag is true, otherwise it's removed.
// Concurrent calls can lead to some races:
// - two parallel additions to missing owner's cache can lead to only one container to be cached
// - async cache value eviction can lead to idle addition
//
// None of the described races is critical since cache values expire anyway; we just try
// to keep the cache reasonably fresh without heavy synchronization overhead.
func (s *ttlContainerLister) update(owner user.ID, cnr cid.ID, add bool) {
strOwner := owner.EncodeToString()
val, ok := s.inner.cache.Peek(strOwner)
if !ok {
// we could cache this single cnr, but then the cached list would quickly
// diverge from the Sidechain
return
}
if s.inner.ttl <= time.Since(val.t) {
return
}
item := val.v
item.mtx.Lock()
{
found := false
for i := range item.list {
if found = item.list[i].Equals(cnr); found {
if !add {
item.list = append(item.list[:i], item.list[i+1:]...)
// if list became empty we don't remove the value from the cache
// since an empty list is a correct value, and we don't want to immediately
// re-request it from the Sidechain
}
break
}
}
if add && !found {
item.list = append(item.list, cnr)
}
}
item.mtx.Unlock()
}
type cachedIRFetcher struct {
*ttlNetCache[struct{}, [][]byte]
}
func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) cachedIRFetcher {
const (
irFetcherCacheSize = 1 // we intend to store only one value
// Without the cache in the testnet we can see several hundred simultaneous
// requests (frostfs-node #1278), so limiting the request rate solves the issue.
//
// Exact request rate doesn't really matter because Inner Ring list update
// happens extremely rare, but there is no side chain events for that as
// for now (frostfs-contract v0.15.0 notary disabled env) to monitor it.
irFetcherCacheTTL = 30 * time.Second
)
irFetcherCache := newNetworkTTLCache[struct{}, [][]byte](irFetcherCacheSize, irFetcherCacheTTL,
func(_ struct{}) ([][]byte, error) {
return f.InnerRingKeys()
},
)
return cachedIRFetcher{irFetcherCache}
}
// InnerRingKeys returns cached list of Inner Ring keys. If keys are missing in
// the cache or expired, then it returns keys from side chain and updates
// the cache.
func (f cachedIRFetcher) InnerRingKeys() ([][]byte, error) {
val, err := f.get(struct{}{})
if err != nil {
return nil, err
}
return val, nil
}
type ttlMaxObjectSizeCache struct {
mtx sync.RWMutex
lastUpdated time.Time
lastSize uint64
src putsvc.MaxSizeSource
}
func newCachedMaxObjectSizeSource(src putsvc.MaxSizeSource) putsvc.MaxSizeSource {
return &ttlMaxObjectSizeCache{
src: src,
}
}
func (c *ttlMaxObjectSizeCache) MaxObjectSize() uint64 {
const ttl = time.Second * 30
c.mtx.RLock()
prevUpdated := c.lastUpdated
size := c.lastSize
c.mtx.RUnlock()
if time.Since(prevUpdated) < ttl {
return size
}
c.mtx.Lock()
size = c.lastSize
if !c.lastUpdated.After(prevUpdated) {
size = c.src.MaxObjectSize()
c.lastSize = size
c.lastUpdated = time.Now()
}
c.mtx.Unlock()
return size
}
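
Not part of the diff: the read-through TTL pattern behind ttlNetCache can be shown as a small self-contained sketch on top of the same hashicorp/golang-lru/v2 package. Error caching and concurrent-eviction details are omitted, and all names here are invented for the example.

package main

import (
	"fmt"
	"time"

	lru "github.com/hashicorp/golang-lru/v2"
)

// entry pairs a cached value with the moment it was fetched,
// mirroring valueWithTime above.
type entry[V any] struct {
	v V
	t time.Time
}

// ttlCache is a tiny read-through cache: on miss or expiry it calls fetch.
type ttlCache[K comparable, V any] struct {
	ttl   time.Duration
	lru   *lru.Cache[K, entry[V]]
	fetch func(K) (V, error)
}

func newTTLCache[K comparable, V any](size int, ttl time.Duration, fetch func(K) (V, error)) (*ttlCache[K, V], error) {
	c, err := lru.New[K, entry[V]](size)
	if err != nil {
		return nil, err
	}
	return &ttlCache[K, V]{ttl: ttl, lru: c, fetch: fetch}, nil
}

func (c *ttlCache[K, V]) get(key K) (V, error) {
	if e, ok := c.lru.Peek(key); ok && time.Since(e.t) < c.ttl {
		return e.v, nil // fresh enough: no network round trip
	}
	v, err := c.fetch(key)
	if err != nil {
		var zero V
		return zero, err
	}
	c.lru.Add(key, entry[V]{v: v, t: time.Now()})
	return v, nil
}

func main() {
	cache, _ := newTTLCache[string, int](16, time.Minute, func(k string) (int, error) {
		fmt.Println("network read for", k) // stands in for the side chain call
		return len(k), nil
	})

	v, err := cache.get("owner-1") // miss: triggers the fetch
	fmt.Println(v, err)
	v, err = cache.get("owner-1") // hit: served from the cache
	fmt.Println(v, err)
}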


@ -1,25 +0,0 @@
package main
type closer struct {
name string
fn func()
}
func getCloser(c *cfg, name string) *closer {
for _, clsr := range c.closers {
if clsr.name == name {
return &clsr
}
}
return nil
}
func delCloser(c *cfg, name string) {
for i, clsr := range c.closers {
if clsr.name == name {
c.closers[i] = c.closers[len(c.closers)-1]
c.closers = c.closers[:len(c.closers)-1]
return
}
}
}
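
Illustration only: delCloser removes an element with the swap-with-last trick, which is O(1) but does not preserve order. A generic standalone sketch of the same idiom:

package main

import "fmt"

// removeSwap deletes the first element matching pred by swapping it with
// the last element and shrinking the slice; element order is not preserved.
func removeSwap[T any](s []T, pred func(T) bool) []T {
	for i := range s {
		if pred(s[i]) {
			s[i] = s[len(s)-1]
			return s[:len(s)-1]
		}
	}
	return s
}

func main() {
	names := []string{"metrics", "pprof", "tree"}
	names = removeSwap(names, func(n string) bool { return n == "metrics" })
	fmt.Println(names) // prints [tree pprof]: order changed, length reduced
}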

File diff suppressed because it is too large


@ -1,64 +0,0 @@
package apiclientconfig
import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
)
const (
subsection = "apiclient"
// DialTimeoutDefault is a default dial timeout of FrostFS API client connection.
DialTimeoutDefault = 5 * time.Second
// StreamTimeoutDefault is a default timeout of FrostFS API streaming operation.
StreamTimeoutDefault = 15 * time.Second
)
// DialTimeout returns the value of "dial_timeout" config parameter
// from "apiclient" section.
//
// Returns DialTimeoutDefault if the value is not positive duration.
func DialTimeout(c *config.Config) time.Duration {
v := config.DurationSafe(c.Sub(subsection), "dial_timeout")
if v > 0 {
return v
}
return DialTimeoutDefault
}
// StreamTimeout returns the value of "stream_timeout" config parameter
// from "apiclient" section.
//
// Returns DialTimeoutDefault if the value is not positive duration.
func StreamTimeout(c *config.Config) time.Duration {
v := config.DurationSafe(c.Sub(subsection), "stream_timeout")
if v > 0 {
return v
}
return StreamTimeoutDefault
}
// ReconnectTimeout returns the value of "reconnect_timeout" config parameter
// from "apiclient" section.
//
// Returns 0 if the value is not positive duration.
func ReconnectTimeout(c *config.Config) time.Duration {
v := config.DurationSafe(c.Sub(subsection), "reconnect_timeout")
if v > 0 {
return v
}
return 0
}
// AllowExternal returns the value of "allow_external" config parameter
// from "apiclient" section.
//
// Returns false if the value is missing or invalid.
func AllowExternal(c *config.Config) bool {
return config.BoolSafe(c.Sub(subsection), "allow_external")
}


@ -1,37 +0,0 @@
package apiclientconfig_test
import (
"testing"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
apiclientconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/apiclient"
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
"github.com/stretchr/testify/require"
)
func TestApiclientSection(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
empty := configtest.EmptyConfig()
require.Equal(t, apiclientconfig.DialTimeoutDefault, apiclientconfig.DialTimeout(empty))
require.Equal(t, apiclientconfig.StreamTimeoutDefault, apiclientconfig.StreamTimeout(empty))
require.Equal(t, time.Duration(0), apiclientconfig.ReconnectTimeout(empty))
require.False(t, apiclientconfig.AllowExternal(empty))
})
const path = "../../../../config/example/node"
var fileConfigTest = func(c *config.Config) {
require.Equal(t, 15*time.Second, apiclientconfig.DialTimeout(c))
require.Equal(t, 20*time.Second, apiclientconfig.StreamTimeout(c))
require.Equal(t, 30*time.Second, apiclientconfig.ReconnectTimeout(c))
require.True(t, apiclientconfig.AllowExternal(c))
}
configtest.ForEachFileType(path, fileConfigTest)
t.Run("ENV", func(t *testing.T) {
configtest.ForEnvFileType(path, fileConfigTest)
})
}


@ -1,38 +0,0 @@
package blobstorconfig
import (
"strconv"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/storage"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
)
// Config is a wrapper over the config section
// which provides access to BlobStor configurations.
type Config config.Config
// From wraps config section into Config.
func From(c *config.Config) *Config {
return (*Config)(c)
}
// Storages returns the value of storage subcomponents.
func (x *Config) Storages() []*storage.Config {
var ss []*storage.Config
for i := 0; ; i++ {
typ := config.String(
(*config.Config)(x),
strconv.Itoa(i)+".type")
switch typ {
case "":
return ss
case fstree.Type, blobovniczatree.Type:
sub := storage.From((*config.Config)(x).Sub(strconv.Itoa(i)))
ss = append(ss, sub)
default:
panic("invalid type")
}
}
}


@ -1,47 +0,0 @@
package fstree
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
)
// Config is a wrapper over the config section
// which provides access to Blobovnicza configurations.
type Config config.Config
// DepthDefault is a default shallow dir depth.
const DepthDefault = 4
// From wraps config section into Config.
func From(c *config.Config) *Config {
return (*Config)(c)
}
// Type returns the storage type.
func (x *Config) Type() string {
return fstree.Type
}
// Depth returns the value of "depth" config parameter.
//
// Returns DepthDefault if the value is out of
// [1:fstree.MaxDepth] range.
func (x *Config) Depth() uint64 {
d := config.UintSafe(
(*config.Config)(x),
"depth",
)
if d >= 1 && d <= fstree.MaxDepth {
return d
}
return DepthDefault
}
// NoSync returns the value of "no_sync" config parameter.
//
// Returns false if the value is not a boolean or is missing.
func (x *Config) NoSync() bool {
return config.BoolSafe((*config.Config)(x), "no_sync")
}


@ -1,55 +0,0 @@
package storage
import (
"io/fs"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
)
type Config config.Config
// PermDefault are default permission bits for BlobStor data.
const PermDefault = 0660
func From(x *config.Config) *Config {
return (*Config)(x)
}
// Type returns storage type.
func (x *Config) Type() string {
return config.String(
(*config.Config)(x),
"type")
}
// Path returns the value of "path" config parameter.
//
// Panics if the value is not a non-empty string.
func (x *Config) Path() string {
p := config.String(
(*config.Config)(x),
"path",
)
if p == "" {
panic("blobstor path not set")
}
return p
}
// Perm returns the value of "perm" config parameter as a fs.FileMode.
//
// Returns PermDefault if the value is not a non-zero number.
func (x *Config) Perm() fs.FileMode {
p := config.UintSafe(
(*config.Config)(x),
"perm",
)
if p == 0 {
p = PermDefault
}
return fs.FileMode(p)
}


@ -1,19 +0,0 @@
package objectconfig
import "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
const (
deleteSubsection = "delete"
// DefaultTombstoneLifetime is the default value of tombstone lifetime in epochs.
DefaultTombstoneLifetime = 5
)
// TombstoneLifetime returns the value of `tombstone_lifetime` config parameter.
func TombstoneLifetime(c *config.Config) uint64 {
ts := config.UintSafe(c.Sub(subsection).Sub(deleteSubsection), "tombstone_lifetime")
if ts <= 0 {
return DefaultTombstoneLifetime
}
return ts
}


@ -1,70 +0,0 @@
package main
import (
"context"
"fmt"
"net/http"
"time"
httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http"
)
type httpComponent struct {
address string
name string
handler http.Handler
shutdownDur time.Duration
enabled bool
cfg *cfg
preReload func(c *cfg)
}
func (cmp *httpComponent) init(c *cfg) {
if !cmp.enabled {
c.log.Info(fmt.Sprintf("%s is disabled", cmp.name))
return
}
// Init server with parameters
srv := httputil.New(
*httputil.NewHTTPSrvPrm(
cmp.address,
cmp.handler,
),
httputil.WithShutdownTimeout(
cmp.shutdownDur,
),
)
c.closers = append(c.closers, closer{
cmp.name,
func() { stopAndLog(c, cmp.name, srv.Shutdown) },
})
c.workers = append(c.workers, worker{
cmp.name,
func(ctx context.Context) {
runAndLog(c, cmp.name, false, func(c *cfg) {
fatalOnErr(srv.Serve())
})
},
})
}
func (cmp *httpComponent) reload() error {
if cmp.preReload != nil {
cmp.preReload(cmp.cfg)
}
// Shutdown server
closer := getCloser(cmp.cfg, cmp.name)
if closer != nil {
closer.fn()
}
// Cleanup
delCloser(cmp.cfg, cmp.name)
delWorker(cmp.cfg, cmp.name)
// Init server with new parameters
cmp.init(cmp.cfg)
// Start worker
if cmp.enabled {
startWorker(cmp.cfg, *getWorker(cmp.cfg, cmp.name))
}
return nil
}


@ -1,45 +0,0 @@
package main
import (
metricsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/metrics"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
func metricsComponent(c *cfg) (*httpComponent, bool) {
var updated bool
// check if it has been inited before
if c.dynamicConfiguration.metrics == nil {
c.dynamicConfiguration.metrics = new(httpComponent)
c.dynamicConfiguration.metrics.cfg = c
c.dynamicConfiguration.metrics.name = "metrics"
c.dynamicConfiguration.metrics.handler = promhttp.Handler()
updated = true
}
// (re)init read configuration
enabled := metricsconfig.Enabled(c.appCfg)
if enabled != c.dynamicConfiguration.metrics.enabled {
c.dynamicConfiguration.metrics.enabled = enabled
updated = true
}
address := metricsconfig.Address(c.appCfg)
if address != c.dynamicConfiguration.metrics.address {
c.dynamicConfiguration.metrics.address = address
updated = true
}
dur := metricsconfig.ShutdownTimeout(c.appCfg)
if dur != c.dynamicConfiguration.metrics.shutdownDur {
c.dynamicConfiguration.metrics.shutdownDur = dur
updated = true
}
return c.dynamicConfiguration.metrics, updated
}
func enableMetricsSvc(c *cfg) {
c.shared.metricsSvc.Enable()
}
func disableMetricsSvc(c *cfg) {
c.shared.metricsSvc.Disable()
}

Some files were not shown because too many files have changed in this diff