Compare commits

...

No commits in common. "master" and "empty" have entirely different histories.

286 changed files with 2 additions and 19067 deletions

@@ -1,21 +0,0 @@
name: DCO
on: [pull_request]
jobs:
dco:
name: DCO
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '1.21'
- name: Run commit format checker
uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
with:
from: 'origin/${{ github.event.pull_request.base.ref }}'

@@ -1,22 +0,0 @@
on:
push:
workflow_dispatch:
jobs:
image:
name: Publish Maven packages
runs-on: docker
container: git.frostfs.info/truecloudlab/env:openjdk-11-maven-3.8.6
steps:
- name: Clone git repo
uses: actions/checkout@v3
- name: Publish release packages
run: mvn clean --batch-mode --update-snapshots deploy
if: >-
startsWith(github.ref, 'refs/tags/v') &&
(github.event_name == 'workflow_dispatch' || github.event_name == 'push')
env:
MAVEN_REGISTRY: TrueCloudLab
MAVEN_REGISTRY_USER: ${{secrets.MAVEN_REGISTRY_USER}}
MAVEN_REGISTRY_PASSWORD: ${{secrets.MAVEN_REGISTRY_PASSWORD}}

@@ -1,12 +0,0 @@
name: Verify code phase
on: [pull_request]
jobs:
verify-code:
name: Verify code
runs-on: docker
container: git.frostfs.info/truecloudlab/env:openjdk-11-maven-3.8.6
steps:
- uses: actions/checkout@v3
- name: Run the Maven verify phase
run: mvn --batch-mode --update-snapshots verify

.gitignore

@@ -1,41 +0,0 @@
### Maven ###
target/
!.mvn/wrapper/maven-wrapper.jar
!**/src/main/**/target/
!**/src/test/**/target/
**/.flattened-pom.xml
### IntelliJ IDEA ###
.idea/modules.xml
.idea/jarRepositories.xml
.idea/compiler.xml
.idea/libraries/
.idea/
*.iws
*.iml
*.ipr
### Eclipse ###
.apt_generated
.classpath
.factorypath
.project
.settings
.springBeans
.sts4-cache
### NetBeans ###
/nbproject/private/
/nbbuild/
/dist/
/nbdist/
/.nb-gradle/
build/
!**/src/main/**/build/
!**/src/test/**/build/
### VS Code ###
.vscode/
### Mac OS ###
.DS_Store

@@ -1,62 +0,0 @@
# Changelog
## [0.12.0] - 2025-04-24
### Fixed
- Patch logic
- Patch payload requirements
## [0.11.0] - 2025-04-23
### Added
- Placement policy vectors
## [0.10.0] - 2025-03-10
### Added
- Auto deploy to forgejo
## [0.9.0] - 2025-03-05
### Added
- APE rule deserializer
## [0.8.0] - 2025-03-04
### Added
- Creating client via wallet and password
## [0.7.0] - 2025-02-20
### Added
- Expanding the parameters for creating a container
### Fixed
- Creating a session for working with objects
## [0.6.0] - 2025-02-13
### Added
- APE rules serializer
## [0.5.0] - 2025-02-11
### Fixed
- Loading large objects in chunks
- .gitignore
- pom revision

@@ -1,3 +0,0 @@
.* @orikik
.forgejo/.* @potyarkin
Makefile @potyarkin

@@ -1,156 +0,0 @@
# Contribution guide
First, thank you for contributing! We love and encourage pull requests from
everyone. Please follow the guidelines:
- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-sdk-java/issues) and
[pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-sdk-java/pulls) for existing
discussions.
- Open an issue first, to discuss a new feature or enhancement.
- Write tests and make sure the test suite passes locally and on CI.
- Open a pull request and reference the relevant issue(s).
- Make sure your commits are logically separated and have good comments
explaining the details of your change.
- After receiving feedback, amend your commits or add new ones as
appropriate.
- **Have fun!**
## Development Workflow
Start by forking the `frostfs-sdk-java` repository, make changes in a branch and then
send a pull request. We encourage pull requests to discuss code changes. Here
are the steps in detail:
### Set up your git repository
Fork the [frostfs-sdk-java
upstream](https://git.frostfs.info/repo/fork/346) source repository
to your own personal repository. Copy the URL of your fork (you will need it for
the `git clone` command below).
```sh
$ git clone https://git.frostfs.info/<username>/frostfs-sdk-java.git
```
### Set up git remote as ``upstream``
```sh
$ cd frostfs-sdk-java
$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-sdk-java.git
$ git fetch upstream
$ git merge upstream/master
...
```
### Create your feature branch
Before making code changes, make sure you create a separate branch for
these changes. You may find it convenient to name the branch in the
`<type>/<Issue>-<changes_topic>` format.
```
$ git checkout -b feature/123-something_awesome
```
### Test your changes
After your code changes, make sure
- To add test cases for the new code.
- To run `mvn clean verify`
- To squash your commits into a single commit or a series of logically separated
commits with `git rebase -i` (see the example after this list). It's okay to
force-push updates to your pull request.
- To run `mvn clean package` successfully.
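For example, to interactively rebase (and squash) your branch onto the upstream base before pushing:
```
$ git rebase -i upstream/master
```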
### Commit changes
After verification, commit your changes. There is a [great
post](https://chris.beams.io/posts/git-commit/) on how to write useful commit
messages. Try following this template:
```
[#Issue] <component> Summary
Description
<Macros>
<Sign-Off>
```
```
$ git commit -ams '[#123] Add some feature'
```
### Push to the branch
Push your locally committed changes to the remote origin (your fork)
```
$ git push origin feature/123-something_awesome
```
### Create a Pull Request
Pull requests can be created via Forgejo. Refer to [this
document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for
detailed steps on how to create a pull request. After a Pull Request gets peer
reviewed and approved, it will be merged.
## DCO Sign off
All authors to the project retain copyright to their work. However, to ensure
that they are only submitting work that they have rights to, we require
everyone to acknowledge this by signing their work.
Any copyright notices in this repository should specify the authors as "the
contributors".
To sign your work, just add a line like this at the end of your commit message:
```
Signed-off-by: Samii Sakisaka <samii@frostfs.info>
```
This can be easily done with the `--signoff` option to `git commit`.
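For example, signing off at commit time:
```
$ git commit --signoff -m '[#123] Add some feature'
```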
By doing this you state that you can certify the following (from [The Developer
Certificate of Origin](https://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
1 Letterman Drive
Suite D4700
San Francisco, CA, 94129
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```

LICENSE

@@ -1,74 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
Copyright 2024 TrueCloudLab
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

README.md

@@ -1,164 +1,3 @@
# frostfs-sdk-java
# WIP area: this repo is just a fork!
Java implementation of FrostFS SDK
## Prerequisites
### Get the key for your wallet
1. Get the address
```bash
cat <path_to_your_wallet> | jq '.accounts[0].address' | tr -d '"'
```
2. Get the key
```bash
neo-go wallet export -w <path_to_your_wallet> -d <address_from_p1>
```
## Example usage
### Container operations
```java
import info.frostfs.sdk.FrostFSClient;
import info.frostfs.sdk.dto.container.Container;
import info.frostfs.sdk.dto.netmap.PlacementPolicy;
import info.frostfs.sdk.dto.netmap.Replica;
import info.frostfs.sdk.jdo.ClientSettings;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerCreate;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerDelete;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerGet;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerGetAll;
public class ContainerExample {
public void example() {
var callContext = new CallContext();
ClientSettings clientSettings = new ClientSettings(<your_key>, <your_host>);
FrostFSClient frostFSClient = new FrostFSClient(clientSettings);
// Create container
var placementPolicy = new PlacementPolicy(new Replica[]{new Replica(3)}, true, 1);
var prmContainerCreate = new PrmContainerCreate(new Container(placementPolicy));
var containerId = frostFSClient.createContainer(prmContainerCreate, callContext);
// Get container
var prmContainerGet = new PrmContainerGet(containerId);
var container = frostFSClient.getContainer(prmContainerGet, callContext);
// List containers
var containerIds = frostFSClient.listContainers(new PrmContainerGetAll(), callContext);
// Delete container
var prmContainerDelete = new PrmContainerDelete(containerId);
frostFSClient.deleteContainer(prmContainerDelete, callContext);
}
}
```
### Object operations
```java
import info.frostfs.sdk.FrostFSClient;
import info.frostfs.sdk.dto.object.*;
import info.frostfs.sdk.enums.ObjectType;
import info.frostfs.sdk.jdo.ClientSettings;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.object.*;
import org.apache.commons.lang3.ArrayUtils;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import static java.util.Objects.isNull;
public class ObjectExample {
public void example() {
CallContext callContext = new CallContext();
ClientSettings clientSettings = new ClientSettings(<your_key>, <your_host>);
FrostFSClient frostFSClient = new FrostFSClient(clientSettings);
// Put object
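// containerId is assumed to come from an earlier createContainer call (see the container example above)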
ObjectId objectId;
try (FileInputStream file = new FileInputStream("/path/to/file/cat.jpg")) {
var attribute = new ObjectAttribute("Filename", "cat.jpg");
var cat = new ObjectHeader(containerId, ObjectType.REGULAR, attribute);
var prmObjectPut = PrmObjectPut.builder().objectHeader(cat).build();
var writer = frostFSClient.putObject(prmObjectPut, callContext);
writer.write(file.readAllBytes());
objectId = writer.complete();
} catch (IOException e) {
throw new RuntimeException(e);
}
// Get object
var prmObjectGet = new PrmObjectGet(containerId, objectId);
ObjectFrostFS object = frostFSClient.getObject(prmObjectGet, callContext);
var reader = object.getObjectReader();
var chunk = reader.readChunk();
var length = chunk.length;
byte[] buffer = null;
while (length > 0) {
buffer = isNull(buffer) ? chunk : ArrayUtils.addAll(buffer, chunk);
chunk = reader.readChunk();
length = ArrayUtils.isEmpty(chunk) ? 0 : chunk.length;
}
try (FileOutputStream fos = new FileOutputStream("/path/to/file/newCat.jpg")) {
fos.write(buffer);
} catch (Exception ignored) {
}
// Get object header
var prmObjectHeadGet = new PrmObjectHeadGet(containerId, objectId);
var objectHeader = frostFSClient.getObjectHead(prmObjectHeadGet, callContext);
// Search regular objects
var prmObjectSearch = new PrmObjectSearch(containerId, new ObjectFilter.FilterByRootObject());
var objectIds = frostFSClient.searchObjects(prmObjectSearch, callContext);
// Delete object
var prmObjectDelete = new PrmObjectDelete(containerId, objectId);
frostFSClient.deleteObject(prmObjectDelete, callContext);
}
}
```
### Pool init
```java
import info.frostfs.sdk.jdo.ECDsa;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.pool.NodeParameters;
import info.frostfs.sdk.jdo.pool.PoolInitParameters;
import info.frostfs.sdk.pool.Pool;
public class PoolExample {
public static void example() {
CallContext callContext = new CallContext();
//Init
var nodeParam1 = new NodeParameters(1, <your_host1>, 1);
var nodeParam2 = new NodeParameters(1, <your_host2>, 1);
var nodeParam3 = new NodeParameters(1, <your_host3>, 1);
var nodeParam4 = new NodeParameters(1, <your_host4>, 1);
PoolInitParameters initParameters = new PoolInitParameters();
initParameters.setKey(new ECDsa(<your_key>));
initParameters.setNodeParams(new NodeParameters[]{nodeParam1, nodeParam2, nodeParam3, nodeParam4});
Pool pool = new Pool(initParameters);
//Dial (Required!)
pool.dial(callContext);
}
}
```
Useful things may be published only in [other branches](../../../branches)

@@ -1,84 +0,0 @@
<!DOCTYPE module PUBLIC
"-//Puppy Crawl//DTD Check Configuration 1.3//EN"
"http://www.puppycrawl.com/dtds/configuration_1_3.dtd">
<module name="Checker">
<property name="charset" value="UTF-8"/>
<property name="severity" value="error"/>
<property name="fileExtensions" value="java, properties, xml"/>
<module name="SuppressWarningsFilter" />
<module name="FileTabCharacter">
<property name="eachLine" value="true"/>
</module>
<module name="NewlineAtEndOfFile"/>
<!-- Checks for Size Violations. -->
<!-- See http://checkstyle.sf.net/config_sizes.html -->
<module name="LineLength">
<property name="max" value="120"/>
</module>
<module name="TreeWalker">
<property name="tabWidth" value="4"/>
<module name="Regexp">
<property name="message" value="Blank line at end of the block is not allowed"/>
<property name="format" value="^\s*$^\s*\}"/>
<property name="ignoreComments" value="true"/>
<property name="illegalPattern" value="true"/>
</module>
<module name="HideUtilityClassConstructor" />
<module name="SuppressWarningsHolder" />
<!-- Checks for Naming Conventions. -->
<!-- See http://checkstyle.sf.net/config_naming.html -->
<module name="AbbreviationAsWordInName">
<property name="tokens" value="METHOD_DEF,CLASS_DEF"/>
<property name="ignoreStatic" value="false"/>
</module>
<!-- Checks for imports -->
<!-- See http://checkstyle.sf.net/config_import.html -->
<module name="IllegalImport"> <!-- defaults to sun.* packages -->
</module>
<module name="RedundantImport"/>
<module name="UnusedImports"/>
<module name="ConstantName">
<property name="applyToPrivate" value="false"/>
</module>
<module name="WhitespaceAround">
<property name="allowEmptyConstructors" value="true"/>
<property name="allowEmptyMethods" value="true"/>
</module>
<module name="MethodParamPad"/>
<module name="EmptyForInitializerPad"/>
<module name="MissingOverride"/>
<module name="ParameterNumber">
<property name="ignoreOverriddenMethods" value="true"/>
</module>
<!-- Checks for blocks. You know, those {}'s -->
<!-- See http://checkstyle.sf.net/config_blocks.html -->
<module name="AvoidNestedBlocks"/>
<module name="EmptyBlock"/>
<module name="LeftCurly"/>
<module name="NeedBraces"/>
<module name="RightCurly"/>
<!-- Checks for common coding problems -->
<!-- See http://checkstyle.sf.net/config_coding.html -->
<module name="MagicNumber">
<property name="ignoreHashCodeMethod" value="true"/>
<property name="ignoreAnnotation" value="true"/>
<property name="ignoreFieldDeclaration" value="true"/>
<property name="ignoreNumbers" value="-1, 0, 1, 2, 3, 4"/>
</module>
<module name="RequireThis"/>
<module name="DeclarationOrder"/>
</module>
</module>

@@ -1,63 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>info.frostfs.sdk</groupId>
<artifactId>frostfs-sdk-java</artifactId>
<version>${revision}</version>
</parent>
<artifactId>client</artifactId>
<properties>
<maven.compiler.source>11</maven.compiler.source>
<maven.compiler.target>11</maven.compiler.target>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
</properties>
<dependencies>
<dependency>
<groupId>info.frostfs.sdk</groupId>
<artifactId>cryptography</artifactId>
<version>${revision}</version>
</dependency>
<dependency>
<groupId>info.frostfs.sdk</groupId>
<artifactId>models</artifactId>
<version>${revision}</version>
</dependency>
<dependency>
<groupId>info.frostfs.sdk</groupId>
<artifactId>exceptions</artifactId>
<version>${revision}</version>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
<version>1.17.0</version>
</dependency>
<!-- Prometheus instrumentation -->
<dependency>
<groupId>io.prometheus</groupId>
<artifactId>simpleclient</artifactId>
<version>0.16.0</version>
</dependency>
<dependency>
<groupId>io.prometheus</groupId>
<artifactId>simpleclient_hotspot</artifactId>
<version>0.16.0</version>
</dependency>
<dependency>
<groupId>io.prometheus</groupId>
<artifactId>simpleclient_common</artifactId>
<version>0.16.0</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>2.0.16</version>
</dependency>
</dependencies>
</project>

@@ -1,248 +0,0 @@
package info.frostfs.sdk;
import frostfs.accounting.Types;
import info.frostfs.sdk.dto.ape.Chain;
import info.frostfs.sdk.dto.container.Container;
import info.frostfs.sdk.dto.container.ContainerId;
import info.frostfs.sdk.dto.netmap.NetmapSnapshot;
import info.frostfs.sdk.dto.netmap.NodeInfo;
import info.frostfs.sdk.dto.netmap.Version;
import info.frostfs.sdk.dto.object.ObjectFrostFS;
import info.frostfs.sdk.dto.object.ObjectHeader;
import info.frostfs.sdk.dto.object.ObjectId;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.exceptions.ProcessFrostFSException;
import info.frostfs.sdk.jdo.ClientEnvironment;
import info.frostfs.sdk.jdo.ClientSettings;
import info.frostfs.sdk.jdo.ECDsa;
import info.frostfs.sdk.jdo.NetworkSettings;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.ape.PrmApeChainAdd;
import info.frostfs.sdk.jdo.parameters.ape.PrmApeChainList;
import info.frostfs.sdk.jdo.parameters.ape.PrmApeChainRemove;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerCreate;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerDelete;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerGet;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerGetAll;
import info.frostfs.sdk.jdo.parameters.object.*;
import info.frostfs.sdk.jdo.parameters.object.patch.PrmObjectPatch;
import info.frostfs.sdk.jdo.parameters.object.patch.PrmRangeGet;
import info.frostfs.sdk.jdo.parameters.object.patch.PrmRangeHashGet;
import info.frostfs.sdk.jdo.parameters.session.PrmSessionCreate;
import info.frostfs.sdk.jdo.result.ObjectHeaderResult;
import info.frostfs.sdk.pool.SessionCache;
import info.frostfs.sdk.pool.WrapperPrm;
import info.frostfs.sdk.services.CommonClient;
import info.frostfs.sdk.services.impl.*;
import info.frostfs.sdk.services.impl.interceptor.Configuration;
import info.frostfs.sdk.services.impl.interceptor.MonitoringClientInterceptor;
import info.frostfs.sdk.services.impl.rwhelper.ObjectWriter;
import info.frostfs.sdk.services.impl.rwhelper.RangeReader;
import info.frostfs.sdk.utils.Validator;
import io.grpc.Channel;
import io.grpc.ClientInterceptors;
import io.grpc.ManagedChannel;
import org.apache.commons.lang3.StringUtils;
import java.util.List;
import static info.frostfs.sdk.constants.ErrorConst.VERSION_UNSUPPORTED_TEMPLATE;
import static info.frostfs.sdk.tools.GrpcClient.initGrpcChannel;
import static java.util.Objects.nonNull;
public class FrostFSClient implements CommonClient {
private static final MonitoringClientInterceptor MONITORING_CLIENT_INTERCEPTOR =
MonitoringClientInterceptor.create(Configuration.allMetrics());
private final ContainerClientImpl containerClientImpl;
private final ObjectClientImpl objectClientImpl;
private final ApeManagerClientImpl apeManagerClient;
private final NetmapClientImpl netmapClientImpl;
private final SessionClientImpl sessionClientImpl;
private final ObjectToolsImpl objectToolsImpl;
private final AccountingClientImpl accountingClient;
private final ManagedChannel channel;
public FrostFSClient(ClientSettings clientSettings) {
Validator.validate(clientSettings);
this.channel = nonNull(clientSettings.getChannel())
? clientSettings.getChannel()
: initGrpcChannel(clientSettings);
var ecdsa = StringUtils.isBlank(clientSettings.getWif())
? new ECDsa(clientSettings.getWallet(), clientSettings.getPassword())
: new ECDsa(clientSettings.getWif());
Channel interceptChannel = ClientInterceptors.intercept(channel, MONITORING_CLIENT_INTERCEPTOR);
ClientEnvironment clientEnvironment = new ClientEnvironment(
ecdsa, interceptChannel, new Version(), this, new SessionCache(0)
);
Validator.validate(clientEnvironment);
this.containerClientImpl = new ContainerClientImpl(clientEnvironment);
this.objectClientImpl = new ObjectClientImpl(clientEnvironment);
this.apeManagerClient = new ApeManagerClientImpl(clientEnvironment);
this.netmapClientImpl = new NetmapClientImpl(clientEnvironment);
this.sessionClientImpl = new SessionClientImpl(clientEnvironment);
this.objectToolsImpl = new ObjectToolsImpl(clientEnvironment);
this.accountingClient = new AccountingClientImpl(clientEnvironment);
checkFrostFSVersionSupport(clientEnvironment.getVersion());
}
public FrostFSClient(WrapperPrm prm, SessionCache cache) {
this.channel = initGrpcChannel(prm.getAddress());
Channel interceptChannel = ClientInterceptors.intercept(channel, MONITORING_CLIENT_INTERCEPTOR);
ClientEnvironment clientEnvironment =
new ClientEnvironment(prm.getKey(), interceptChannel, new Version(), this, cache);
Validator.validate(clientEnvironment);
this.containerClientImpl = new ContainerClientImpl(clientEnvironment);
this.objectClientImpl = new ObjectClientImpl(clientEnvironment);
this.apeManagerClient = new ApeManagerClientImpl(clientEnvironment);
this.netmapClientImpl = new NetmapClientImpl(clientEnvironment);
this.sessionClientImpl = new SessionClientImpl(clientEnvironment);
this.objectToolsImpl = new ObjectToolsImpl(clientEnvironment);
this.accountingClient = new AccountingClientImpl(clientEnvironment);
checkFrostFSVersionSupport(clientEnvironment.getVersion());
}
private void checkFrostFSVersionSupport(Version version) {
var localNodeInfo = netmapClientImpl.getLocalNodeInfo(new CallContext());
if (!localNodeInfo.getVersion().isSupported(version)) {
throw new ProcessFrostFSException(
String.format(VERSION_UNSUPPORTED_TEMPLATE, localNodeInfo.getVersion())
);
}
}
@Override
public Container getContainer(PrmContainerGet args, CallContext ctx) {
return containerClientImpl.getContainer(args, ctx);
}
@Override
public List<ContainerId> listContainers(PrmContainerGetAll args, CallContext ctx) {
return containerClientImpl.listContainers(args, ctx);
}
@Override
public ContainerId createContainer(PrmContainerCreate args, CallContext ctx) {
return containerClientImpl.createContainer(args, ctx);
}
@Override
public void deleteContainer(PrmContainerDelete args, CallContext ctx) {
containerClientImpl.deleteContainer(args, ctx);
}
@Override
public ObjectHeaderResult getObjectHead(PrmObjectHeadGet args, CallContext ctx) {
return objectClientImpl.getObjectHead(args, ctx);
}
@Override
public ObjectFrostFS getObject(PrmObjectGet args, CallContext ctx) {
return objectClientImpl.getObject(args, ctx);
}
@Override
public ObjectWriter putObject(PrmObjectPut args, CallContext ctx) {
return objectClientImpl.putObject(args, ctx);
}
@Override
public ObjectId putClientCutObject(PrmObjectClientCutPut args, CallContext ctx) {
return objectClientImpl.putClientCutObject(args, ctx);
}
@Override
public ObjectId putSingleObject(PrmObjectSinglePut args, CallContext ctx) {
return objectClientImpl.putSingleObject(args, ctx);
}
@Override
public void deleteObject(PrmObjectDelete args, CallContext ctx) {
objectClientImpl.deleteObject(args, ctx);
}
@Override
public Iterable<ObjectId> searchObjects(PrmObjectSearch args, CallContext ctx) {
return objectClientImpl.searchObjects(args, ctx);
}
@Override
public RangeReader getRange(PrmRangeGet args, CallContext ctx) {
return objectClientImpl.getRange(args, ctx);
}
@Override
public byte[][] getRangeHash(PrmRangeHashGet args, CallContext ctx) {
return objectClientImpl.getRangeHash(args, ctx);
}
@Override
public ObjectId patchObject(PrmObjectPatch args, CallContext ctx) {
return objectClientImpl.patchObject(args, ctx);
}
@Override
public byte[] addChain(PrmApeChainAdd args, CallContext ctx) {
return apeManagerClient.addChain(args, ctx);
}
@Override
public void removeChain(PrmApeChainRemove args, CallContext ctx) {
apeManagerClient.removeChain(args, ctx);
}
@Override
public List<Chain> listChains(PrmApeChainList args, CallContext ctx) {
return apeManagerClient.listChains(args, ctx);
}
@Override
public NetmapSnapshot getNetmapSnapshot(CallContext ctx) {
return netmapClientImpl.getNetmapSnapshot(ctx);
}
@Override
public NodeInfo getLocalNodeInfo(CallContext ctx) {
return netmapClientImpl.getLocalNodeInfo(ctx);
}
@Override
public NetworkSettings getNetworkSettings(CallContext ctx) {
return netmapClientImpl.getNetworkSettings(ctx);
}
@Override
public SessionToken createSession(PrmSessionCreate args, CallContext ctx) {
return sessionClientImpl.createSession(args, ctx);
}
public frostfs.session.Types.SessionToken createSessionInternal(PrmSessionCreate args, CallContext ctx) {
return sessionClientImpl.createSessionInternal(args, ctx);
}
@Override
public ObjectId calculateObjectId(ObjectHeader header) {
return objectToolsImpl.calculateObjectId(header);
}
@Override
public Types.Decimal getBalance(CallContext ctx) {
return accountingClient.getBalance(ctx);
}
@Override
public String dial(CallContext ctx) {
accountingClient.getBalance(ctx);
return null;
}
public void close() {
channel.shutdown();
}
}

@@ -1,14 +0,0 @@
package info.frostfs.sdk.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.TYPE, ElementType.ANNOTATION_TYPE})
public @interface AtLeastOneIsFilled {
String message() default "At least one of the fields (%s) must be filled in";
String[] fields();
}

@@ -1,12 +0,0 @@
package info.frostfs.sdk.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.TYPE, ElementType.ANNOTATION_TYPE})
public @interface ComplexAtLeastOneIsFilled {
AtLeastOneIsFilled[] value();
}

@@ -1,11 +0,0 @@
package info.frostfs.sdk.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.FIELD)
public @interface NotBlank {
}

@@ -1,11 +0,0 @@
package info.frostfs.sdk.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.FIELD)
public @interface NotNull {
}

@@ -1,11 +0,0 @@
package info.frostfs.sdk.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.FIELD)
public @interface Validate {
}

@@ -1,14 +0,0 @@
package info.frostfs.sdk.constants;
public class CryptoConst {
public static final String SIGNATURE_ALGORITHM = "NONEwithECDSAinP1363Format";
public static final int RFC6979_SIGNATURE_SIZE = 64;
public static final int HASH_SIGNATURE_SIZE = 65;
public static final int MURMUR_MULTIPLIER = 33;
public static final long LANDAU_PRIME_DIVISOR_64BIT = 0xc4ceb9fe1a85ec53L;
public static final long LANDAU_PRIME_DIVISOR_65BIT = 0xff51afd7ed558ccdL;
private CryptoConst() {
}
}

@@ -1,14 +0,0 @@
package info.frostfs.sdk.constants;
public class PoolConst {
public static final int DEFAULT_SESSION_TOKEN_EXPIRATION_DURATION = 100; // in epochs
public static final int DEFAULT_ERROR_THRESHOLD = 100;
public static final int DEFAULT_GRACEFUL_CLOSE_ON_SWITCH_TIMEOUT = 10; // Seconds
public static final int DEFAULT_REBALANCE_INTERVAL = 15; // Seconds
public static final int DEFAULT_HEALTHCHECK_TIMEOUT = 4; // Seconds
public static final int DEFAULT_DIAL_TIMEOUT = 5; // Seconds
public static final int DEFAULT_STREAM_TIMEOUT = 10; // Seconds
private PoolConst() {
}
}

@@ -1,30 +0,0 @@
package info.frostfs.sdk.constants;
public class RuleConst {
public static final byte VERSION = 0;
public static final int BYTE_SIZE = 1;
public static final int U_INT_8_SIZE = BYTE_SIZE;
public static final int BOOL_SIZE = BYTE_SIZE;
public static final long NULL_SLICE = -1L;
public static final int NULL_SLICE_SIZE = 1;
public static final byte BYTE_TRUE = 1;
public static final byte BYTE_FALSE = 0;
// maxSliceLen taken from
// https://github.com/neo-project/neo/blob/38218bbee5bbe8b33cd8f9453465a19381c9a547/src/Neo/IO/Helper.cs#L77
public static final int MAX_SLICE_LENGTH = 0x1000000;
public static final int MAX_VAR_INT_LENGTH = 10;
public static final int CHAIN_MARSHAL_VERSION = 0;
public static final long OFFSET127 = 0x7f;
public static final long OFFSET128 = 0x80;
public static final int UNSIGNED_SERIALIZE_SIZE = 7;
private RuleConst() {
}
}
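The `OFFSET127`/`OFFSET128` masks together with the 7-bit `UNSIGNED_SERIALIZE_SIZE` are the standard ingredients of LEB128-style varint encoding (the neo-project helper linked above uses the same scheme). A minimal sketch of that technique, under that assumption; this is illustrative code, not from this repository:

```java
// Illustrative LEB128-style varint writer (assumption: this is how the
// RuleConst masks are used; not code from this repository).
public final class VarIntSketch {
    private static final long OFFSET127 = 0x7f;            // low 7 bits of a group
    private static final long OFFSET128 = 0x80;            // continuation flag
    private static final int UNSIGNED_SERIALIZE_SIZE = 7;  // payload bits per byte

    // Encodes `value` into `out` starting at `pos`; returns the next free index.
    public static int writeUnsigned(byte[] out, int pos, long value) {
        while (Long.compareUnsigned(value, OFFSET127) > 0) {
            out[pos++] = (byte) ((value & OFFSET127) | OFFSET128); // more bytes follow
            value >>>= UNSIGNED_SERIALIZE_SIZE;
        }
        out[pos++] = (byte) value; // final group, high bit clear
        return pos;
    }

    private VarIntSketch() {
    }
}
```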

@@ -1,23 +0,0 @@
package info.frostfs.sdk.enums;
public enum HealthyStatus {
// status HEALTHY is set when connection is ready to be used by the pool.
HEALTHY(1),
// status UNHEALTHY_ON_REQUEST is set when communication after dialing to the
// endpoint is failed due to immediate or accumulated errors, connection is
// available and pool should close it before re-establishing connection once again.
UNHEALTHY_ON_REQUEST(2),
// status UNHEALTHY_ON_DIAL is set when dialing to the endpoint is failed,
// so there is no connection to the endpoint, and pool should not close it
// before re-establishing connection once again.
UNHEALTHY_ON_DIAL(3),
;
public final int value;
HealthyStatus(int value) {
this.value = value;
}
}

@@ -1,29 +0,0 @@
package info.frostfs.sdk.enums;
public enum MethodIndex {
METHOD_BALANCE_GET("balanceGet"),
METHOD_CONTAINER_PUT("containerPut"),
METHOD_CONTAINER_GET("ContainerGet"),
METHOD_CONTAINER_LIST("ContainerList"),
METHOD_CONTAINER_DELETE("ContainerDelete"),
METHOD_ENDPOINT_INFO("EndpointInfo"),
METHOD_NETWORK_INFO("NetworkInfo"),
METHOD_NETMAP_SNAPSHOT("NetMapSnapshot"),
METHOD_OBJECT_PUT("ObjectPut"),
METHOD_OBJECT_DELETE("ObjectDelete"),
METHOD_OBJECT_GET("ObjectGet"),
METHOD_OBJECT_HEAD("ObjectHead"),
METHOD_OBJECT_RANGE("ObjectRange"),
METHOD_OBJECT_PATCH("ObjectPatch"),
METHOD_SESSION_CREATE("SessionCreate"),
METHOD_APE_MANAGER_ADD_CHAIN("APEManagerAddChain"),
METHOD_APE_MANAGER_REMOVE_CHAIN("APEManagerRemoveChain"),
METHOD_APE_MANAGER_LIST_CHAINS("APEManagerListChains"),
;
public final String methodName;
MethodIndex(String methodName) {
this.methodName = methodName;
}
}

@@ -1,6 +0,0 @@
package info.frostfs.sdk.enums;
public enum WaitExpects {
EXISTS,
REMOVED
}

@@ -1,19 +0,0 @@
package info.frostfs.sdk.exceptions;
import info.frostfs.sdk.dto.response.ResponseStatus;
import lombok.Getter;
@Getter
public class ResponseFrostFSException extends FrostFSException {
private final ResponseStatus status;
public ResponseFrostFSException(ResponseStatus status) {
super(status.toString());
this.status = status;
}
public ResponseFrostFSException(String message) {
super(message);
this.status = null;
}
}

@@ -1,57 +0,0 @@
package info.frostfs.sdk.jdo;
import info.frostfs.sdk.FrostFSClient;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.annotations.Validate;
import info.frostfs.sdk.dto.netmap.Version;
import info.frostfs.sdk.dto.object.OwnerId;
import info.frostfs.sdk.pool.SessionCache;
import io.grpc.Channel;
import lombok.Getter;
import lombok.Setter;
import org.apache.commons.lang3.StringUtils;
import static info.frostfs.sdk.Helper.getHexString;
import static info.frostfs.sdk.pool.Pool.formCacheKey;
@Getter
@Setter
public class ClientEnvironment {
@NotNull
private final OwnerId ownerId;
@NotNull
private final Version version;
@NotNull
@Validate
private final ECDsa key;
@NotNull
private final Channel channel;
@NotNull
private final FrostFSClient frostFSClient;
private String sessionKey;
private String address;
private NetworkSettings networkSettings;
private SessionCache sessionCache;
public ClientEnvironment(ECDsa key, Channel channel, Version version, FrostFSClient frostFSClient,
SessionCache sessionCache) {
this.key = key;
this.ownerId = new OwnerId(key.getAccount().getAddress());
this.version = version;
this.channel = channel;
this.frostFSClient = frostFSClient;
this.sessionCache = sessionCache;
this.address = channel.authority();
}
public String getSessionKey() {
if (StringUtils.isBlank(sessionKey)) {
this.sessionKey = formCacheKey(address, getHexString(key.getPublicKeyByte()));
}
return sessionKey;
}
}

@@ -1,61 +0,0 @@
package info.frostfs.sdk.jdo;
import info.frostfs.sdk.annotations.AtLeastOneIsFilled;
import info.frostfs.sdk.annotations.ComplexAtLeastOneIsFilled;
import io.grpc.ChannelCredentials;
import io.grpc.ManagedChannel;
import lombok.Getter;
import lombok.experimental.FieldNameConstants;
import java.io.File;
@Getter
@FieldNameConstants
@ComplexAtLeastOneIsFilled(value = {
@AtLeastOneIsFilled(fields = {ClientSettings.Fields.host, ClientSettings.Fields.channel}),
@AtLeastOneIsFilled(fields = {ClientSettings.Fields.wif, ClientSettings.Fields.wallet}),
})
public class ClientSettings {
private String wif;
private File wallet;
private String password;
private String host;
private ChannelCredentials credentials;
private ManagedChannel channel;
public ClientSettings(String wif, String host) {
this.wif = wif;
this.host = host;
}
public ClientSettings(String wif, String host, ChannelCredentials credentials) {
this.wif = wif;
this.host = host;
this.credentials = credentials;
}
public ClientSettings(String wif, ManagedChannel channel) {
this.wif = wif;
this.channel = channel;
}
public ClientSettings(File wallet, String password, String host) {
this.wallet = wallet;
this.password = password;
this.host = host;
}
public ClientSettings(File wallet, String password, String host, ChannelCredentials credentials) {
this.wallet = wallet;
this.password = password;
this.host = host;
this.credentials = credentials;
}
public ClientSettings(File wallet, String password, ManagedChannel channel) {
this.wallet = wallet;
this.password = password;
this.channel = channel;
}
}
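Given the constructors above, client settings can be built either from a WIF string or from a NEP-6 wallet file. A minimal sketch; the host, path, and password values are hypothetical placeholders:

```java
import java.io.File;

public class ClientSettingsUsage {
    public static void main(String[] args) {
        // WIF-based credentials plus a gRPC endpoint
        ClientSettings viaWif = new ClientSettings("<your_wif>", "<your_host>");
        // NEP-6 wallet file, its password, and the same endpoint
        ClientSettings viaWallet =
                new ClientSettings(new File("/path/to/wallet.json"), "<password>", "<your_host>");
    }
}
```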

@@ -1,73 +0,0 @@
package info.frostfs.sdk.jdo;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.exceptions.FrostFSException;
import info.frostfs.sdk.exceptions.ValidationFrostFSException;
import io.neow3j.wallet.Account;
import io.neow3j.wallet.nep6.NEP6Account;
import io.neow3j.wallet.nep6.NEP6Wallet;
import lombok.Getter;
import org.apache.commons.lang3.StringUtils;
import java.io.File;
import java.io.FileInputStream;
import java.security.PrivateKey;
import java.util.Optional;
import static info.frostfs.sdk.KeyExtension.loadPrivateKey;
import static info.frostfs.sdk.constants.ErrorConst.WALLET_IS_INVALID;
import static info.frostfs.sdk.constants.ErrorConst.WIF_IS_INVALID;
import static info.frostfs.sdk.constants.FieldConst.EMPTY_STRING;
import static io.neow3j.wallet.Wallet.OBJECT_MAPPER;
import static java.util.Objects.isNull;
@Getter
public class ECDsa {
@NotNull
private final byte[] publicKeyByte;
@NotNull
private final byte[] privateKeyByte;
@NotNull
private final PrivateKey privateKey;
@NotNull
private final Account account;
public ECDsa(String wif) {
if (StringUtils.isEmpty(wif)) {
throw new ValidationFrostFSException(WIF_IS_INVALID);
}
this.account = Account.fromWIF(wif);
this.privateKeyByte = account.getECKeyPair().getPrivateKey().getBytes();
this.publicKeyByte = account.getECKeyPair().getPublicKey().getEncoded(true);
this.privateKey = loadPrivateKey(privateKeyByte);
}
public ECDsa(File walletFile, String password) {
if (isNull(walletFile)) {
throw new ValidationFrostFSException(WALLET_IS_INVALID);
}
try (var walletStream = new FileInputStream(walletFile)) {
NEP6Wallet nep6Wallet = OBJECT_MAPPER.readValue(walletStream, NEP6Wallet.class);
Optional<NEP6Account> defaultAccount = nep6Wallet.getAccounts().stream()
.filter(NEP6Account::getDefault)
.findFirst();
var account = defaultAccount.map(Account::fromNEP6Account)
.orElseGet(() -> Account.fromNEP6Account(nep6Wallet.getAccounts().get(0)));
account.decryptPrivateKey(isNull(password) ? EMPTY_STRING : password);
this.account = account;
this.privateKeyByte = account.getECKeyPair().getPrivateKey().getBytes();
this.publicKeyByte = account.getECKeyPair().getPublicKey().getEncoded(true);
this.privateKey = loadPrivateKey(privateKeyByte);
} catch (Exception exp) {
throw new FrostFSException(exp.getMessage());
}
}
}

@@ -1,28 +0,0 @@
package info.frostfs.sdk.jdo;
import lombok.Getter;
import lombok.Setter;
import java.util.HashMap;
import java.util.Map;
@Getter
@Setter
public class NetworkSettings {
private Long auditFee;
private Long basicIncomeRate;
private Long containerFee;
private Long containerAliasFee;
private Long innerRingCandidateFee;
private Long withdrawFee;
private Long epochDuration;
private Long iRCandidateFee;
private Long maxObjectSize;
private Long maxECDataCount;
private Long maxECParityCount;
private Long withdrawalFee;
private Boolean homomorphicHashingDisabled;
private Boolean maintenanceModeAllowed;
private Map<String, Object> unnamedSettings = new HashMap<>();
}

@@ -1,13 +0,0 @@
package info.frostfs.sdk.jdo;
import info.frostfs.sdk.dto.object.ObjectId;
import lombok.AllArgsConstructor;
import lombok.Getter;
@Getter
@AllArgsConstructor
public class PutObjectResult {
private final ObjectId objectId;
private final int objectSize;
}

@@ -1,23 +0,0 @@
package info.frostfs.sdk.jdo.parameters;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.concurrent.TimeUnit;
import static info.frostfs.sdk.constants.AppConst.DEFAULT_GRPC_TIMEOUT;
@Getter
@Builder
@AllArgsConstructor
public class CallContext {
private final long timeout;
private final TimeUnit timeUnit;
public CallContext() {
this.timeout = DEFAULT_GRPC_TIMEOUT;
this.timeUnit = TimeUnit.SECONDS;
}
}
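Because the class carries Lombok's `@Builder` and `@AllArgsConstructor`, a non-default deadline can be supplied through the generated builder. A small sketch under that assumption:

```java
import java.util.concurrent.TimeUnit;

public class CallContextUsage {
    public static void main(String[] args) {
        CallContext defaults = new CallContext(); // DEFAULT_GRPC_TIMEOUT, in seconds
        CallContext custom = CallContext.builder()
                .timeout(30)
                .timeUnit(TimeUnit.SECONDS)
                .build();
    }
}
```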

@@ -1,26 +0,0 @@
package info.frostfs.sdk.jdo.parameters;
import lombok.AllArgsConstructor;
import lombok.Getter;
import java.time.Duration;
import java.time.LocalDateTime;
@Getter
@AllArgsConstructor
public class PrmWait {
private static final Duration DEFAULT_TIMEOUT = Duration.ofSeconds(120);
private static final Duration DEFAULT_POLL_INTERVAL = Duration.ofSeconds(5);
private final Duration timeout;
private final Duration pollInterval;
public PrmWait() {
this.timeout = DEFAULT_TIMEOUT;
this.pollInterval = DEFAULT_POLL_INTERVAL;
}
public LocalDateTime getDeadline() {
return LocalDateTime.now().plus(timeout);
}
}
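A hypothetical poll loop (not part of the SDK) showing how `getDeadline()` and the poll interval are intended to compose:

```java
import java.time.LocalDateTime;
import java.util.function.BooleanSupplier;

public class WaitSketch {
    // Hypothetical helper: poll `isDone` until it returns true or the deadline passes.
    static void waitFor(PrmWait prm, BooleanSupplier isDone) throws InterruptedException {
        LocalDateTime deadline = prm.getDeadline();
        while (!isDone.getAsBoolean()) {
            if (LocalDateTime.now().isAfter(deadline)) {
                throw new IllegalStateException("condition not met before timeout");
            }
            Thread.sleep(prm.getPollInterval().toMillis());
        }
    }
}
```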

@@ -1,27 +0,0 @@
package info.frostfs.sdk.jdo.parameters.ape;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.ape.Chain;
import info.frostfs.sdk.dto.chain.ChainTarget;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
@Getter
@Builder
@AllArgsConstructor
public class PrmApeChainAdd {
@NotNull
private Chain chain;
@NotNull
private ChainTarget chainTarget;
private Map<String, String> xHeaders;
public PrmApeChainAdd(Chain chain, ChainTarget chainTarget) {
this.chain = chain;
this.chainTarget = chainTarget;
}
}

@@ -1,23 +0,0 @@
package info.frostfs.sdk.jdo.parameters.ape;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.chain.ChainTarget;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
@Getter
@Builder
@AllArgsConstructor
public class PrmApeChainList {
@NotNull
private ChainTarget chainTarget;
private Map<String, String> xHeaders;
public PrmApeChainList(ChainTarget chainTarget) {
this.chainTarget = chainTarget;
}
}

@@ -1,27 +0,0 @@
package info.frostfs.sdk.jdo.parameters.ape;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.chain.ChainTarget;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
@Getter
@Builder
@AllArgsConstructor
public class PrmApeChainRemove {
@NotNull
private byte[] chainId;
@NotNull
private ChainTarget chainTarget;
private Map<String, String> xHeaders;
public PrmApeChainRemove(byte[] chainId, ChainTarget chainTarget) {
this.chainId = chainId;
this.chainTarget = chainTarget;
}
}

@@ -1,33 +0,0 @@
package info.frostfs.sdk.jdo.parameters.container;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.container.Container;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.parameters.PrmWait;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
@Getter
@Builder
@AllArgsConstructor
public class PrmContainerCreate implements SessionContext {
@NotNull
private Container container;
private PrmWait waitParams;
private SessionToken sessionToken;
private Map<String, String> xHeaders;
public PrmContainerCreate(Container container, PrmWait waitParams) {
this.container = container;
this.waitParams = waitParams;
}
public PrmContainerCreate(Container container) {
this.container = container;
}
}

@@ -1,33 +0,0 @@
package info.frostfs.sdk.jdo.parameters.container;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.container.ContainerId;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.parameters.PrmWait;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
@Getter
@Builder
@AllArgsConstructor
public class PrmContainerDelete implements SessionContext {
@NotNull
private ContainerId containerId;
private PrmWait waitParams;
private SessionToken sessionToken;
private Map<String, String> xHeaders;
public PrmContainerDelete(ContainerId containerId, PrmWait waitParams) {
this.containerId = containerId;
this.waitParams = waitParams;
}
public PrmContainerDelete(ContainerId containerId) {
this.containerId = containerId;
}
}

@@ -1,23 +0,0 @@
package info.frostfs.sdk.jdo.parameters.container;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.container.ContainerId;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
@Getter
@Builder
@AllArgsConstructor
public class PrmContainerGet {
@NotNull
private ContainerId containerId;
private Map<String, String> xHeaders;
public PrmContainerGet(ContainerId containerId) {
this.containerId = containerId;
}
}

@@ -1,16 +0,0 @@
package info.frostfs.sdk.jdo.parameters.container;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import lombok.NoArgsConstructor;
import java.util.Map;
@Getter
@Builder
@AllArgsConstructor
@NoArgsConstructor
public class PrmContainerGetAll {
private Map<String, String> xHeaders;
}

@@ -1,28 +0,0 @@
package info.frostfs.sdk.jdo.parameters.object;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.object.ObjectHeader;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.io.InputStream;
import java.util.Map;
@Getter
@Builder
@AllArgsConstructor
public class PrmObjectClientCutPut implements PrmObjectPutBase, SessionContext {
@NotNull
private final PutObjectContext putObjectContext = new PutObjectContext();
@NotNull
private ObjectHeader objectHeader;
@NotNull
private InputStream payload;
private int bufferMaxSize;
private byte[] customerBuffer;
private SessionToken sessionToken;
private Map<String, String> xHeaders;
}
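A hedged sketch of how this parameter object could be assembled with its Lombok-generated builder and handed to `putClientCutObject` (see `FrostFSClient` above); the buffer size and file path are hypothetical:

```java
import info.frostfs.sdk.FrostFSClient;
import info.frostfs.sdk.dto.object.ObjectHeader;
import info.frostfs.sdk.dto.object.ObjectId;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.object.PrmObjectClientCutPut;
import java.io.FileInputStream;
import java.io.IOException;

public class ClientCutPutSketch {
    // `client` and `header` are assumed to be prepared as in the README examples.
    static ObjectId upload(FrostFSClient client, ObjectHeader header, String path) throws IOException {
        try (FileInputStream payload = new FileInputStream(path)) {
            var prm = PrmObjectClientCutPut.builder()
                    .objectHeader(header)
                    .payload(payload)
                    .bufferMaxSize(4 * 1024 * 1024) // hypothetical 4 MiB cap
                    .build();
            return client.putClientCutObject(prm, new CallContext());
        }
    }
}
```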

@@ -1,30 +0,0 @@
package info.frostfs.sdk.jdo.parameters.object;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.container.ContainerId;
import info.frostfs.sdk.dto.object.ObjectId;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
@Getter
@Builder
@AllArgsConstructor
public class PrmObjectDelete implements SessionContext {
@NotNull
private ContainerId containerId;
@NotNull
private ObjectId objectId;
private SessionToken sessionToken;
private Map<String, String> xHeaders;
public PrmObjectDelete(ContainerId containerId, ObjectId objectId) {
this.containerId = containerId;
this.objectId = objectId;
}
}

@@ -1,30 +0,0 @@
package info.frostfs.sdk.jdo.parameters.object;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.container.ContainerId;
import info.frostfs.sdk.dto.object.ObjectId;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
@Getter
@Builder
@AllArgsConstructor
public class PrmObjectGet implements SessionContext {
@NotNull
private ContainerId containerId;
@NotNull
private ObjectId objectId;
private SessionToken sessionToken;
private Map<String, String> xHeaders;
public PrmObjectGet(ContainerId containerId, ObjectId objectId) {
this.containerId = containerId;
this.objectId = objectId;
}
}

@@ -1,31 +0,0 @@
package info.frostfs.sdk.jdo.parameters.object;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.container.ContainerId;
import info.frostfs.sdk.dto.object.ObjectId;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
@Getter
@Builder
@AllArgsConstructor
public class PrmObjectHeadGet implements SessionContext {
@NotNull
private ContainerId containerId;
@NotNull
private ObjectId objectId;
private boolean raw;
private SessionToken sessionToken;
private Map<String, String> xHeaders;
public PrmObjectHeadGet(ContainerId containerId, ObjectId objectId) {
this.containerId = containerId;
this.objectId = objectId;
}
}

View file

@ -1,28 +0,0 @@
package info.frostfs.sdk.jdo.parameters.object;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.object.ObjectHeader;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
@Getter
@Builder
@AllArgsConstructor
public class PrmObjectPut implements PrmObjectPutBase, SessionContext {
@NotNull
private final PutObjectContext putObjectContext = new PutObjectContext();
@NotNull
private ObjectHeader objectHeader;
private SessionToken sessionToken;
private Map<String, String> xHeaders;
public PrmObjectPut(ObjectHeader objectHeader) {
this.objectHeader = objectHeader;
}
}

View file

@ -1,12 +0,0 @@
package info.frostfs.sdk.jdo.parameters.object;
import info.frostfs.sdk.dto.object.ObjectHeader;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import java.util.Map;
public interface PrmObjectPutBase extends SessionContext {
ObjectHeader getObjectHeader();
Map<String, String> getXHeaders();
}

View file

@ -1,30 +0,0 @@
package info.frostfs.sdk.jdo.parameters.object;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.container.ContainerId;
import info.frostfs.sdk.dto.object.ObjectFilter;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
@Getter
@Builder
@AllArgsConstructor
public class PrmObjectSearch implements SessionContext {
@NotNull
private ContainerId containerId;
@NotNull
private ObjectFilter<?>[] filters;
private SessionToken sessionToken;
private Map<String, String> xHeaders;
public PrmObjectSearch(ContainerId containerId, ObjectFilter<?>... filters) {
this.containerId = containerId;
this.filters = filters;
}
}

View file

@ -1,26 +0,0 @@
package info.frostfs.sdk.jdo.parameters.object;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.object.ObjectFrostFS;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
@Getter
@Builder
@AllArgsConstructor
public class PrmObjectSinglePut implements SessionContext {
@NotNull
private ObjectFrostFS objectFrostFS;
private SessionToken sessionToken;
private Map<String, String> xHeaders;
public PrmObjectSinglePut(ObjectFrostFS objectFrostFS) {
this.objectFrostFS = objectFrostFS;
}
}

View file

@ -1,14 +0,0 @@
package info.frostfs.sdk.jdo.parameters.object;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;
@Getter
@Setter
@NoArgsConstructor
public class PutObjectContext {
private int maxObjectSizeCache;
private long currentStreamPosition;
private long fullLength;
}

View file

@ -1,42 +0,0 @@
package info.frostfs.sdk.jdo.parameters.object.patch;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.object.ObjectAttribute;
import info.frostfs.sdk.dto.object.patch.Address;
import info.frostfs.sdk.dto.object.patch.Range;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import lombok.*;
import java.io.InputStream;
import java.util.List;
import java.util.Map;
@Getter
@Builder
@AllArgsConstructor
public class PrmObjectPatch implements SessionContext {
@NotNull
private Address address;
private Range range;
private InputStream payload;
private List<ObjectAttribute> newAttributes;
private boolean replaceAttributes;
private int maxChunkLength;
private SessionToken sessionToken;
private Map<String, String> xHeaders;
public PrmObjectPatch(Address address, Range range, InputStream payload, int maxChunkLength) {
this.address = address;
this.range = range;
this.payload = payload;
this.maxChunkLength = maxChunkLength;
}
public PrmObjectPatch(Address address, List<ObjectAttribute> newAttributes, boolean replaceAttributes) {
this.address = address;
this.newAttributes = newAttributes;
this.replaceAttributes = replaceAttributes;
}
}
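
Taken together, the two constructors above cover the two patch modes: replacing a byte range of the payload, or rewriting the attribute set. A minimal sketch of both, assuming the Address, Range and ObjectAttribute values are built elsewhere (the 1 MiB chunk size is purely illustrative):

import info.frostfs.sdk.dto.object.ObjectAttribute;
import info.frostfs.sdk.dto.object.patch.Address;
import info.frostfs.sdk.dto.object.patch.Range;
import info.frostfs.sdk.jdo.parameters.object.patch.PrmObjectPatch;
import java.io.InputStream;
import java.util.List;
class PatchParamsSketch {
// Replace the bytes covered by `range` with the bytes read from `payload`,
// streamed to the node in chunks of up to 1 MiB.
static PrmObjectPatch payloadPatch(Address address, Range range, InputStream payload) {
return new PrmObjectPatch(address, range, payload, 1024 * 1024);
}
// Rewrite only the attribute set; `true` means existing attributes are replaced.
static PrmObjectPatch attributesPatch(Address address, List<ObjectAttribute> attrs) {
return new PrmObjectPatch(address, attrs, true);
}
}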

View file

@ -1,35 +0,0 @@
package info.frostfs.sdk.jdo.parameters.object.patch;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.container.ContainerId;
import info.frostfs.sdk.dto.object.ObjectId;
import info.frostfs.sdk.dto.object.patch.Range;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
@Getter
@Builder
@AllArgsConstructor
public class PrmRangeGet implements SessionContext {
@NotNull
private ContainerId containerId;
@NotNull
private ObjectId objectId;
@NotNull
private Range range;
private boolean raw;
private SessionToken sessionToken;
private Map<String, String> xHeaders;
public PrmRangeGet(ContainerId containerId, ObjectId objectId, Range range) {
this.containerId = containerId;
this.objectId = objectId;
this.range = range;
}
}

View file

@ -1,38 +0,0 @@
package info.frostfs.sdk.jdo.parameters.object.patch;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.container.ContainerId;
import info.frostfs.sdk.dto.object.ObjectId;
import info.frostfs.sdk.dto.object.patch.Range;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.List;
import java.util.Map;
@Getter
@Builder
@AllArgsConstructor
public class PrmRangeHashGet implements SessionContext {
@NotNull
private ContainerId containerId;
@NotNull
private ObjectId objectId;
@NotNull
private List<Range> ranges;
@NotNull
private byte[] salt;
private SessionToken sessionToken;
private Map<String, String> xHeaders;
public PrmRangeHashGet(ContainerId containerId, ObjectId objectId, List<Range> ranges, byte[] salt) {
this.containerId = containerId;
this.objectId = objectId;
this.ranges = ranges;
this.salt = salt;
}
}

View file

@ -1,19 +0,0 @@
package info.frostfs.sdk.jdo.parameters.session;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
@Getter
@Builder
@AllArgsConstructor
public class PrmSessionCreate {
private long expiration; // -1 means the maximum possible expiration
private Map<String, String> xHeaders;
public PrmSessionCreate(long expiration) {
this.expiration = expiration;
}
}

View file

@ -1,7 +0,0 @@
package info.frostfs.sdk.jdo.parameters.session;
import info.frostfs.sdk.dto.session.SessionToken;
public interface SessionContext {
SessionToken getSessionToken();
}

View file

@ -1,12 +0,0 @@
package info.frostfs.sdk.jdo.pool;
import lombok.AllArgsConstructor;
import lombok.Getter;
@Getter
@AllArgsConstructor
public class NodeParameters {
private final int priority;
private final String address;
private final double weight;
}

View file

@ -1,44 +0,0 @@
package info.frostfs.sdk.jdo.pool;
import info.frostfs.sdk.jdo.ECDsa;
import info.frostfs.sdk.pool.ClientWrapper;
import io.grpc.ClientInterceptors;
import io.netty.channel.ChannelOption;
import lombok.Getter;
import lombok.Setter;
import org.slf4j.Logger;
import java.util.ArrayList;
import java.util.Collection;
import java.util.function.Function;
@Getter
@Setter
public class PoolInitParameters {
private ECDsa key;
private long nodeDialTimeout;
private long nodeStreamTimeout;
private long healthCheckTimeout;
private long clientRebalanceInterval;
private long sessionExpirationDuration;
private int errorThreshold;
private NodeParameters[] nodeParams;
private ChannelOption<?>[] dialOptions;
private Function<String, ClientWrapper> clientBuilder;
private long gracefulCloseOnSwitchTimeout;
private Logger logger;
private Collection<ClientInterceptors> interceptors = new ArrayList<>();
}

View file

@ -1,15 +0,0 @@
package info.frostfs.sdk.jdo.result;
import info.frostfs.sdk.dto.object.ObjectHeader;
import info.frostfs.sdk.dto.object.SplitInfo;
import lombok.Builder;
import lombok.Getter;
import lombok.Setter;
@Builder
@Getter
@Setter
public class ObjectHeaderResult {
private ObjectHeader headerInfo;
private SplitInfo splitInfo;
}

View file

@ -1,369 +0,0 @@
package info.frostfs.sdk.placement;
import info.frostfs.sdk.dto.netmap.*;
import info.frostfs.sdk.enums.netmap.FilterOperation;
import info.frostfs.sdk.enums.netmap.SelectorClause;
import info.frostfs.sdk.exceptions.FrostFSException;
import lombok.Getter;
import lombok.Setter;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;
import static info.frostfs.sdk.constants.AttributeConst.ATTRIBUTE_CAPACITY;
import static info.frostfs.sdk.constants.AttributeConst.ATTRIBUTE_PRICE;
import static info.frostfs.sdk.constants.ErrorConst.*;
@Getter
@Setter
public final class Context {
public static final String MAIN_FILTER_NAME = "*";
public static final String LIKE_WILDCARD = "*";
// network map to operate on
private final NetmapSnapshot netMap;
// cache of processed filters
private final Map<String, Filter> processedFilters = new HashMap<>();
// cache of processed selectors
private final Map<String, Selector> processedSelectors = new HashMap<>();
// stores results of selector processing
private final Map<String, List<List<NodeInfo>>> selections = new HashMap<>();
// cache of parsed numeric values
private final Map<String, Long> numCache = new HashMap<>();
private final Map<Long, Boolean> usedNodes = new HashMap<>();
private final Function<NodeInfo, Double> weightFunc;
private byte[] hrwSeed;
private long hrwSeedHash;
private int cbf;
private boolean strict;
public Context(NetmapSnapshot netMap) {
this.netMap = netMap;
this.weightFunc = Tools.defaultWeightFunc(netMap.getNodeInfoCollection());
}
private static Pair<Integer, Integer> calcNodesCount(Selector selector) {
return selector.getClause() == SelectorClause.SAME
? new ImmutablePair<>(1, selector.getCount())
: new ImmutablePair<>(selector.getCount(), 1);
}
private static double calcBucketWeight(List<NodeInfo> ns, MeanIQRAgg a, Function<NodeInfo, Double> wf) {
for (NodeInfo node : ns) {
a.add(wf.apply(node));
}
return a.compute();
}
public void processFilters(PlacementPolicy policy) {
for (Filter filter : policy.getFilters()) {
processFilter(filter, true);
}
}
private void processFilter(Filter filter, boolean top) {
String filterName = filter.getName();
if (MAIN_FILTER_NAME.equals(filterName)) {
throw new FrostFSException(String.format(INVALID_FILTER_NAME_TEMPLATE, MAIN_FILTER_NAME));
}
if (top && (filterName == null || filterName.isEmpty())) {
throw new FrostFSException(UNNAMED_TOP_FILTER);
}
if (!top && filterName != null && !filterName.isEmpty() && !processedFilters.containsKey(filterName)) {
throw new FrostFSException(FILTER_NOT_FOUND);
}
if (filter.getOperation() == FilterOperation.AND ||
filter.getOperation() == FilterOperation.OR ||
filter.getOperation() == FilterOperation.NOT) {
for (Filter f : filter.getFilters()) {
processFilter(f, false);
}
} else {
if (filter.getFilters().length != 0) {
throw new FrostFSException(NON_EMPTY_FILTERS);
} else if (!top && filterName != null && !filterName.isEmpty()) {
// named reference
return;
}
switch (filter.getOperation()) {
case EQ:
case NE:
case LIKE:
break;
case GT:
case GE:
case LT:
case LE:
long n = Long.parseLong(filter.getValue());
numCache.put(filter.getValue(), n);
break;
default:
throw new FrostFSException(String.format(INVALID_FILTER_OPERATION_TEMPLATE, filter.getOperation()));
}
}
if (top) {
processedFilters.put(filterName, filter);
}
}
public void processSelectors(PlacementPolicy policy) {
for (Selector selector : policy.getSelectors()) {
String filterName = selector.getFilter();
if (!MAIN_FILTER_NAME.equals(filterName)) {
if (selector.getFilter() == null || !processedFilters.containsKey(selector.getFilter())) {
throw new FrostFSException(String.format(FILTER_NOT_FOUND_TEMPLATE, filterName));
}
}
processedSelectors.put(selector.getName(), selector);
List<List<NodeInfo>> selection = getSelection(selector);
selections.put(selector.getName(), selection);
}
}
private NodeAttributePair[] getSelectionBase(Selector selector) {
String fName = selector.getFilter();
if (fName == null) {
throw new FrostFSException(FILTER_NAME_IS_EMPTY);
}
Filter f = processedFilters.get(fName);
boolean isMain = MAIN_FILTER_NAME.equals(fName);
List<NodeAttributePair> result = new ArrayList<>();
Map<String, List<NodeInfo>> nodeMap = new HashMap<>();
String attr = selector.getAttribute();
for (NodeInfo node : netMap.getNodeInfoCollection()) {
if (usedNodes.containsKey(node.getHash())) {
continue;
}
if (isMain || match(f, node)) {
if (attr == null) {
result.add(new NodeAttributePair("", new NodeInfo[]{node}));
} else {
String v = node.getAttributes().get(attr);
List<NodeInfo> nodes = nodeMap.computeIfAbsent(v, k -> new ArrayList<>());
nodes.add(node);
}
}
}
if (attr != null && !attr.isEmpty()) {
for (Map.Entry<String, List<NodeInfo>> entry : nodeMap.entrySet()) {
result.add(new NodeAttributePair(entry.getKey(), entry.getValue().toArray(NodeInfo[]::new)));
}
}
if (hrwSeed != null && hrwSeed.length != 0) {
NodeAttributePair[] sortedNodes = new NodeAttributePair[result.size()];
for (int i = 0; i < result.size(); i++) {
double[] ws = new double[result.get(i).getNodes().length];
NodeAttributePair res = result.get(i);
Tools.appendWeightsTo(res.getNodes(), weightFunc, ws);
sortedNodes[i] = new NodeAttributePair(
res.getAttr(),
Tools.sortHasherSliceByWeightValue(Arrays.asList(res.getNodes()), ws, hrwSeedHash)
.toArray(NodeInfo[]::new)
);
}
return sortedNodes;
}
return result.toArray(new NodeAttributePair[0]);
}
public List<List<NodeInfo>> getSelection(Selector s) {
Pair<Integer, Integer> counts = calcNodesCount(s);
int bucketCount = counts.getKey();
int nodesInBucket = counts.getValue();
NodeAttributePair[] buckets = getSelectionBase(s);
if (strict && buckets.length < bucketCount) {
throw new FrostFSException(String.format(NOT_ENOUGH_NODES_TEMPLATE, s.getName()));
}
if (hrwSeed == null || hrwSeed.length == 0) {
if (s.getAttribute() == null || s.getAttribute().isEmpty()) {
Arrays.sort(buckets, Comparator.comparing(b -> b.getNodes()[0].getHash()));
} else {
Arrays.sort(buckets, Comparator.comparing(NodeAttributePair::getAttr));
}
}
int maxNodesInBucket = nodesInBucket * cbf;
List<List<NodeInfo>> res = new ArrayList<>(buckets.length);
List<List<NodeInfo>> fallback = new ArrayList<>(buckets.length);
for (NodeAttributePair bucket : buckets) {
List<NodeInfo> ns = Arrays.asList(bucket.getNodes());
if (ns.size() >= maxNodesInBucket) {
res.add(new ArrayList<>(ns.subList(0, maxNodesInBucket)));
} else if (ns.size() >= nodesInBucket) {
fallback.add(new ArrayList<>(ns));
}
}
if (res.size() < bucketCount) {
res.addAll(fallback);
if (strict && res.size() < bucketCount) {
throw new FrostFSException(String.format(NOT_ENOUGH_NODES_TEMPLATE, s.getName()));
}
}
if (hrwSeed != null && hrwSeed.length != 0) {
double[] weights = new double[res.size()];
var a = new MeanIQRAgg();
for (int i = 0; i < res.size(); i++) {
a.clear();
weights[i] = calcBucketWeight(res.get(i), a, weightFunc);
}
List<HasherList> hashers = res.stream()
.map(HasherList::new)
.collect(Collectors.toList());
hashers = Tools.sortHasherSliceByWeightValue(hashers, weights, hrwSeedHash);
for (int i = 0; i < res.size(); i++) {
res.set(i, hashers.get(i).getNodes());
}
}
if (res.size() < bucketCount) {
if (strict && res.isEmpty()) {
throw new FrostFSException(NOT_ENOUGH_NODES);
}
bucketCount = res.size();
}
if (s.getAttribute() == null || s.getAttribute().isEmpty()) {
fallback = res.subList(bucketCount, res.size());
res = new ArrayList<>(res.subList(0, bucketCount));
for (int i = 0; i < fallback.size(); i++) {
int index = i % bucketCount;
if (res.get(index).size() >= maxNodesInBucket) {
break;
}
res.get(index).addAll(fallback.get(i));
}
}
return res.subList(0, bucketCount);
}
private boolean matchKeyValue(Filter f, NodeInfo nodeInfo) {
switch (f.getOperation()) {
case EQ:
return nodeInfo.getAttributes().containsKey(f.getKey()) &&
nodeInfo.getAttributes().get(f.getKey()).equals(f.getValue());
case LIKE:
// "*str*" means contains, "*str" means endsWith, "str*" means startsWith;
// without wildcards LIKE degenerates to an exact match.
String attrValue = nodeInfo.getAttributes().get(f.getKey());
if (attrValue == null) {
return false;
}
boolean hasPrefix = f.getValue().startsWith(LIKE_WILDCARD);
boolean hasSuffix = f.getValue().endsWith(LIKE_WILDCARD);
int start = hasPrefix ? LIKE_WILDCARD.length() : 0;
int end = hasSuffix ? f.getValue().length() - LIKE_WILDCARD.length() : f.getValue().length();
String str = f.getValue().substring(start, end);
if (hasPrefix && hasSuffix) {
return attrValue.contains(str);
}
if (hasPrefix) {
return attrValue.endsWith(str);
}
if (hasSuffix) {
return attrValue.startsWith(str);
}
return attrValue.equals(f.getValue());
case NE:
// Compare with the filter value first to avoid an NPE when the attribute is absent.
return !f.getValue().equals(nodeInfo.getAttributes().get(f.getKey()));
default:
long attr;
switch (f.getKey()) {
case ATTRIBUTE_PRICE:
attr = nodeInfo.getPrice().longValue();
break;
case ATTRIBUTE_CAPACITY:
attr = nodeInfo.getCapacity().longValue();
break;
default:
try {
attr = Long.parseLong(nodeInfo.getAttributes().get(f.getKey()));
} catch (NumberFormatException e) {
return false;
}
break;
}
switch (f.getOperation()) {
case GT:
return attr > numCache.get(f.getValue());
case GE:
return attr >= numCache.get(f.getValue());
case LT:
return attr < numCache.get(f.getValue());
case LE:
return attr <= numCache.get(f.getValue());
default:
break;
}
break;
}
return false;
}
boolean match(Filter f, NodeInfo nodeInfo) {
if (f == null) {
return false;
}
switch (f.getOperation()) {
case NOT:
Filter[] inner = f.getFilters();
Filter fSub = inner[0];
if (inner[0].getName() != null && !inner[0].getName().isEmpty()) {
fSub = processedFilters.get(inner[0].getName());
}
return !match(fSub, nodeInfo);
case AND:
case OR:
for (int i = 0; i < f.getFilters().length; i++) {
Filter currentFilter = f.getFilters()[i];
if (currentFilter.getName() != null && !currentFilter.getName().isEmpty()) {
currentFilter = processedFilters.get(currentFilter.getName());
}
boolean ok = match(currentFilter, nodeInfo);
if (ok == (f.getOperation() == FilterOperation.OR)) {
return ok;
}
}
return f.getOperation() == FilterOperation.AND;
default:
return matchKeyValue(f, nodeInfo);
}
}
}

View file

@ -1,20 +0,0 @@
package info.frostfs.sdk.placement;
import info.frostfs.sdk.dto.netmap.Hasher;
import info.frostfs.sdk.dto.netmap.NodeInfo;
import lombok.AllArgsConstructor;
import lombok.Getter;
import org.apache.commons.collections4.CollectionUtils;
import java.util.List;
@Getter
@AllArgsConstructor
public final class HasherList implements Hasher {
private final List<NodeInfo> nodes;
@Override
public long getHash() {
return CollectionUtils.isNotEmpty(nodes) ? nodes.get(0).getHash() : 0L;
}
}

View file

@ -1,18 +0,0 @@
package info.frostfs.sdk.placement;
import java.math.BigInteger;
public class MeanAgg {
private double mean;
private int count;
public void add(BigInteger n) {
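// Running mean without accumulating a sum:
// mean' = mean * count / (count + 1) + n / (count + 1).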
int c = count + 1;
mean = mean * count / c + n.doubleValue() / c;
count++;
}
public double compute() {
return mean;
}
}

View file

@ -1,57 +0,0 @@
package info.frostfs.sdk.placement;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
public final class MeanIQRAgg {
private static final int MIN_LN = 4;
private final List<Double> arr = new ArrayList<>();
public MeanIQRAgg() {
}
public void add(double d) {
arr.add(d);
}
public double compute() {
int length = arr.size();
if (length == 0) {
return 0;
}
List<Double> sorted = new ArrayList<>(arr);
Collections.sort(sorted);
double minV, maxV;
if (length < MIN_LN) {
minV = sorted.get(0);
maxV = sorted.get(length - 1);
} else {
int start = length / MIN_LN;
int end = length * 3 / MIN_LN - 1;
minV = sorted.get(start);
maxV = sorted.get(end);
}
int count = 0;
double sum = 0;
for (var e : sorted) {
if (e >= minV && e <= maxV) {
sum += e;
count++;
}
}
return count == 0 ? 0 : sum / count;
}
public void clear() {
arr.clear();
}
}
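
The aggregator above computes an outlier-resistant mean: once at least MIN_LN values have been added, only values between the two inner quartile boundaries contribute. A small worked example, assuming the class is used as-is:

MeanIQRAgg agg = new MeanIQRAgg();
for (double v : new double[]{1, 2, 2, 3, 3, 4, 4, 100}) {
agg.add(v);
}
// sorted = [1, 2, 2, 3, 3, 4, 4, 100], length 8 >= MIN_LN:
// minV = sorted[8 / 4] = 2, maxV = sorted[8 * 3 / 4 - 1] = 4,
// so 1 and the outlier 100 are discarded: (2 + 2 + 3 + 3 + 4 + 4) / 6 = 3.0.
double robustMean = agg.compute();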

View file

@ -1,24 +0,0 @@
package info.frostfs.sdk.placement;
import java.math.BigInteger;
public class MinAgg {
private double min;
private boolean minFound;
public void add(BigInteger n) {
if (!minFound) {
min = n.doubleValue();
minFound = true;
return;
}
if (n.doubleValue() < min) {
min = n.doubleValue();
}
}
public double compute() {
return min;
}
}

View file

@ -1,15 +0,0 @@
package info.frostfs.sdk.placement;
import info.frostfs.sdk.dto.netmap.NodeInfo;
import lombok.Getter;
@Getter
public class NodeAttributePair {
private final String attr;
private final NodeInfo[] nodes;
NodeAttributePair(String attr, NodeInfo[] nodes) {
this.attr = attr;
this.nodes = nodes;
}
}

View file

@ -1,5 +0,0 @@
package info.frostfs.sdk.placement;
public interface Normalizer {
double normalize(double w);
}

View file

@ -1,197 +0,0 @@
package info.frostfs.sdk.placement;
import info.frostfs.sdk.dto.netmap.*;
import info.frostfs.sdk.exceptions.FrostFSException;
import lombok.AllArgsConstructor;
import org.apache.commons.codec.digest.MurmurHash3;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.Function;
import static info.frostfs.sdk.constants.ErrorConst.SELECTOR_NOT_FOUND_TEMPLATE;
import static info.frostfs.sdk.constants.ErrorConst.VECTORS_IS_NULL;
@AllArgsConstructor
public final class PlacementVector {
private final NetmapSnapshot netmapSnapshot;
private static NodeInfo[] flattenNodes(List<List<NodeInfo>> nodes) {
int size = nodes.stream().mapToInt(List::size).sum();
NodeInfo[] result = new NodeInfo[size];
int i = 0;
for (List<NodeInfo> ns : nodes) {
for (NodeInfo n : ns) {
result[i++] = n;
}
}
return result;
}
/*
* placementVectors sorts the container nodes returned by the containerNodes method
* and returns placement vectors for the entity identified by the given pivot.
* For example, to build the list of nodes for storing an object, the
* binary-encoded object identifier can be used as the pivot.
* The result is deterministic for a fixed NetMap and fixed parameters.
* */
public NodeInfo[][] placementVectors(NodeInfo[][] vectors, byte[] pivot) {
if (vectors == null) {
throw new FrostFSException(VECTORS_IS_NULL);
}
long hash = MurmurHash3.hash128x64(pivot, 0, pivot.length, 0)[0];
Function<NodeInfo, Double> wf = Tools.defaultWeightFunc(netmapSnapshot.getNodeInfoCollection());
NodeInfo[][] result = new NodeInfo[vectors.length][];
int maxSize = Arrays.stream(vectors)
.mapToInt(v -> v.length)
.max()
.orElse(0);
double[] spanWeights = new double[maxSize];
for (int i = 0; i < vectors.length; i++) {
result[i] = Arrays.copyOf(vectors[i], vectors[i].length);
Tools.appendWeightsTo(result[i], wf, spanWeights);
List<NodeInfo> sorted = Tools.sortHasherSliceByWeightValue(
Arrays.asList(result[i]),
spanWeights,
hash
);
result[i] = sorted.toArray(new NodeInfo[0]);
}
return result;
}
/*
* selectFilterNodes returns a two-dimensional list of nodes as a result of applying the given
* SelectFilterExpr to the NetMap. If the SelectFilterExpr contains only filters, the result contains
* a single row with the result of the last filter application. If the SelectFilterExpr contains only selectors,
* the result contains the selection rows of the last selector application.
* */
public List<List<NodeInfo>> selectFilterNodes(SelectFilterExpr expr) {
PlacementPolicy policy = new PlacementPolicy(
null,
false,
expr.getCbf(),
expr.getFilters().toArray(Filter[]::new),
new Selector[]{expr.getSelector()}
);
Context ctx = new Context(netmapSnapshot);
ctx.setCbf(expr.getCbf());
ctx.processFilters(policy);
ctx.processSelectors(policy);
List<List<NodeInfo>> ret = new ArrayList<>();
if (expr.getSelector() == null) {
Filter lastFilter = expr.getFilters().get(expr.getFilters().size() - 1);
List<NodeInfo> subCollection = new ArrayList<>();
ret.add(subCollection);
for (NodeInfo nodeInfo : netmapSnapshot.getNodeInfoCollection()) {
if (ctx.match(ctx.getProcessedFilters().get(lastFilter.getName()), nodeInfo)) {
subCollection.add(nodeInfo);
}
}
} else if (expr.getSelector().getName() != null) {
List<List<NodeInfo>> sel = ctx.getSelection(
ctx.getProcessedSelectors().get(expr.getSelector().getName())
);
for (List<NodeInfo> ns : sel) {
List<NodeInfo> subCollection = new ArrayList<>(ns);
ret.add(subCollection);
}
}
return ret;
}
/*
* containerNodes returns a two-dimensional list of nodes as a result of applying the given PlacementPolicy to the NetMap.
* Each row of the list corresponds to a replica descriptor.
* Row order corresponds to the order of the ReplicaDescriptor list in the policy.
* Nodes are pre-filtered according to the Filter list from the policy, and then selected by the Selector list.
* The result is deterministic for a fixed NetMap and fixed parameters.
*
* The result can be used in placementVectors.
* */
public NodeInfo[][] containerNodes(PlacementPolicy p, byte[] pivot) {
Context c = new Context(netmapSnapshot);
c.setCbf(p.getBackupFactory() == 0 ? 3 : p.getBackupFactory());
if (pivot != null && pivot.length > 0) {
c.setHrwSeed(pivot);
var hash = MurmurHash3.hash128x64(pivot, 0, pivot.length, 0)[0];
c.setHrwSeedHash(hash);
}
c.processFilters(p);
c.processSelectors(p);
boolean unique = p.isUnique();
List<List<NodeInfo>> result = new ArrayList<>(p.getReplicas().length);
for (int i = 0; i < p.getReplicas().length; i++) {
result.add(new ArrayList<>());
}
for (int i = 0; i < p.getReplicas().length; i++) {
String sName = p.getReplicas()[i].getSelector();
if ((sName == null || sName.isEmpty()) &&
!(p.getReplicas().length == 1 && p.getSelectors().length == 1)) {
Selector s = new Selector(
"", p.getReplicas()[i].getCountNodes(), null, null,
Context.MAIN_FILTER_NAME
);
List<List<NodeInfo>> nodes = c.getSelection(s);
result.get(i).addAll(Arrays.asList(flattenNodes(nodes)));
if (unique) {
for (NodeInfo n : result.get(i)) {
c.getUsedNodes().put(n.getHash(), true);
}
}
continue;
}
if (unique) {
Selector s = c.getProcessedSelectors().get(sName);
if (s == null) {
throw new FrostFSException(String.format(SELECTOR_NOT_FOUND_TEMPLATE, sName));
}
List<List<NodeInfo>> nodes = c.getSelection(s);
result.get(i).addAll(Arrays.asList(flattenNodes(nodes)));
for (NodeInfo n : result.get(i)) {
c.getUsedNodes().put(n.getHash(), true);
}
} else {
List<List<NodeInfo>> nodes = c.getSelections().get(sName);
result.get(i).addAll(Arrays.asList(flattenNodes(nodes)));
}
}
NodeInfo[][] collection = new NodeInfo[result.size()][];
for (int i = 0; i < result.size(); i++) {
collection[i] = result.get(i).toArray(new NodeInfo[0]);
}
return collection;
}
}
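
A minimal sketch of how the two methods above compose, assuming the NetmapSnapshot comes from getNetmapSnapshot and the pivots are the binary-encoded container and object identifiers, as the comments describe:

import info.frostfs.sdk.dto.netmap.NetmapSnapshot;
import info.frostfs.sdk.dto.netmap.NodeInfo;
import info.frostfs.sdk.dto.netmap.PlacementPolicy;
import info.frostfs.sdk.placement.PlacementVector;
class PlacementSketch {
// Returns, per replica descriptor, the nodes ordered for storing one object.
static NodeInfo[][] nodesForObject(NetmapSnapshot snapshot, PlacementPolicy policy,
byte[] containerPivot, byte[] objectPivot) {
PlacementVector pv = new PlacementVector(snapshot);
// Row i corresponds to replica descriptor i of the policy.
NodeInfo[][] containerNodes = pv.containerNodes(policy, containerPivot);
// Re-sort each row by HRW weight for this particular object.
return pv.placementVectors(containerNodes, objectPivot);
}
}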

View file

@ -1,14 +0,0 @@
package info.frostfs.sdk.placement;
public class ReverseMinNorm implements Normalizer {
private final double min;
public ReverseMinNorm(double min) {
this.min = min;
}
@Override
public double normalize(double w) {
return (min + 1) / (w + 1);
}
}

View file

@ -1,16 +0,0 @@
package info.frostfs.sdk.placement;
import info.frostfs.sdk.dto.netmap.Filter;
import info.frostfs.sdk.dto.netmap.Selector;
import lombok.AllArgsConstructor;
import lombok.Getter;
import java.util.List;
@Getter
@AllArgsConstructor
public class SelectFilterExpr {
private final int cbf;
private final Selector selector;
private final List<Filter> filters;
}

View file

@ -1,19 +0,0 @@
package info.frostfs.sdk.placement;
public class SigmoidNorm implements Normalizer {
private final double scale;
public SigmoidNorm(double scale) {
this.scale = scale;
}
@Override
public double normalize(double w) {
if (scale == 0) {
return 0;
}
double x = w / scale;
return x / (1 + x);
}
}

View file

@ -1,123 +0,0 @@
package info.frostfs.sdk.placement;
import info.frostfs.sdk.dto.netmap.Hasher;
import info.frostfs.sdk.dto.netmap.NodeInfo;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.function.Function;
import static info.frostfs.sdk.constants.AppConst.UNSIGNED_LONG_MASK;
import static info.frostfs.sdk.constants.CryptoConst.*;
public final class Tools {
private Tools() {
}
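// HRW "distance" between two 64-bit values: XOR them, then scramble the
// result through multiply/xor-shift finalization rounds so the induced
// ordering is deterministic but well spread.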
public static long distance(long x, long y) {
long acc = x ^ y;
acc ^= acc >>> MURMUR_MULTIPLIER;
acc *= LANDAU_PRIME_DIVISOR_65BIT;
acc ^= acc >>> MURMUR_MULTIPLIER;
acc *= LANDAU_PRIME_DIVISOR_64BIT;
acc ^= acc >>> MURMUR_MULTIPLIER;
return acc;
}
public static void appendWeightsTo(NodeInfo[] nodes, Function<NodeInfo, Double> wf, double[] weights) {
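// Note: callers must pass weights.length >= nodes.length. The reallocation
// below replaces only the local reference (Java passes array references by
// value), so an undersized array would never reach the caller filled.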
if (weights.length < nodes.length) {
weights = new double[nodes.length];
}
for (int i = 0; i < nodes.length; i++) {
weights[i] = wf.apply(nodes[i]);
}
}
public static <T extends Hasher> List<T> sortHasherSliceByWeightValue(List<T> nodes, double[] weights, long hash) {
if (nodes.isEmpty()) {
return nodes;
}
boolean allEquals = true;
if (weights.length > 1) {
for (int i = 1; i < weights.length; i++) {
if (weights[i] != weights[0]) {
allEquals = false;
break;
}
}
}
Double[] dist = new Double[nodes.size()];
if (allEquals) {
for (int i = 0; i < dist.length; i++) {
long x = nodes.get(i).getHash();
dist[i] = toUnsignedBigInteger(distance(x, hash)).doubleValue();
}
return sortHasherByDistance(nodes, dist, true);
}
for (int i = 0; i < dist.length; i++) {
var reverse = UNSIGNED_LONG_MASK.subtract(toUnsignedBigInteger(distance(nodes.get(i).getHash(), hash)));
dist[i] = reverse.doubleValue() * weights[i];
}
return sortHasherByDistance(nodes, dist, false);
}
public static <T extends Hasher, N extends Comparable<N>> List<T> sortHasherByDistance(
List<T> nodes, N[] dist, boolean asc
) {
IndexedValue<T, N>[] indexes = new IndexedValue[nodes.size()];
for (int i = 0; i < dist.length; i++) {
indexes[i] = new IndexedValue<>(nodes.get(i), dist[i]);
}
if (asc) {
Arrays.sort(indexes, Comparator.comparing(iv -> iv.dist));
} else {
Arrays.sort(indexes, (iv1, iv2) -> iv2.dist.compareTo(iv1.dist));
}
List<T> result = new ArrayList<>();
for (IndexedValue<T, N> iv : indexes) {
result.add(iv.nodeInfo);
}
return result;
}
public static Function<NodeInfo, Double> defaultWeightFunc(List<NodeInfo> nodes) {
MeanAgg mean = new MeanAgg();
MinAgg minV = new MinAgg();
for (NodeInfo node : nodes) {
mean.add(node.getCapacity());
minV.add(node.getPrice());
}
return newWeightFunc(new SigmoidNorm(mean.compute()), new ReverseMinNorm(minV.compute()));
}
private static BigInteger toUnsignedBigInteger(long i) {
return i >= 0 ? BigInteger.valueOf(i) : BigInteger.valueOf(i).and(UNSIGNED_LONG_MASK);
}
private static Function<NodeInfo, Double> newWeightFunc(Normalizer capNorm, Normalizer priceNorm) {
return nodeInfo -> capNorm.normalize(nodeInfo.getCapacity().doubleValue())
* priceNorm.normalize(nodeInfo.getPrice().doubleValue());
}
private static class IndexedValue<T, N> {
final T nodeInfo;
final N dist;
IndexedValue(T nodeInfo, N dist) {
this.nodeInfo = nodeInfo;
this.dist = dist;
}
}
}
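
For intuition, here is the weight function produced by defaultWeightFunc evaluated by hand with the two normalizers defined earlier. In real use the scale and minimum come from MeanAgg over node capacities and MinAgg over node prices; the numbers below are illustrative only:

Normalizer capNorm = new SigmoidNorm(100.0); // mean capacity across the netmap
Normalizer priceNorm = new ReverseMinNorm(5.0); // minimum price across the netmap
// For a node with capacity 200 and price 10:
// capNorm: (200 / 100) / (1 + 200 / 100) = 2.0 / 3.0
// priceNorm: (5 + 1) / (10 + 1) = 6.0 / 11.0
double weight = capNorm.normalize(200) * priceNorm.normalize(10); // ~0.3636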

View file

@ -1,27 +0,0 @@
package info.frostfs.sdk.pool;
public interface ClientStatus {
// isHealthy checks if the connection can handle requests.
boolean isHealthy();
// isDialed checks if the connection was created.
boolean isDialed();
// setUnhealthy marks the client as unhealthy.
void setUnhealthy();
// getAddress returns the address of the endpoint.
String getAddress();
// getCurrentErrorRate returns the current error rate.
// Once the rate crosses a specific threshold, the connection is considered unhealthy.
// The Pool.startRebalance routine can make this connection healthy again.
int getCurrentErrorRate();
// getOverallErrorRate returns the total number of errors that have occurred.
long getOverallErrorRate();
// getMethodsStatus returns statistics for all used methods.
StatusSnapshot[] getMethodsStatus();
}

View file

@ -1,114 +0,0 @@
package info.frostfs.sdk.pool;
import info.frostfs.sdk.enums.HealthyStatus;
import info.frostfs.sdk.enums.MethodIndex;
import info.frostfs.sdk.utils.FrostFSMessages;
import lombok.Getter;
import lombok.Setter;
import org.slf4j.Logger;
import java.util.Arrays;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantLock;
@Getter
@Setter
public class ClientStatusMonitor implements ClientStatus {
private final ReentrantLock lock = new ReentrantLock();
private final Logger logger;
private final AtomicInteger healthy = new AtomicInteger();
private final String address;
private final MethodStatus[] methods;
private int errorThreshold;
private int currentErrorCount;
private long overallErrorCount;
public ClientStatusMonitor(Logger logger, String address) {
this.logger = logger;
this.healthy.set(HealthyStatus.HEALTHY.value);
this.address = address;
this.methods = Arrays.stream(MethodIndex.values())
.map(t -> new MethodStatus(t.methodName))
.toArray(MethodStatus[]::new);
}
@Override
public boolean isHealthy() {
return healthy.get() == HealthyStatus.HEALTHY.value;
}
@Override
public boolean isDialed() {
return healthy.get() != HealthyStatus.UNHEALTHY_ON_DIAL.value;
}
public void setHealthy() {
healthy.set(HealthyStatus.HEALTHY.value);
}
@Override
public void setUnhealthy() {
healthy.set(HealthyStatus.UNHEALTHY_ON_REQUEST.value);
}
public void setUnhealthyOnDial() {
healthy.set(HealthyStatus.UNHEALTHY_ON_DIAL.value);
}
public void incErrorRate() {
boolean thresholdReached;
lock.lock();
try {
currentErrorCount++;
overallErrorCount++;
thresholdReached = currentErrorCount >= errorThreshold;
if (thresholdReached) {
setUnhealthy();
currentErrorCount = 0;
}
} finally {
lock.unlock();
}
if (thresholdReached && logger != null) {
FrostFSMessages.errorThresholdReached(logger, address, errorThreshold);
}
}
@Override
public int getCurrentErrorRate() {
lock.lock();
try {
return currentErrorCount;
} finally {
lock.unlock();
}
}
@Override
public long getOverallErrorRate() {
lock.lock();
try {
return overallErrorCount;
} finally {
lock.unlock();
}
}
@Override
public StatusSnapshot[] getMethodsStatus() {
StatusSnapshot[] result = new StatusSnapshot[methods.length];
for (int i = 0; i < result.length; i++) {
result[i] = methods[i].getSnapshot();
}
return result;
}
}

View file

@ -1,131 +0,0 @@
package info.frostfs.sdk.pool;
import info.frostfs.sdk.FrostFSClient;
import info.frostfs.sdk.enums.MethodIndex;
import info.frostfs.sdk.exceptions.ResponseFrostFSException;
import info.frostfs.sdk.exceptions.ValidationFrostFSException;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.utils.WaitUtil;
import lombok.AccessLevel;
import lombok.Getter;
import org.apache.commons.lang3.StringUtils;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import static info.frostfs.sdk.constants.ErrorConst.POOL_CLIENT_UNHEALTHY;
@Getter
public class ClientWrapper extends ClientStatusMonitor {
@Getter(value = AccessLevel.NONE)
private final Lock lock = new ReentrantLock();
private final SessionCache sessionCache;
private final WrapperPrm wrapperPrm;
private FrostFSClient client;
public ClientWrapper(WrapperPrm wrapperPrm, Pool pool) {
super(wrapperPrm.getLogger(), wrapperPrm.getAddress());
this.wrapperPrm = wrapperPrm;
setErrorThreshold(wrapperPrm.getErrorThreshold());
this.sessionCache = pool.getSessionCache();
this.client = new FrostFSClient(wrapperPrm, sessionCache);
}
public FrostFSClient getClient() {
lock.lock();
try {
if (isHealthy()) {
return client;
}
return null;
} finally {
lock.unlock();
}
}
public void dial(CallContext ctx) {
FrostFSClient client = getClient();
if (client == null) {
throw new ValidationFrostFSException(POOL_CLIENT_UNHEALTHY);
}
client.dial(ctx);
}
public void handleError(Exception exp) {
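// For responses carrying a status, only node-fault codes count toward the
// health threshold; any other failure (e.g. a transport error) always counts.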
if (exp instanceof ResponseFrostFSException && ((ResponseFrostFSException) exp).getStatus() != null) {
switch (((ResponseFrostFSException) exp).getStatus().getCode()) {
case INTERNAL:
case WRONG_MAGIC_NUMBER:
case SIGNATURE_VERIFICATION_FAILURE:
case NODE_UNDER_MAINTENANCE:
incErrorRate();
}
return;
}
incErrorRate();
}
private void scheduleGracefulClose() {
if (client == null) {
return;
}
WaitUtil.sleep(wrapperPrm.getGracefulCloseOnSwitchTimeout());
client.close();
}
public CompletableFuture<Boolean> restartIfUnhealthy(CallContext ctx) {
try {
client.getLocalNodeInfo(ctx);
return CompletableFuture.completedFuture(false);
} catch (Exception ignored) {
}
if (isDialed()) {
scheduleGracefulClose();
}
return CompletableFuture.completedFuture(restartClient(ctx));
}
private boolean restartClient(CallContext ctx) {
FrostFSClient newClient = null;
try {
newClient = new FrostFSClient(wrapperPrm, sessionCache);
var error = newClient.dial(ctx);
if (StringUtils.isNotBlank(error)) {
setUnhealthyOnDial();
newClient.close();
return true;
}
lock.lock();
try {
client = newClient;
} finally {
lock.unlock();
}
} catch (Exception exp) {
if (newClient != null) {
newClient.close();
}
}
try {
client.getLocalNodeInfo(ctx);
} catch (Exception exp) {
setUnhealthy();
return true;
}
setHealthy();
return false;
}
public void incRequests(long elapsed, MethodIndex method) {
var methodStat = getMethods()[method.ordinal()];
methodStat.incRequests(elapsed);
}
}

View file

@ -1,54 +0,0 @@
package info.frostfs.sdk.pool;
import java.util.concurrent.locks.ReentrantLock;
class InnerPool {
private static final int ATTEMPTS_COUNT = 3;
private final ReentrantLock lock = new ReentrantLock();
private final ClientWrapper[] clients;
private Sampler sampler;
InnerPool(Sampler sampler, ClientWrapper[] clients) {
this.sampler = sampler;
this.clients = clients;
}
Sampler getSampler() {
return sampler;
}
void setSampler(Sampler sampler) {
this.sampler = sampler;
}
ClientWrapper[] getClients() {
return clients;
}
ClientWrapper connection() {
lock.lock();
try {
if (clients.length == 1) {
ClientWrapper client = clients[0];
if (client.isHealthy()) {
return client;
}
} else {
int attempts = ATTEMPTS_COUNT * clients.length;
for (int i = 0; i < attempts; i++) {
int index = sampler.next();
if (clients[index].isHealthy()) {
return clients[index];
}
}
}
return null;
} finally {
lock.unlock();
}
}
}

View file

@ -1,31 +0,0 @@
package info.frostfs.sdk.pool;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.Setter;
import java.util.concurrent.locks.ReentrantLock;
@Setter
@Getter
public class MethodStatus {
@Getter(AccessLevel.NONE)
private final ReentrantLock lock = new ReentrantLock();
private final String name;
private StatusSnapshot snapshot;
public MethodStatus(String name) {
this.name = name;
this.snapshot = new StatusSnapshot();
}
void incRequests(long elapsed) {
lock.lock();
try {
snapshot.setAllTime(snapshot.getAllTime() + elapsed);
snapshot.setAllRequests(snapshot.getAllRequests() + 1);
} finally {
lock.unlock();
}
}
}

View file

@ -1,13 +0,0 @@
package info.frostfs.sdk.pool;
import lombok.Getter;
import lombok.Setter;
@Getter
@Setter
public class NodeStatistic {
private String address;
private StatusSnapshot[] methods;
private long overallErrors;
private int currentErrors;
}

View file

@ -1,19 +0,0 @@
package info.frostfs.sdk.pool;
import lombok.Getter;
import java.util.ArrayList;
import java.util.List;
@Getter
public class NodesParam {
private final int priority;
private final List<String> address;
private final List<Double> weight;
public NodesParam(int priority) {
this.priority = priority;
this.address = new ArrayList<>();
this.weight = new ArrayList<>();
}
}

View file

@ -1,558 +0,0 @@
package info.frostfs.sdk.pool;
import frostfs.refs.Types;
import info.frostfs.sdk.dto.ape.Chain;
import info.frostfs.sdk.dto.container.Container;
import info.frostfs.sdk.dto.container.ContainerId;
import info.frostfs.sdk.dto.netmap.NetmapSnapshot;
import info.frostfs.sdk.dto.netmap.NodeInfo;
import info.frostfs.sdk.dto.object.ObjectFrostFS;
import info.frostfs.sdk.dto.object.ObjectHeader;
import info.frostfs.sdk.dto.object.ObjectId;
import info.frostfs.sdk.dto.object.OwnerId;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.exceptions.FrostFSException;
import info.frostfs.sdk.exceptions.SessionExpiredFrostFSException;
import info.frostfs.sdk.exceptions.SessionNotFoundFrostFSException;
import info.frostfs.sdk.exceptions.ValidationFrostFSException;
import info.frostfs.sdk.jdo.ECDsa;
import info.frostfs.sdk.jdo.NetworkSettings;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.ape.PrmApeChainAdd;
import info.frostfs.sdk.jdo.parameters.ape.PrmApeChainList;
import info.frostfs.sdk.jdo.parameters.ape.PrmApeChainRemove;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerCreate;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerDelete;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerGet;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerGetAll;
import info.frostfs.sdk.jdo.parameters.object.*;
import info.frostfs.sdk.jdo.parameters.object.patch.PrmObjectPatch;
import info.frostfs.sdk.jdo.parameters.object.patch.PrmRangeGet;
import info.frostfs.sdk.jdo.parameters.object.patch.PrmRangeHashGet;
import info.frostfs.sdk.jdo.parameters.session.PrmSessionCreate;
import info.frostfs.sdk.jdo.pool.NodeParameters;
import info.frostfs.sdk.jdo.pool.PoolInitParameters;
import info.frostfs.sdk.jdo.result.ObjectHeaderResult;
import info.frostfs.sdk.services.CommonClient;
import info.frostfs.sdk.services.impl.rwhelper.ObjectWriter;
import info.frostfs.sdk.services.impl.rwhelper.RangeReader;
import info.frostfs.sdk.utils.FrostFSMessages;
import info.frostfs.sdk.utils.WaitUtil;
import lombok.Getter;
import org.slf4j.Logger;
import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Function;
import static info.frostfs.sdk.Helper.getHexString;
import static info.frostfs.sdk.constants.ErrorConst.*;
import static info.frostfs.sdk.constants.PoolConst.*;
@Getter
public class Pool implements CommonClient {
private final ReentrantLock lock = new ReentrantLock();
private final ECDsa key;
private final SessionCache sessionCache;
private final long sessionTokenDuration;
private final RebalanceParameters rebalanceParams;
private final Function<String, ClientWrapper> clientBuilder;
private final Logger logger;
private InnerPool[] innerPools;
private Types.OwnerID ownerID;
private OwnerId ownerId;
private boolean disposedValue;
private long maxObjectSize;
private ClientStatus clientStatus;
public Pool(PoolInitParameters options) {
if (options == null || options.getKey() == null) {
throw new ValidationFrostFSException(
String.format(
PARAMS_ARE_MISSING_TEMPLATE,
String.join(
FIELDS_DELIMITER_COMMA, PoolInitParameters.class.getName(), ECDsa.class.getName()
)
)
);
}
List<NodesParam> nodesParams = adjustNodeParams(options.getNodeParams());
SessionCache cache = new SessionCache(options.getSessionExpirationDuration());
fillDefaultInitParams(options, this);
this.key = options.getKey();
this.sessionCache = cache;
this.logger = options.getLogger();
this.sessionTokenDuration = options.getSessionExpirationDuration();
this.rebalanceParams = new RebalanceParameters(
nodesParams.toArray(new NodesParam[0]),
options.getHealthCheckTimeout(),
options.getClientRebalanceInterval(),
options.getSessionExpirationDuration());
this.clientBuilder = options.getClientBuilder();
}
private static List<NodesParam> adjustNodeParams(NodeParameters[] nodeParams) {
if (nodeParams == null || nodeParams.length == 0) {
throw new ValidationFrostFSException(POOL_PEERS_IS_MISSING);
}
Map<Integer, NodesParam> nodesParamsDict = new HashMap<>(nodeParams.length);
for (NodeParameters nodeParam : nodeParams) {
var nodesParam = nodesParamsDict
.computeIfAbsent(nodeParam.getPriority(), k -> new NodesParam(nodeParam.getPriority()));
nodesParam.getAddress().add(nodeParam.getAddress());
nodesParam.getWeight().add(nodeParam.getWeight());
}
List<NodesParam> nodesParams = new ArrayList<>(nodesParamsDict.values());
nodesParams.sort(Comparator.comparingInt(NodesParam::getPriority));
for (NodesParam nodes : nodesParams) {
double[] newWeights = adjustWeights(nodes.getWeight().stream().mapToDouble(Double::doubleValue).toArray());
nodes.getWeight().clear();
for (double weight : newWeights) {
nodes.getWeight().add(weight);
}
}
return nodesParams;
}
private static double[] adjustWeights(double[] weights) {
double[] adjusted = new double[weights.length];
double sum = Arrays.stream(weights).sum();
if (sum > 0) {
for (int i = 0; i < weights.length; i++) {
adjusted[i] = weights[i] / sum;
}
}
return adjusted;
}
private static void fillDefaultInitParams(PoolInitParameters parameters, Pool pool) {
if (parameters.getSessionExpirationDuration() == 0) {
parameters.setSessionExpirationDuration(DEFAULT_SESSION_TOKEN_EXPIRATION_DURATION);
}
if (parameters.getErrorThreshold() == 0) {
parameters.setErrorThreshold(DEFAULT_ERROR_THRESHOLD);
}
if (parameters.getClientRebalanceInterval() <= 0) {
parameters.setClientRebalanceInterval(DEFAULT_REBALANCE_INTERVAL);
}
if (parameters.getGracefulCloseOnSwitchTimeout() <= 0) {
parameters.setGracefulCloseOnSwitchTimeout(DEFAULT_GRACEFUL_CLOSE_ON_SWITCH_TIMEOUT);
}
if (parameters.getHealthCheckTimeout() <= 0) {
parameters.setHealthCheckTimeout(DEFAULT_HEALTHCHECK_TIMEOUT);
}
if (parameters.getNodeDialTimeout() <= 0) {
parameters.setNodeDialTimeout(DEFAULT_DIAL_TIMEOUT);
}
if (parameters.getNodeStreamTimeout() <= 0) {
parameters.setNodeStreamTimeout(DEFAULT_STREAM_TIMEOUT);
}
if (parameters.getClientBuilder() == null) {
parameters.setClientBuilder(address -> {
WrapperPrm wrapperPrm = new WrapperPrm();
wrapperPrm.setAddress(address);
wrapperPrm.setKey(parameters.getKey());
wrapperPrm.setLogger(parameters.getLogger());
wrapperPrm.setDialTimeout(parameters.getNodeDialTimeout());
wrapperPrm.setStreamTimeout(parameters.getNodeStreamTimeout());
wrapperPrm.setErrorThreshold(parameters.getErrorThreshold());
wrapperPrm.setGracefulCloseOnSwitchTimeout(parameters.getGracefulCloseOnSwitchTimeout());
wrapperPrm.setInterceptors(parameters.getInterceptors());
return new ClientWrapper(wrapperPrm, pool);
});
}
}
private static SessionToken initSessionForDuration(CallContext ctx, ClientWrapper cw, long duration) {
var client = cw.getClient();
NetworkSettings networkInfo = client.getNetworkSettings(ctx);
long epoch = networkInfo.getEpochDuration();
long exp = (Long.MAX_VALUE - epoch < duration) ? Long.MAX_VALUE : (epoch + duration);
return client.createSession(new PrmSessionCreate(exp), ctx);
}
public static String formCacheKey(String address, String key) {
return address + key;
}
@Override
public String dial(CallContext ctx) {
InnerPool[] inner = new InnerPool[rebalanceParams.getNodesParams().length];
boolean atLeastOneHealthy = false;
int i = 0;
for (NodesParam nodeParams : rebalanceParams.getNodesParams()) {
ClientWrapper[] clients = new ClientWrapper[nodeParams.getWeight().size()];
for (int j = 0; j < nodeParams.getAddress().size(); j++) {
ClientWrapper client = clients[j] = clientBuilder.apply(nodeParams.getAddress().get(j));
boolean dialed = false;
try {
client.dial(ctx);
dialed = true;
SessionToken token = initSessionForDuration(
ctx, client, rebalanceParams.getSessionExpirationDuration()
);
String cacheKey = formCacheKey(
nodeParams.getAddress().get(j),
getHexString(key.getPublicKeyByte())
);
sessionCache.setValue(cacheKey, token);
atLeastOneHealthy = true;
} catch (ValidationFrostFSException exp) {
break;
} catch (Exception exp) {
if (!dialed) {
client.setUnhealthyOnDial();
} else {
client.setUnhealthy();
}
if (logger != null) {
FrostFSMessages
.sessionCreationError(logger, client.getWrapperPrm().getAddress(), exp.getMessage());
}
}
}
Sampler sampler = new Sampler(nodeParams.getWeight().stream().mapToDouble(Double::doubleValue).toArray());
inner[i] = new InnerPool(sampler, clients);
i++;
}
if (!atLeastOneHealthy) {
return POOL_NODES_UNHEALTHY;
}
this.innerPools = inner;
NetworkSettings networkSettings = getNetworkSettings(ctx);
this.maxObjectSize = networkSettings.getMaxObjectSize();
startRebalance(ctx);
return null;
}
private ClientWrapper connection() {
for (InnerPool pool : innerPools) {
ClientWrapper client = pool.connection();
if (client != null) {
return client;
}
}
throw new FrostFSException(POOL_CLIENTS_UNHEALTHY);
}
public void close() {
if (innerPools != null) {
for (InnerPool innerPool : innerPools) {
for (ClientWrapper client : innerPool.getClients()) {
if (client.isDialed()) {
client.getClient().close();
}
}
}
}
}
public void startRebalance(CallContext ctx) {
double[][] buffers = new double[rebalanceParams.getNodesParams().length][];
for (int i = 0; i < rebalanceParams.getNodesParams().length; i++) {
NodesParam parameters = rebalanceParams.getNodesParams()[i];
buffers[i] = new double[parameters.getWeight().size()];
}
// Rebalancing is a periodic routine (see ClientStatus.getCurrentErrorRate):
// run one background task instead of a one-shot task per priority tier.
CompletableFuture.runAsync(() -> {
while (true) {
WaitUtil.sleep(rebalanceParams.getClientRebalanceInterval());
updateNodesHealth(ctx, buffers);
}
});
}
private void updateNodesHealth(CallContext ctx, double[][] buffers) {
CompletableFuture<?>[] tasks = new CompletableFuture<?>[innerPools.length];
for (int i = 0; i < innerPools.length; i++) {
double[] bufferWeights = buffers[i];
int finalI = i;
tasks[i] = CompletableFuture.runAsync(() -> updateInnerNodesHealth(ctx, finalI, bufferWeights));
}
CompletableFuture.allOf(tasks).join();
}
private void updateInnerNodesHealth(CallContext ctx, int poolIndex, double[] bufferWeights) {
if (poolIndex > innerPools.length - 1) {
return;
}
InnerPool pool = innerPools[poolIndex];
RebalanceParameters options = rebalanceParams;
int[] healthyChanged = {0};
CompletableFuture<?>[] tasks = new CompletableFuture<?>[pool.getClients().length];
for (int j = 0; j < pool.getClients().length; j++) {
ClientWrapper client = pool.getClients()[j];
AtomicBoolean healthy = new AtomicBoolean(false);
AtomicReference<String> error = new AtomicReference<>();
AtomicBoolean changed = new AtomicBoolean(false);
int finalJ = j;
tasks[j] = client.restartIfUnhealthy(ctx).handle((unused, throwable) -> {
if (throwable != null) {
error.set(throwable.getMessage());
bufferWeights[finalJ] = 0;
sessionCache.deleteByPrefix(client.getAddress());
} else {
changed.set(unused);
healthy.set(true);
bufferWeights[finalJ] = options.getNodesParams()[poolIndex].getWeight().get(finalJ);
}
return null;
}).thenRun(() -> {
if (changed.get()) {
if (error.get() != null && logger != null) {
FrostFSMessages.healthChanged(logger, client.getAddress(), healthy.get(), error.get());
}
healthyChanged[0] = 1;
}
});
}
CompletableFuture.allOf(tasks).thenRun(() -> {
if (healthyChanged[0] == 1) {
double[] probabilities = adjustWeights(bufferWeights);
lock.lock();
try {
pool.setSampler(new Sampler(probabilities));
} finally {
lock.unlock();
}
}
});
}
private boolean checkSessionTokenErr(Exception error, String address) {
if (error == null) {
return false;
}
if (error instanceof SessionNotFoundFrostFSException || error instanceof SessionExpiredFrostFSException) {
sessionCache.deleteByPrefix(address);
return true;
}
return false;
}
public Statistic statistic() {
if (innerPools == null) {
throw new ValidationFrostFSException(POOL_NOT_DIALED);
}
Statistic statistics = new Statistic();
for (InnerPool inner : innerPools) {
int valueIndex = 0;
String[] nodes = new String[inner.getClients().length];
lock.lock();
try {
for (ClientWrapper client : inner.getClients()) {
if (client.isHealthy()) {
nodes[valueIndex] = client.getAddress();
}
NodeStatistic node = new NodeStatistic();
node.setAddress(client.getAddress());
node.setMethods(client.getMethodsStatus());
node.setOverallErrors(client.getOverallErrorRate());
node.setCurrentErrors(client.getCurrentErrorRate());
statistics.getNodes().add(node);
valueIndex++;
statistics.setOverallErrors(statistics.getOverallErrors() + node.getOverallErrors());
}
if (statistics.getCurrentNodes() == null || statistics.getCurrentNodes().length == 0) {
statistics.setCurrentNodes(nodes);
}
} finally {
lock.unlock();
}
}
return statistics;
}
@Override
public Container getContainer(PrmContainerGet args, CallContext ctx) {
ClientWrapper client = connection();
return client.getClient().getContainer(args, ctx);
}
@Override
public List<ContainerId> listContainers(PrmContainerGetAll args, CallContext ctx) {
ClientWrapper client = connection();
return client.getClient().listContainers(args, ctx);
}
@Override
public ContainerId createContainer(PrmContainerCreate args, CallContext ctx) {
ClientWrapper client = connection();
return client.getClient().createContainer(args, ctx);
}
@Override
public void deleteContainer(PrmContainerDelete args, CallContext ctx) {
ClientWrapper client = connection();
client.getClient().deleteContainer(args, ctx);
}
@Override
public ObjectHeaderResult getObjectHead(PrmObjectHeadGet args, CallContext ctx) {
ClientWrapper client = connection();
return client.getClient().getObjectHead(args, ctx);
}
@Override
public ObjectFrostFS getObject(PrmObjectGet args, CallContext ctx) {
ClientWrapper client = connection();
return client.getClient().getObject(args, ctx);
}
@Override
public ObjectWriter putObject(PrmObjectPut args, CallContext ctx) {
ClientWrapper client = connection();
return client.getClient().putObject(args, ctx);
}
@Override
public ObjectId putClientCutObject(PrmObjectClientCutPut args, CallContext ctx) {
ClientWrapper client = connection();
return client.getClient().putClientCutObject(args, ctx);
}
@Override
public ObjectId putSingleObject(PrmObjectSinglePut args, CallContext ctx) {
ClientWrapper client = connection();
return client.getClient().putSingleObject(args, ctx);
}
@Override
public void deleteObject(PrmObjectDelete args, CallContext ctx) {
ClientWrapper client = connection();
client.getClient().deleteObject(args, ctx);
}
@Override
public Iterable<ObjectId> searchObjects(PrmObjectSearch args, CallContext ctx) {
ClientWrapper client = connection();
return client.getClient().searchObjects(args, ctx);
}
@Override
public RangeReader getRange(PrmRangeGet args, CallContext ctx) {
ClientWrapper client = connection();
return client.getClient().getRange(args, ctx);
}
@Override
public byte[][] getRangeHash(PrmRangeHashGet args, CallContext ctx) {
ClientWrapper client = connection();
return client.getClient().getRangeHash(args, ctx);
}
@Override
public ObjectId patchObject(PrmObjectPatch args, CallContext ctx) {
ClientWrapper client = connection();
return client.getClient().patchObject(args, ctx);
}
@Override
public byte[] addChain(PrmApeChainAdd args, CallContext ctx) {
ClientWrapper client = connection();
return client.getClient().addChain(args, ctx);
}
@Override
public void removeChain(PrmApeChainRemove args, CallContext ctx) {
ClientWrapper client = connection();
client.getClient().removeChain(args, ctx);
}
@Override
public List<Chain> listChains(PrmApeChainList args, CallContext ctx) {
ClientWrapper client = connection();
return client.getClient().listChains(args, ctx);
}
@Override
public NetmapSnapshot getNetmapSnapshot(CallContext ctx) {
ClientWrapper client = connection();
return client.getClient().getNetmapSnapshot(ctx);
}
@Override
public NodeInfo getLocalNodeInfo(CallContext ctx) {
ClientWrapper client = connection();
return client.getClient().getLocalNodeInfo(ctx);
}
@Override
public NetworkSettings getNetworkSettings(CallContext ctx) {
ClientWrapper client = connection();
return client.getClient().getNetworkSettings(ctx);
}
@Override
public SessionToken createSession(PrmSessionCreate args, CallContext ctx) {
ClientWrapper client = connection();
return client.getClient().createSession(args, ctx);
}
@Override
public ObjectId calculateObjectId(ObjectHeader header) {
ClientWrapper client = connection();
return client.getClient().calculateObjectId(header);
}
@Override
public frostfs.accounting.Types.Decimal getBalance(CallContext ctx) {
ClientWrapper client = connection();
return client.getClient().getBalance(ctx);
}
}
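
A minimal sketch of wiring the pool together, assuming the ECDsa key and CallContext are created elsewhere (their construction is outside this listing) and using placeholder endpoints:

import info.frostfs.sdk.dto.netmap.NetmapSnapshot;
import info.frostfs.sdk.jdo.ECDsa;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.pool.NodeParameters;
import info.frostfs.sdk.jdo.pool.PoolInitParameters;
import info.frostfs.sdk.pool.Pool;
class PoolSketch {
static Pool connect(ECDsa key, CallContext ctx) {
PoolInitParameters params = new PoolInitParameters();
params.setKey(key);
params.setNodeParams(new NodeParameters[]{
new NodeParameters(1, "grpc://node1.example.com:8080", 1.0),
new NodeParameters(1, "grpc://node2.example.com:8080", 1.0),
new NodeParameters(2, "grpc://backup.example.com:8080", 1.0) // tried only if tier 1 is down
});
Pool pool = new Pool(params); // unset timeouts and thresholds fall back to the defaults above
String error = pool.dial(ctx); // null on success, POOL_NODES_UNHEALTHY otherwise
if (error != null) {
throw new IllegalStateException(error);
}
NetmapSnapshot snapshot = pool.getNetmapSnapshot(ctx); // the pool is ready for use
return pool;
}
}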

View file

@ -1,15 +0,0 @@
package info.frostfs.sdk.pool;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.Setter;
@Getter
@Setter
@AllArgsConstructor
public class RebalanceParameters {
private NodesParam[] nodesParams;
private long nodeRequestTimeout;
private long clientRebalanceInterval;
private long sessionExpirationDuration;
}

View file

@ -1,15 +0,0 @@
package info.frostfs.sdk.pool;
import info.frostfs.sdk.enums.MethodIndex;
import lombok.Getter;
import lombok.Setter;
import java.time.Duration;
@Getter
@Setter
public class RequestInfo {
private String address;
private MethodIndex methodIndex;
private Duration elapsed;
}

View file

@ -1,77 +0,0 @@
package info.frostfs.sdk.pool;
import java.util.ArrayList;
import java.util.Random;
// Vose's alias method: O(1) weighted sampling over a fixed discrete distribution.
class Sampler {
private final Object lock = new Object();
private final Random random = new Random();
private final double[] probabilities;
private final int[] alias;
Sampler(double[] probabilities) {
ArrayList<Integer> small = new ArrayList<>();
ArrayList<Integer> large = new ArrayList<>();
int n = probabilities.length;
this.probabilities = new double[n];
this.alias = new int[n];
// Compute scaled probabilities.
double[] p = new double[n];
for (int i = 0; i < n; i++) {
p[i] = probabilities[i] * n;
if (p[i] < 1) {
small.add(i);
} else {
large.add(i);
}
}
while (!small.isEmpty() && !large.isEmpty()) {
int l = small.remove(small.size() - 1);
int g = large.remove(large.size() - 1);
this.probabilities[l] = p[l];
this.alias[l] = g;
p[g] = p[g] + p[l] - 1;
if (p[g] < 1) {
small.add(g);
} else {
large.add(g);
}
}
// Anything left over is a (numerically) saturated column.
while (!large.isEmpty()) {
int g = large.remove(large.size() - 1);
this.probabilities[g] = 1;
}
while (!small.isEmpty()) {
int l = small.remove(small.size() - 1);
this.probabilities[l] = 1; // 'this.' matters: the bare name shadowed the field and wrote to the input array
}
}
int next() {
int n = alias.length;
int i;
double f;
synchronized (lock) {
i = random.nextInt(n);
f = random.nextDouble();
}
if (f < probabilities[i]) {
return i;
}
return alias[i];
}
}
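A quick sanity check of the alias tables above (standalone sketch; the 50/30/20 weights are invented, and the demo sits in the same package because Sampler is package-private):

package info.frostfs.sdk.pool;

class SamplerDemo {
    public static void main(String[] args) {
        // Three nodes weighted 50% / 30% / 20%.
        Sampler sampler = new Sampler(new double[]{0.5, 0.3, 0.2});
        int[] hits = new int[3];
        for (int i = 0; i < 100_000; i++) {
            hits[sampler.next()]++;
        }
        // Expect roughly 50_000 / 30_000 / 20_000 hits.
        System.out.printf("%d %d %d%n", hits[0], hits[1], hits[2]);
    }
}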


@ -1,37 +0,0 @@
package info.frostfs.sdk.pool;
import info.frostfs.sdk.dto.session.SessionToken;
import org.apache.commons.lang3.StringUtils;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
public class SessionCache {
private final ConcurrentMap<String, SessionToken> cache = new ConcurrentHashMap<>();
private final long tokenDuration;
private long currentEpoch;
public SessionCache(long sessionExpirationDuration) {
this.tokenDuration = sessionExpirationDuration;
}
public boolean contains(String key) {
return cache.containsKey(key);
}
public SessionToken tryGetValue(String key) {
return StringUtils.isBlank(key) ? null : cache.get(key);
}
public void setValue(String key, SessionToken value) {
if (key != null) {
cache.put(key, value);
}
}
public void deleteByPrefix(String prefix) {
cache.keySet().removeIf(key -> key.startsWith(prefix));
}
}
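A brief sketch of how this cache is typically consulted; deleteByPrefix(nodeAddress) then evicts every session of a node that went unhealthy. Only the SessionCache API comes from the class itself — the composite-key convention below is an assumption:

import info.frostfs.sdk.dto.session.SessionToken;

class SessionCacheExample {
    // Return a cached token for this node/owner pair, or store a freshly created one.
    static SessionToken getOrStore(SessionCache cache, String nodeAddress, String owner, SessionToken fresh) {
        String key = nodeAddress + owner; // assumed composite-key format
        SessionToken cached = cache.tryGetValue(key);
        if (cached != null) {
            return cached;
        }
        cache.setValue(key, fresh);
        return fresh;
    }
}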


@ -1,15 +0,0 @@
package info.frostfs.sdk.pool;
import lombok.Getter;
import lombok.Setter;
import java.util.ArrayList;
import java.util.List;
@Getter
@Setter
public class Statistic {
private long overallErrors;
private List<NodeStatistic> nodes = new ArrayList<>();
private String[] currentNodes;
}


@ -1,13 +0,0 @@
package info.frostfs.sdk.pool;
import lombok.Getter;
import lombok.Setter;
@Getter
@Setter
public class StatusSnapshot {
private long allTime;
private long allRequests;
}


@ -1,22 +0,0 @@
package info.frostfs.sdk.pool;
import java.util.ArrayList;
import java.util.List;
class WorkList {
private final List<Integer> elements = new ArrayList<>();
// Accessors are package-private rather than private: as originally written, every
// method was private, making the class unusable even by pool internals.
int getLength() {
return elements.size();
}
void add(int element) {
elements.add(element);
}
// Removes and returns the last element (LIFO order).
int remove() {
int last = elements.get(elements.size() - 1);
elements.remove(elements.size() - 1);
return last;
}
}


@ -1,26 +0,0 @@
package info.frostfs.sdk.pool;
import info.frostfs.sdk.jdo.ECDsa;
import io.grpc.ClientInterceptor;
import io.grpc.ManagedChannelBuilder;
import lombok.Getter;
import lombok.Setter;
import org.slf4j.Logger;
import java.util.Collection;
@Getter
@Setter
public class WrapperPrm {
private Logger logger;
private String address;
private ECDsa key;
private long dialTimeout;
private long streamTimeout;
private int errorThreshold;
private Runnable responseInfoCallback;
private Runnable poolRequestInfoCallback;
private ManagedChannelBuilder<?> grpcChannelOptions;
private long gracefulCloseOnSwitchTimeout;
private Collection<ClientInterceptor> interceptors; // the gRPC interceptor interface, not the ClientInterceptors utility class
}


@ -1,8 +0,0 @@
package info.frostfs.sdk.services;
import frostfs.accounting.Types;
import info.frostfs.sdk.jdo.parameters.CallContext;
public interface AccountingClient {
Types.Decimal getBalance(CallContext ctx);
}


@ -1,17 +0,0 @@
package info.frostfs.sdk.services;
import info.frostfs.sdk.dto.ape.Chain;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.ape.PrmApeChainAdd;
import info.frostfs.sdk.jdo.parameters.ape.PrmApeChainList;
import info.frostfs.sdk.jdo.parameters.ape.PrmApeChainRemove;
import java.util.List;
public interface ApeManagerClient {
byte[] addChain(PrmApeChainAdd args, CallContext ctx);
void removeChain(PrmApeChainRemove args, CallContext ctx);
List<Chain> listChains(PrmApeChainList args, CallContext ctx);
}
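A hedged usage sketch of this interface (the Prm* parameter objects are passed in ready-made, since their builders are not shown here):

import info.frostfs.sdk.dto.ape.Chain;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.ape.PrmApeChainList;
import info.frostfs.sdk.services.ApeManagerClient;
import java.util.List;

class ApeChainDumpExample {
    static void dumpChains(ApeManagerClient client, PrmApeChainList args, CallContext ctx) {
        // Each returned Chain was deserialized by the SDK from the raw APE rule bytes.
        List<Chain> chains = client.listChains(args, ctx);
        chains.forEach(System.out::println);
    }
}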


@ -1,9 +0,0 @@
package info.frostfs.sdk.services;
import info.frostfs.sdk.jdo.parameters.CallContext;
public interface CommonClient extends
AccountingClient, ApeManagerClient, ContainerClient, NetmapClient, ObjectClient, SessionClient, ToolsClient {
String dial(CallContext ctx);
}


@ -1,21 +0,0 @@
package info.frostfs.sdk.services;
import info.frostfs.sdk.dto.container.Container;
import info.frostfs.sdk.dto.container.ContainerId;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerCreate;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerDelete;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerGet;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerGetAll;
import java.util.List;
public interface ContainerClient {
Container getContainer(PrmContainerGet args, CallContext ctx);
List<ContainerId> listContainers(PrmContainerGetAll args, CallContext ctx);
ContainerId createContainer(PrmContainerCreate args, CallContext ctx);
void deleteContainer(PrmContainerDelete args, CallContext ctx);
}
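A container lifecycle sketch against this interface (both Prm* objects are injected, since wiring the returned ContainerId into PrmContainerGet would require constructor assumptions):

import info.frostfs.sdk.dto.container.Container;
import info.frostfs.sdk.dto.container.ContainerId;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerCreate;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerGet;
import info.frostfs.sdk.services.ContainerClient;

class ContainerLifecycleExample {
    static Container createAndFetch(ContainerClient client, PrmContainerCreate create,
                                    PrmContainerGet get, CallContext ctx) {
        ContainerId cid = client.createContainer(create, ctx); // blocks until the container is visible
        System.out.println("created " + cid);
        return client.getContainer(get, ctx);
    }
}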


@ -1,13 +0,0 @@
package info.frostfs.sdk.services;
import info.frostfs.sdk.jdo.ClientEnvironment;
import lombok.Getter;
@Getter
public class ContextAccessor {
private final ClientEnvironment context;
public ContextAccessor(ClientEnvironment context) {
this.context = context;
}
}


@ -1,14 +0,0 @@
package info.frostfs.sdk.services;
import info.frostfs.sdk.dto.netmap.NetmapSnapshot;
import info.frostfs.sdk.dto.netmap.NodeInfo;
import info.frostfs.sdk.jdo.NetworkSettings;
import info.frostfs.sdk.jdo.parameters.CallContext;
public interface NetmapClient {
NetmapSnapshot getNetmapSnapshot(CallContext ctx);
NodeInfo getLocalNodeInfo(CallContext ctx);
NetworkSettings getNetworkSettings(CallContext ctx);
}


@ -1,34 +0,0 @@
package info.frostfs.sdk.services;
import info.frostfs.sdk.dto.object.ObjectFrostFS;
import info.frostfs.sdk.dto.object.ObjectId;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.object.*;
import info.frostfs.sdk.jdo.parameters.object.patch.PrmObjectPatch;
import info.frostfs.sdk.jdo.parameters.object.patch.PrmRangeGet;
import info.frostfs.sdk.jdo.parameters.object.patch.PrmRangeHashGet;
import info.frostfs.sdk.jdo.result.ObjectHeaderResult;
import info.frostfs.sdk.services.impl.rwhelper.ObjectWriter;
import info.frostfs.sdk.services.impl.rwhelper.RangeReader;
public interface ObjectClient {
ObjectHeaderResult getObjectHead(PrmObjectHeadGet args, CallContext ctx);
ObjectFrostFS getObject(PrmObjectGet args, CallContext ctx);
ObjectWriter putObject(PrmObjectPut args, CallContext ctx);
ObjectId putClientCutObject(PrmObjectClientCutPut args, CallContext ctx);
ObjectId putSingleObject(PrmObjectSinglePut args, CallContext ctx);
void deleteObject(PrmObjectDelete args, CallContext ctx);
Iterable<ObjectId> searchObjects(PrmObjectSearch args, CallContext ctx);
RangeReader getRange(PrmRangeGet args, CallContext ctx);
byte[][] getRangeHash(PrmRangeHashGet args, CallContext ctx);
ObjectId patchObject(PrmObjectPatch args, CallContext ctx);
}
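A read-path sketch for this interface (parameter objects injected; head fetches metadata only, while get returns an object whose payload is streamed through the attached reader):

import info.frostfs.sdk.dto.object.ObjectFrostFS;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.object.PrmObjectGet;
import info.frostfs.sdk.jdo.parameters.object.PrmObjectHeadGet;
import info.frostfs.sdk.jdo.result.ObjectHeaderResult;
import info.frostfs.sdk.services.ObjectClient;

class ObjectReadExample {
    static void headThenGet(ObjectClient client, PrmObjectHeadGet head, PrmObjectGet get, CallContext ctx) {
        ObjectHeaderResult header = client.getObjectHead(head, ctx); // metadata only, no payload
        System.out.println(header);
        ObjectFrostFS object = client.getObject(get, ctx);
        System.out.println(object);
    }
}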


@ -1,9 +0,0 @@
package info.frostfs.sdk.services;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.session.PrmSessionCreate;
public interface SessionClient {
SessionToken createSession(PrmSessionCreate args, CallContext ctx);
}


@ -1,9 +0,0 @@
package info.frostfs.sdk.services;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.ClientEnvironment;
import info.frostfs.sdk.jdo.parameters.CallContext;
public interface SessionTools {
SessionToken getOrCreateSession(ClientEnvironment env, CallContext ctx);
}


@ -1,8 +0,0 @@
package info.frostfs.sdk.services;
import info.frostfs.sdk.dto.object.ObjectHeader;
import info.frostfs.sdk.dto.object.ObjectId;
public interface ToolsClient {
ObjectId calculateObjectId(ObjectHeader header);
}


@ -1,48 +0,0 @@
package info.frostfs.sdk.services.impl;
import frostfs.accounting.AccountingServiceGrpc;
import frostfs.accounting.Service;
import frostfs.accounting.Types;
import info.frostfs.sdk.jdo.ClientEnvironment;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.mappers.object.OwnerIdMapper;
import info.frostfs.sdk.services.AccountingClient;
import info.frostfs.sdk.services.ContextAccessor;
import info.frostfs.sdk.tools.RequestConstructor;
import info.frostfs.sdk.tools.RequestSigner;
import info.frostfs.sdk.tools.Verifier;
import static info.frostfs.sdk.utils.DeadLineUtil.deadLineAfter;
public class AccountingClientImpl extends ContextAccessor implements AccountingClient {
private final AccountingServiceGrpc.AccountingServiceBlockingStub serviceBlockingStub;
public AccountingClientImpl(ClientEnvironment clientEnvironment) {
super(clientEnvironment);
this.serviceBlockingStub = AccountingServiceGrpc.newBlockingStub(getContext().getChannel());
}
@Override
public Types.Decimal getBalance(CallContext ctx) {
var request = createGetRequest();
var service = deadLineAfter(serviceBlockingStub, ctx.getTimeout(), ctx.getTimeUnit());
var response = service.balance(request);
Verifier.checkResponse(response);
return response.getBody().getBalance();
}
private Service.BalanceRequest createGetRequest() {
var body = Service.BalanceRequest.Body.newBuilder()
.setOwnerId(OwnerIdMapper.toGrpcMessage(getContext().getOwnerId()))
.build();
var request = Service.BalanceRequest.newBuilder()
.setBody(body);
RequestConstructor.addMetaHeader(request);
RequestSigner.sign(request, getContext().getKey());
return request.build();
}
}
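A sketch of consuming the returned balance (Decimal carries a fixed-point integer plus a precision, per the FrostFS accounting proto; the accessor names below are assumptions based on that proto):

class BalanceFormatExample {
    static double toUnits(frostfs.accounting.Types.Decimal balance) {
        // value is a fixed-point integer; precision is the number of decimal places.
        return balance.getValue() / Math.pow(10, balance.getPrecision());
    }
}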


@ -1,123 +0,0 @@
package info.frostfs.sdk.services.impl;
import com.google.protobuf.ByteString;
import frostfs.ape.Types;
import frostfs.apemanager.APEManagerServiceGrpc;
import frostfs.apemanager.Service;
import info.frostfs.sdk.dto.ape.Chain;
import info.frostfs.sdk.jdo.ClientEnvironment;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.ape.PrmApeChainAdd;
import info.frostfs.sdk.jdo.parameters.ape.PrmApeChainList;
import info.frostfs.sdk.jdo.parameters.ape.PrmApeChainRemove;
import info.frostfs.sdk.mappers.chain.ChainTargetMapper;
import info.frostfs.sdk.services.ApeManagerClient;
import info.frostfs.sdk.services.ContextAccessor;
import info.frostfs.sdk.tools.RequestConstructor;
import info.frostfs.sdk.tools.RequestSigner;
import info.frostfs.sdk.tools.Verifier;
import info.frostfs.sdk.tools.ape.RuleDeserializer;
import info.frostfs.sdk.tools.ape.RuleSerializer;
import java.util.List;
import java.util.stream.Collectors;
import static info.frostfs.sdk.utils.DeadLineUtil.deadLineAfter;
import static info.frostfs.sdk.utils.Validator.validate;
public class ApeManagerClientImpl extends ContextAccessor implements ApeManagerClient {
private final APEManagerServiceGrpc.APEManagerServiceBlockingStub apeManagerServiceClient;
public ApeManagerClientImpl(ClientEnvironment clientEnvironment) {
super(clientEnvironment);
this.apeManagerServiceClient = APEManagerServiceGrpc.newBlockingStub(getContext().getChannel());
}
@Override
public byte[] addChain(PrmApeChainAdd args, CallContext ctx) {
validate(args);
var request = createAddChainRequest(args);
var service = deadLineAfter(apeManagerServiceClient, ctx.getTimeout(), ctx.getTimeUnit());
var response = service.addChain(request);
Verifier.checkResponse(response);
return response.getBody().getChainId().toByteArray();
}
@Override
public void removeChain(PrmApeChainRemove args, CallContext ctx) {
validate(args);
var request = createRemoveChainRequest(args);
var service = deadLineAfter(apeManagerServiceClient, ctx.getTimeout(), ctx.getTimeUnit());
var response = service.removeChain(request);
Verifier.checkResponse(response);
}
@Override
public List<Chain> listChains(PrmApeChainList args, CallContext ctx) {
validate(args);
var request = createListChainsRequest(args);
var service = deadLineAfter(apeManagerServiceClient, ctx.getTimeout(), ctx.getTimeUnit());
var response = service.listChains(request);
Verifier.checkResponse(response);
return response.getBody().getChainsList().stream()
.map(chain -> RuleDeserializer.deserialize(chain.getRaw().toByteArray()))
.collect(Collectors.toList());
}
private Service.AddChainRequest createAddChainRequest(PrmApeChainAdd args) {
var raw = RuleSerializer.serialize(args.getChain());
var chainGrpc = Types.Chain.newBuilder()
.setRaw(ByteString.copyFrom(raw))
.build();
var body = Service.AddChainRequest.Body.newBuilder()
.setChain(chainGrpc)
.setTarget(ChainTargetMapper.toGrpcMessage(args.getChainTarget()))
.build();
var request = Service.AddChainRequest.newBuilder()
.setBody(body);
RequestConstructor.addMetaHeader(request, args.getXHeaders());
RequestSigner.sign(request, getContext().getKey());
return request.build();
}
private Service.RemoveChainRequest createRemoveChainRequest(PrmApeChainRemove args) {
var body = Service.RemoveChainRequest.Body.newBuilder()
.setChainId(ByteString.copyFrom(args.getChainId()))
.setTarget(ChainTargetMapper.toGrpcMessage(args.getChainTarget()))
.build();
var request = Service.RemoveChainRequest.newBuilder()
.setBody(body);
RequestConstructor.addMetaHeader(request, args.getXHeaders());
RequestSigner.sign(request, getContext().getKey());
return request.build();
}
private Service.ListChainsRequest createListChainsRequest(PrmApeChainList args) {
var body = Service.ListChainsRequest.Body.newBuilder()
.setTarget(ChainTargetMapper.toGrpcMessage(args.getChainTarget()))
.build();
var request = Service.ListChainsRequest.newBuilder()
.setBody(body);
RequestConstructor.addMetaHeader(request, args.getXHeaders());
RequestSigner.sign(request, getContext().getKey());
return request.build();
}
}


@ -1,253 +0,0 @@
package info.frostfs.sdk.services.impl;
import frostfs.container.ContainerServiceGrpc;
import frostfs.container.Service;
import frostfs.refs.Types;
import info.frostfs.sdk.dto.container.Container;
import info.frostfs.sdk.dto.container.ContainerId;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.enums.StatusCode;
import info.frostfs.sdk.enums.WaitExpects;
import info.frostfs.sdk.exceptions.ResponseFrostFSException;
import info.frostfs.sdk.exceptions.TimeoutFrostFSException;
import info.frostfs.sdk.jdo.ClientEnvironment;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.PrmWait;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerCreate;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerDelete;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerGet;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerGetAll;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import info.frostfs.sdk.mappers.container.ContainerIdMapper;
import info.frostfs.sdk.mappers.container.ContainerMapper;
import info.frostfs.sdk.mappers.netmap.VersionMapper;
import info.frostfs.sdk.mappers.object.OwnerIdMapper;
import info.frostfs.sdk.services.ContainerClient;
import info.frostfs.sdk.services.ContextAccessor;
import info.frostfs.sdk.tools.RequestConstructor;
import info.frostfs.sdk.tools.RequestSigner;
import info.frostfs.sdk.tools.Verifier;
import info.frostfs.sdk.utils.WaitUtil;
import java.time.LocalDateTime;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import static info.frostfs.sdk.constants.AttributeConst.DISABLE_HOMOMORPHIC_HASHING_ATTRIBUTE;
import static info.frostfs.sdk.utils.DeadLineUtil.deadLineAfter;
import static info.frostfs.sdk.utils.Validator.validate;
import static java.util.Objects.isNull;
import static java.util.Objects.nonNull;
public class ContainerClientImpl extends ContextAccessor implements ContainerClient {
private final ContainerServiceGrpc.ContainerServiceBlockingStub serviceBlockingStub;
private final SessionToolsImpl sessionTools;
public ContainerClientImpl(ClientEnvironment clientEnvironment) {
super(clientEnvironment);
this.serviceBlockingStub = ContainerServiceGrpc.newBlockingStub(clientEnvironment.getChannel());
this.sessionTools = new SessionToolsImpl(clientEnvironment);
}
public SessionToken getOrCreateSession(SessionContext sessionContext, CallContext ctx) {
return isNull(sessionContext.getSessionToken())
? sessionTools.getOrCreateSession(getContext(), ctx)
: sessionContext.getSessionToken();
}
@Override
public Container getContainer(PrmContainerGet args, CallContext ctx) {
validate(args);
var request = createGetRequest(args);
var service = deadLineAfter(serviceBlockingStub, ctx.getTimeout(), ctx.getTimeUnit());
var response = service.get(request);
Verifier.checkResponse(response);
return ContainerMapper.toModel(response.getBody().getContainer());
}
@Override
public List<ContainerId> listContainers(PrmContainerGetAll args, CallContext ctx) {
validate(args);
var request = createListRequest(args);
var service = deadLineAfter(serviceBlockingStub, ctx.getTimeout(), ctx.getTimeUnit());
var response = service.list(request);
Verifier.checkResponse(response);
return response.getBody().getContainerIdsList().stream()
.map(cid -> new ContainerId(cid.getValue().toByteArray()))
.collect(Collectors.toList());
}
@Override
public ContainerId createContainer(PrmContainerCreate args, CallContext ctx) {
validate(args);
var request = createPutRequest(args, ctx);
var service = deadLineAfter(serviceBlockingStub, ctx.getTimeout(), ctx.getTimeUnit());
var response = service.put(request);
Verifier.checkResponse(response);
waitForContainer(WaitExpects.EXISTS, response.getBody().getContainerId(), args.getWaitParams());
return new ContainerId(response.getBody().getContainerId().getValue().toByteArray());
}
@Override
public void deleteContainer(PrmContainerDelete args, CallContext ctx) {
validate(args);
var request = createDeleteRequest(args, ctx);
var service = deadLineAfter(serviceBlockingStub, ctx.getTimeout(), ctx.getTimeUnit());
var response = service.delete(request);
Verifier.checkResponse(response);
waitForContainer(WaitExpects.REMOVED, request.getBody().getContainerId(), args.getWaitParams());
}
private void waitForContainer(WaitExpects expect, Types.ContainerID cid, PrmWait waitParams) {
var request = createGetRequest(cid, null);
waitParams = isNull(waitParams) ? new PrmWait() : waitParams;
var deadLine = waitParams.getDeadline();
while (true) {
try {
var response = serviceBlockingStub.get(request);
Verifier.checkResponse(response);
if (expect == WaitExpects.EXISTS) {
break;
}
if (LocalDateTime.now().isAfter(deadLine)) {
throw new TimeoutFrostFSException();
}
WaitUtil.sleep(waitParams.getPollInterval().toMillis());
} catch (ResponseFrostFSException exp) {
if (LocalDateTime.now().isAfter(deadLine)) {
throw new TimeoutFrostFSException();
}
if (exp.getStatus().getCode() != StatusCode.CONTAINER_NOT_FOUND) {
throw exp;
}
if (expect == WaitExpects.REMOVED) {
break;
}
WaitUtil.sleep(waitParams.getPollInterval().toMillis());
}
}
}
private Service.GetRequest createGetRequest(PrmContainerGet args) {
var cid = ContainerIdMapper.toGrpcMessage(args.getContainerId());
return createGetRequest(cid, args.getXHeaders());
}
private Service.GetRequest createGetRequest(Types.ContainerID cid, Map<String, String> xHeaders) {
var body = Service.GetRequest.Body.newBuilder()
.setContainerId(cid)
.build();
var request = Service.GetRequest.newBuilder()
.setBody(body);
RequestConstructor.addMetaHeader(request, xHeaders);
RequestSigner.sign(request, getContext().getKey());
return request.build();
}
private Service.ListRequest createListRequest(PrmContainerGetAll args) {
var body = Service.ListRequest.Body.newBuilder()
.setOwnerId(OwnerIdMapper.toGrpcMessage(getContext().getOwnerId()))
.build();
var request = Service.ListRequest.newBuilder()
.setBody(body);
RequestConstructor.addMetaHeader(request, args.getXHeaders());
RequestSigner.sign(request, getContext().getKey());
return request.build();
}
private Service.PutRequest createPutRequest(PrmContainerCreate args, CallContext ctx) {
syncContainerWithNetwork(args.getContainer(), ctx);
var builder = ContainerMapper.toGrpcMessage(args.getContainer());
if (!builder.hasOwnerId()) {
builder.setOwnerId(OwnerIdMapper.toGrpcMessage(getContext().getOwnerId()));
}
if (!builder.hasVersion()) {
builder.setVersion(VersionMapper.toGrpcMessage(getContext().getVersion()));
}
var container = builder.build();
var body = Service.PutRequest.Body.newBuilder()
.setContainer(container)
.setSignature(RequestSigner.signRFC6979(getContext().getKey(), container))
.build();
var request = Service.PutRequest.newBuilder()
.setBody(body);
var sessionToken = getOrCreateSession(args, ctx);
var protoToken = RequestConstructor.createContainerTokenContext(
sessionToken,
null,
frostfs.session.Types.ContainerSessionContext.Verb.PUT,
container.getOwnerId(),
getContext().getKey()
);
RequestConstructor.addMetaHeader(request, args.getXHeaders(), protoToken);
RequestSigner.sign(request, getContext().getKey());
return request.build();
}
private Service.DeleteRequest createDeleteRequest(PrmContainerDelete args, CallContext ctx) {
var cid = ContainerIdMapper.toGrpcMessage(args.getContainerId());
var body = Service.DeleteRequest.Body.newBuilder()
.setContainerId(cid)
.setSignature(RequestSigner.signRFC6979(getContext().getKey(), cid.getValue()))
.build();
var request = Service.DeleteRequest.newBuilder()
.setBody(body);
var sessionToken = getOrCreateSession(args, ctx);
var protoToken = RequestConstructor.createContainerTokenContext(
sessionToken,
null,
frostfs.session.Types.ContainerSessionContext.Verb.DELETE,
null,
getContext().getKey()
);
RequestConstructor.addMetaHeader(request, args.getXHeaders(), protoToken);
RequestSigner.sign(request, getContext().getKey());
return request.build();
}
private void syncContainerWithNetwork(Container container, CallContext callContext) {
var settings = getContext().getFrostFSClient().getNetworkSettings(callContext);
if (nonNull(settings.getHomomorphicHashingDisabled()) && settings.getHomomorphicHashingDisabled()) {
container.getAttributes().put(DISABLE_HOMOMORPHIC_HASHING_ATTRIBUTE, Boolean.TRUE.toString());
} else {
container.getAttributes().remove(DISABLE_HOMOMORPHIC_HASHING_ATTRIBUTE, Boolean.TRUE.toString());
}
}
}
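waitForContainer above reduces to a generic poll-until-deadline loop; a standalone sketch of the pattern (not SDK API — the SDK variant throws TimeoutFrostFSException and treats CONTAINER_NOT_FOUND specially):

import java.time.Duration;
import java.time.LocalDateTime;
import java.util.function.BooleanSupplier;

class PollUntilExample {
    static void poll(BooleanSupplier done, LocalDateTime deadline, Duration interval) throws InterruptedException {
        while (!done.getAsBoolean()) {
            if (LocalDateTime.now().isAfter(deadline)) {
                throw new IllegalStateException("timed out"); // SDK: TimeoutFrostFSException
            }
            Thread.sleep(interval.toMillis());
        }
    }
}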


@ -1,160 +0,0 @@
package info.frostfs.sdk.services.impl;
import frostfs.netmap.NetmapServiceGrpc;
import frostfs.netmap.Service;
import frostfs.netmap.Types;
import info.frostfs.sdk.dto.netmap.NetmapSnapshot;
import info.frostfs.sdk.dto.netmap.NodeInfo;
import info.frostfs.sdk.jdo.ClientEnvironment;
import info.frostfs.sdk.jdo.NetworkSettings;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.mappers.netmap.NetmapSnapshotMapper;
import info.frostfs.sdk.mappers.netmap.NodeInfoMapper;
import info.frostfs.sdk.services.ContextAccessor;
import info.frostfs.sdk.services.NetmapClient;
import info.frostfs.sdk.tools.RequestConstructor;
import info.frostfs.sdk.tools.Verifier;
import java.nio.charset.StandardCharsets;
import static info.frostfs.sdk.tools.RequestSigner.sign;
import static info.frostfs.sdk.utils.DeadLineUtil.deadLineAfter;
import static java.util.Objects.nonNull;
public class NetmapClientImpl extends ContextAccessor implements NetmapClient {
private final NetmapServiceGrpc.NetmapServiceBlockingStub netmapServiceClient;
public NetmapClientImpl(ClientEnvironment clientEnvironment) {
super(clientEnvironment);
this.netmapServiceClient = NetmapServiceGrpc.newBlockingStub(getContext().getChannel());
}
private static boolean getBoolValue(byte[] bytes) {
for (var byteValue : bytes) {
if (byteValue != 0) {
return true;
}
}
return false;
}
private static long getLongValue(byte[] bytes) {
// Little-endian decode; mask each byte so sign extension cannot corrupt the value.
long val = 0;
for (var i = bytes.length - 1; i >= 0; i--) {
val = (val << Byte.SIZE) | (bytes[i] & 0xFF);
}
return val;
}
private static void setNetworksParam(Types.NetworkConfig.Parameter param, NetworkSettings settings) {
var key = new String(param.getKey().toByteArray(), StandardCharsets.UTF_8);
var valueBytes = param.getValue().toByteArray();
switch (key) {
case "AuditFee":
settings.setAuditFee(getLongValue(valueBytes));
break;
case "BasicIncomeRate":
settings.setBasicIncomeRate(getLongValue(valueBytes));
break;
case "ContainerFee":
settings.setContainerFee(getLongValue(valueBytes));
break;
case "ContainerAliasFee":
settings.setContainerAliasFee(getLongValue(valueBytes));
break;
case "EpochDuration":
settings.setEpochDuration(getLongValue(valueBytes));
break;
case "InnerRingCandidateFee":
settings.setIRCandidateFee(getLongValue(valueBytes));
break;
case "MaxECDataCount":
settings.setMaxECDataCount(getLongValue(valueBytes));
break;
case "MaxECParityCount":
settings.setMaxECParityCount(getLongValue(valueBytes));
break;
case "MaxObjectSize":
settings.setMaxObjectSize(getLongValue(valueBytes));
break;
case "WithdrawFee":
settings.setWithdrawalFee(getLongValue(valueBytes));
break;
case "HomomorphicHashingDisabled":
settings.setHomomorphicHashingDisabled(getBoolValue(valueBytes));
break;
case "MaintenanceModeAllowed":
settings.setMaintenanceModeAllowed(getBoolValue(valueBytes));
break;
default:
settings.getUnnamedSettings().put(key, valueBytes);
break;
}
}
@Override
public NetworkSettings getNetworkSettings(CallContext ctx) {
if (nonNull(getContext().getNetworkSettings())) {
return getContext().getNetworkSettings();
}
var info = getNetworkInfo(ctx);
var settings = new NetworkSettings();
for (var param : info.getBody().getNetworkInfo().getNetworkConfig().getParametersList()) {
setNetworksParam(param, settings);
}
getContext().setNetworkSettings(settings);
return settings;
}
@Override
public NodeInfo getLocalNodeInfo(CallContext ctx) {
var request = Service.LocalNodeInfoRequest.newBuilder();
RequestConstructor.addMetaHeader(request);
sign(request, getContext().getKey());
var service = deadLineAfter(netmapServiceClient, ctx.getTimeout(), ctx.getTimeUnit());
var response = service.localNodeInfo(request.build());
Verifier.checkResponse(response);
return NodeInfoMapper.toModel(response.getBody());
}
public Service.NetworkInfoResponse getNetworkInfo(CallContext ctx) {
var request = Service.NetworkInfoRequest.newBuilder();
RequestConstructor.addMetaHeader(request);
sign(request, getContext().getKey());
var service = deadLineAfter(netmapServiceClient, ctx.getTimeout(), ctx.getTimeUnit());
var response = service.networkInfo(request.build());
Verifier.checkResponse(response);
return response;
}
@Override
public NetmapSnapshot getNetmapSnapshot(CallContext ctx) {
var request = Service.NetmapSnapshotRequest.newBuilder();
RequestConstructor.addMetaHeader(request);
sign(request, getContext().getKey());
var service = deadLineAfter(netmapServiceClient, ctx.getTimeout(), ctx.getTimeUnit());
var response = service.netmapSnapshot(request.build());
Verifier.checkResponse(response);
return NetmapSnapshotMapper.toModel(response);
}
}
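Network parameters arrive as raw little-endian byte arrays; a tiny standalone check of the decoding used above (the 1024 value is invented):

class LittleEndianDecodeExample {
    public static void main(String[] args) {
        byte[] raw = {0x00, 0x04}; // how a MaxObjectSize of 1024 would arrive on the wire
        long val = 0;
        for (int i = raw.length - 1; i >= 0; i--) {
            val = (val << Byte.SIZE) | (raw[i] & 0xFF); // mask keeps high-bit bytes positive
        }
        System.out.println(val); // 1024
    }
}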


@ -1,668 +0,0 @@
package info.frostfs.sdk.services.impl;
import com.google.common.collect.Iterables;
import com.google.protobuf.ByteString;
import frostfs.object.ObjectServiceGrpc;
import frostfs.object.Service;
import frostfs.refs.Types;
import info.frostfs.sdk.constants.AppConst;
import info.frostfs.sdk.dto.object.*;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.enums.ObjectType;
import info.frostfs.sdk.exceptions.ProcessFrostFSException;
import info.frostfs.sdk.jdo.ClientEnvironment;
import info.frostfs.sdk.jdo.PutObjectResult;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.object.*;
import info.frostfs.sdk.jdo.parameters.object.patch.PrmObjectPatch;
import info.frostfs.sdk.jdo.parameters.object.patch.PrmRangeGet;
import info.frostfs.sdk.jdo.parameters.object.patch.PrmRangeHashGet;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import info.frostfs.sdk.jdo.result.ObjectHeaderResult;
import info.frostfs.sdk.mappers.container.ContainerIdMapper;
import info.frostfs.sdk.mappers.object.*;
import info.frostfs.sdk.mappers.object.patch.AddressMapper;
import info.frostfs.sdk.mappers.object.patch.RangeMapper;
import info.frostfs.sdk.services.ContextAccessor;
import info.frostfs.sdk.services.ObjectClient;
import info.frostfs.sdk.services.impl.rwhelper.*;
import info.frostfs.sdk.tools.RequestConstructor;
import info.frostfs.sdk.tools.Verifier;
import org.apache.commons.collections4.CollectionUtils;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import static info.frostfs.sdk.constants.ErrorConst.PROTO_MESSAGE_IS_EMPTY_TEMPLATE;
import static info.frostfs.sdk.tools.RequestSigner.sign;
import static info.frostfs.sdk.utils.DeadLineUtil.deadLineAfter;
import static info.frostfs.sdk.utils.Validator.validate;
import static java.util.Objects.isNull;
import static java.util.Objects.nonNull;
public class ObjectClientImpl extends ContextAccessor implements ObjectClient {
private final ObjectServiceGrpc.ObjectServiceBlockingStub objectServiceBlockingClient;
private final ObjectServiceGrpc.ObjectServiceStub objectServiceClient;
private final ObjectToolsImpl objectToolsImpl;
private final SessionToolsImpl sessionTools;
public ObjectClientImpl(ClientEnvironment clientEnvironment) {
super(clientEnvironment);
this.objectServiceBlockingClient = ObjectServiceGrpc.newBlockingStub(getContext().getChannel());
this.objectServiceClient = ObjectServiceGrpc.newStub(getContext().getChannel());
this.objectToolsImpl = new ObjectToolsImpl(clientEnvironment);
this.sessionTools = new SessionToolsImpl(clientEnvironment);
}
public SessionToken getOrCreateSession(SessionContext sessionContext, CallContext ctx) {
return isNull(sessionContext.getSessionToken())
? sessionTools.getOrCreateSession(getContext(), ctx)
: sessionContext.getSessionToken();
}
@Override
public ObjectHeaderResult getObjectHead(PrmObjectHeadGet args, CallContext ctx) {
validate(args);
var request = createHeadRequest(args, ctx);
var service = deadLineAfter(objectServiceBlockingClient, ctx.getTimeout(), ctx.getTimeUnit());
var response = service.head(request);
Verifier.checkResponse(response);
return ObjectHeaderResult.builder()
.headerInfo(ObjectHeaderMapper.toModel(response.getBody().getHeader().getHeader()))
.splitInfo(SplitInfoMapper.toModel(response.getBody().getSplitInfo()))
.build();
}
@Override
public ObjectFrostFS getObject(PrmObjectGet args, CallContext ctx) {
validate(args);
var request = createGetRequest(args, ctx);
return getObject(request, ctx);
}
@Override
public void deleteObject(PrmObjectDelete args, CallContext ctx) {
validate(args);
var request = createDeleteRequest(args, ctx);
var service = deadLineAfter(objectServiceBlockingClient, ctx.getTimeout(), ctx.getTimeUnit());
var response = service.delete(request);
Verifier.checkResponse(response);
}
@Override
public Iterable<ObjectId> searchObjects(PrmObjectSearch args, CallContext ctx) {
validate(args);
var request = createSearchRequest(args, ctx);
var objectsIds = searchObjects(request, ctx);
return Iterables.transform(objectsIds, input -> new ObjectId(input.getValue().toByteArray()));
}
@Override
public ObjectWriter putObject(PrmObjectPut args, CallContext ctx) {
validate(args);
return new ObjectWriter(getContext(), args, getUploadStream(args, ctx));
}
@Override
public ObjectId putClientCutObject(PrmObjectClientCutPut args, CallContext ctx) {
validate(args);
var header = args.getObjectHeader();
var fullLength = header.getPayloadLength() == 0 ? getStreamSize(args.getPayload()) : header.getPayloadLength();
args.getPutObjectContext().setFullLength(fullLength);
if (args.getPutObjectContext().getMaxObjectSizeCache() == 0) {
var networkSettings = getContext().getFrostFSClient().getNetworkSettings(ctx);
args.getPutObjectContext().setMaxObjectSizeCache(networkSettings.getMaxObjectSize().intValue());
}
var restBytes = fullLength - args.getPutObjectContext().getCurrentStreamPosition();
var objectSize = restBytes > 0
? Math.min(args.getPutObjectContext().getMaxObjectSizeCache(), restBytes)
: args.getPutObjectContext().getMaxObjectSizeCache();
// Determine the collection capacity: how many part objects the payload will produce.
var restPart = (restBytes % objectSize) > 0 ? 1 : 0;
var objectsCount = fullLength > 0 ? (int) (restBytes / objectSize) + restPart : 0;
List<ObjectId> sentObjectIds = new ArrayList<>(objectsCount);
// keep attributes for the large object
var attributes = args.getObjectHeader().getAttributes();
Split split = new Split();
args.getObjectHeader().setAttributes(new ArrayList<>());
// send all parts except the last one as separate Objects
while (restBytes > (long) args.getPutObjectContext().getMaxObjectSizeCache()) {
var previous = CollectionUtils.isNotEmpty(sentObjectIds)
? sentObjectIds.get(sentObjectIds.size() - 1)
: null;
split.setPrevious(previous);
args.getObjectHeader().setSplit(split);
var result = putMultipartStreamObject(args, ctx);
sentObjectIds.add(result.getObjectId());
restBytes -= result.getObjectSize();
}
// send the last part and create linkObject
if (CollectionUtils.isNotEmpty(sentObjectIds)) {
var largeObjectHeader = new ObjectHeader(
header.getContainerId(), ObjectType.REGULAR, attributes, fullLength, header.getVersion()
);
largeObjectHeader.setOwnerId(header.getOwnerId());
split.setParentHeader(largeObjectHeader);
var result = putMultipartStreamObject(args, ctx);
sentObjectIds.add(result.getObjectId());
var linkObject = new LinkObject(header.getContainerId(), split.getSplitId(), largeObjectHeader);
linkObject.addChildren(sentObjectIds);
putSingleObject(new PrmObjectSinglePut(linkObject), ctx);
return split.getParent();
}
// The payload fits into a single object: no client-side cut is needed, just a plain PUT.
var singlePartResult = putMultipartStreamObject(args, ctx);
return singlePartResult.getObjectId();
}
@Override
public ObjectId putSingleObject(PrmObjectSinglePut args, CallContext ctx) {
var grpcObject = objectToolsImpl.createObject(args.getObjectFrostFS());
var request = createPutSingleRequest(grpcObject, args, ctx);
var service = deadLineAfter(objectServiceBlockingClient, ctx.getTimeout(), ctx.getTimeUnit());
var response = service.putSingle(request);
Verifier.checkResponse(response);
return new ObjectId(grpcObject.getObjectId().getValue().toByteArray());
}
@Override
public RangeReader getRange(PrmRangeGet args, CallContext ctx) {
validate(args);
var request = createGetRangeRequest(args, ctx);
var service = deadLineAfter(objectServiceBlockingClient, ctx.getTimeout(), ctx.getTimeUnit());
return new RangeReader(service.getRange(request));
}
@Override
public byte[][] getRangeHash(PrmRangeHashGet args, CallContext ctx) {
validate(args);
var request = createGetRangeHashRequest(args, ctx);
var service = deadLineAfter(objectServiceBlockingClient, ctx.getTimeout(), ctx.getTimeUnit());
var response = service.getRangeHash(request);
Verifier.checkResponse(response);
return response.getBody().getHashListList().stream().map(ByteString::toByteArray).toArray(byte[][]::new);
}
@Override
public ObjectId patchObject(PrmObjectPatch args, CallContext ctx) {
validate(args);
var service = deadLineAfter(objectServiceClient, ctx.getTimeout(), ctx.getTimeUnit());
PatchStreamer writer = new PatchStreamer(service);
var request = createInitPatchRequest(args, ctx);
writer.write(request.build());
if (nonNull(args.getPayload())) {
patchObjectPayload(request, args, writer);
}
var response = writer.complete();
Verifier.checkResponse(response);
return ObjectIdMapper.toModel(response.getBody().getObjectId());
}
private void patchObjectPayload(Service.PatchRequest.Builder request, PrmObjectPatch args, PatchStreamer writer) {
var currentPos = args.getRange().getOffset();
var chunkSize = args.getMaxChunkLength() > 0 ? args.getMaxChunkLength() : AppConst.OBJECT_CHUNK_SIZE;
byte[] chunkBuffer = new byte[chunkSize];
var bytesCount = readNBytes(args.getPayload(), chunkBuffer, chunkSize);
while (bytesCount > 0) {
var range = Service.Range.newBuilder()
.setOffset(currentPos)
.setLength(bytesCount)
.build();
var patch = Service.PatchRequest.Body.Patch.newBuilder()
.setChunk(ByteString.copyFrom(chunkBuffer, 0, bytesCount))
.setSourceRange(range)
.build();
var body = Service.PatchRequest.Body.newBuilder()
.setAddress(request.getBody().getAddress())
.setPatch(patch)
.build();
request.setBody(body);
RequestConstructor.addMetaHeader(request, args.getXHeaders(), request.getMetaHeader().getSessionToken());
sign(request, getContext().getKey());
writer.write(request.build());
currentPos += bytesCount;
bytesCount = readNBytes(args.getPayload(), chunkBuffer, chunkSize);
}
}
private ObjectFrostFS getObject(Service.GetRequest request, CallContext ctx) {
var reader = getObjectInit(request, ctx);
var grpcObject = reader.readHeader();
var modelObject = ObjectFrostFSMapper.toModel(grpcObject);
modelObject.setObjectReader(reader);
return modelObject;
}
private ObjectReaderImpl getObjectInit(Service.GetRequest initRequest, CallContext ctx) {
if (initRequest.getSerializedSize() == 0) {
throw new ProcessFrostFSException(
String.format(PROTO_MESSAGE_IS_EMPTY_TEMPLATE, initRequest.getClass().getName())
);
}
var service = deadLineAfter(objectServiceBlockingClient, ctx.getTimeout(), ctx.getTimeUnit());
return new ObjectReaderImpl(service.get(initRequest));
}
private PutObjectResult putMultipartStreamObject(PrmObjectClientCutPut args, CallContext ctx) {
var chunkSize = args.getBufferMaxSize() > 0 ? args.getBufferMaxSize() : AppConst.OBJECT_CHUNK_SIZE;
var restBytes =
args.getPutObjectContext().getFullLength() - args.getPutObjectContext().getCurrentStreamPosition();
chunkSize = (int) Math.min(restBytes, chunkSize);
byte[] chunkBuffer = args.getCustomerBuffer() != null
? args.getCustomerBuffer()
: new byte[chunkSize]; // TODO: replace with a pooled buffer
var sentBytes = 0;
// 0 means no limit from client, so server side cut is performed
var objectLimitSize = args.getPutObjectContext().getMaxObjectSizeCache();
var stream = getUploadStream(args, ctx);
while (objectLimitSize == 0 || sentBytes < objectLimitSize) {
// send chunks limited to default or user's settings
var bufferSize = objectLimitSize > 0 ? Math.min(objectLimitSize - sentBytes, chunkSize) : chunkSize;
var bytesCount = readNBytes(args.getPayload(), chunkBuffer, bufferSize);
if (bytesCount == 0) {
break;
}
sentBytes += bytesCount;
var body = Service.PutRequest.Body.newBuilder()
.setChunk(ByteString.copyFrom(chunkBuffer, 0, bytesCount))
.build();
var chunkRequest = Service.PutRequest.newBuilder()
.setBody(body)
.clearVerifyHeader();
RequestConstructor.addMetaHeader(chunkRequest, args.getXHeaders());
sign(chunkRequest, getContext().getKey());
stream.write(chunkRequest.build());
}
var response = stream.complete();
Verifier.checkResponse(response);
var objectId = new ObjectId(response.getBody().getObjectId().getValue().toByteArray());
return new PutObjectResult(objectId, sentBytes);
}
private ObjectStreamer getUploadStream(PrmObjectPutBase args, CallContext ctx) {
var header = args.getObjectHeader();
header.setOwnerId(getContext().getOwnerId());
header.setVersion(getContext().getVersion());
var grpcHeader = ObjectHeaderMapper.toGrpcMessage(header);
if (nonNull(header.getSplit())) {
grpcHeader = objectToolsImpl.updateSplitValues(grpcHeader, header.getSplit());
}
var initRequest = createInitPutRequest(grpcHeader, args, ctx);
return putObjectInit(initRequest, ctx);
}
private ObjectStreamer putObjectInit(Service.PutRequest initRequest, CallContext ctx) {
if (initRequest.getSerializedSize() == 0) {
throw new ProcessFrostFSException(
String.format(PROTO_MESSAGE_IS_EMPTY_TEMPLATE, initRequest.getClass().getName())
);
}
var service = deadLineAfter(objectServiceClient, ctx.getTimeout(), ctx.getTimeUnit());
ObjectStreamer writer = new ObjectStreamer(service);
writer.write(initRequest);
return writer;
}
private Iterable<Types.ObjectID> searchObjects(Service.SearchRequest request, CallContext ctx) {
var reader = getSearchReader(request, ctx);
var ids = reader.read();
List<Types.ObjectID> result = new ArrayList<>();
while (CollectionUtils.isNotEmpty(ids)) {
result.addAll(ids);
ids = reader.read();
}
return result; // TODO: stream results lazily instead of materializing the whole list
}
private SearchReader getSearchReader(Service.SearchRequest initRequest, CallContext ctx) {
if (initRequest.getSerializedSize() == 0) {
throw new ProcessFrostFSException(
String.format(PROTO_MESSAGE_IS_EMPTY_TEMPLATE, initRequest.getClass().getName())
);
}
var service = deadLineAfter(objectServiceBlockingClient, ctx.getTimeout(), ctx.getTimeUnit());
return new SearchReader(service.search(initRequest));
}
private int readNBytes(InputStream inputStream, byte[] buffer, int size) {
try {
return inputStream.readNBytes(buffer, 0, size);
} catch (IOException exp) {
throw new ProcessFrostFSException(exp.getMessage());
}
}
private long getStreamSize(InputStream inputStream) {
try {
// NOTE: available() is only an estimate of the readable bytes; callers that know the
// real payload length should set it on the header instead of relying on this value.
return inputStream.available();
} catch (IOException exp) {
throw new ProcessFrostFSException(exp.getMessage());
}
}
private Service.HeadRequest createHeadRequest(PrmObjectHeadGet args, CallContext ctx) {
var address = Types.Address.newBuilder()
.setContainerId(ContainerIdMapper.toGrpcMessage(args.getContainerId()))
.setObjectId(ObjectIdMapper.toGrpcMessage(args.getObjectId()))
.build();
var body = Service.HeadRequest.Body.newBuilder()
.setAddress(address)
.setRaw(args.isRaw())
.build();
var request = Service.HeadRequest.newBuilder()
.setBody(body);
var sessionToken = getOrCreateSession(args, ctx);
var protoToken = RequestConstructor.createObjectTokenContext(
sessionToken,
address,
frostfs.session.Types.ObjectSessionContext.Verb.HEAD,
getContext().getKey()
);
RequestConstructor.addMetaHeader(request, args.getXHeaders(), protoToken);
sign(request, getContext().getKey());
return request.build();
}
private Service.GetRequest createGetRequest(PrmObjectGet args, CallContext ctx) {
var address = Types.Address.newBuilder()
.setContainerId(ContainerIdMapper.toGrpcMessage(args.getContainerId()))
.setObjectId(ObjectIdMapper.toGrpcMessage(args.getObjectId()))
.build();
var body = Service.GetRequest.Body.newBuilder()
.setAddress(address)
.build();
var request = Service.GetRequest.newBuilder()
.setBody(body);
var sessionToken = getOrCreateSession(args, ctx);
var protoToken = RequestConstructor.createObjectTokenContext(
sessionToken,
address,
frostfs.session.Types.ObjectSessionContext.Verb.GET,
getContext().getKey()
);
RequestConstructor.addMetaHeader(request, args.getXHeaders(), protoToken);
sign(request, getContext().getKey());
return request.build();
}
private Service.DeleteRequest createDeleteRequest(PrmObjectDelete args, CallContext ctx) {
var address = Types.Address.newBuilder()
.setContainerId(ContainerIdMapper.toGrpcMessage(args.getContainerId()))
.setObjectId(ObjectIdMapper.toGrpcMessage(args.getObjectId()))
.build();
var body = Service.DeleteRequest.Body.newBuilder()
.setAddress(address)
.build();
var request = Service.DeleteRequest.newBuilder()
.setBody(body);
var sessionToken = getOrCreateSession(args, ctx);
var protoToken = RequestConstructor.createObjectTokenContext(
sessionToken,
address,
frostfs.session.Types.ObjectSessionContext.Verb.DELETE,
getContext().getKey()
);
RequestConstructor.addMetaHeader(request, args.getXHeaders(), protoToken);
sign(request, getContext().getKey());
return request.build();
}
private Service.SearchRequest createSearchRequest(PrmObjectSearch args, CallContext ctx) {
var cid = ContainerIdMapper.toGrpcMessage(args.getContainerId());
var address = Types.Address.newBuilder()
.setContainerId(cid)
.build();
var body = Service.SearchRequest.Body.newBuilder()
.setContainerId(cid)
.setVersion(1); // TODO: clarify this parameter
for (ObjectFilter<?> filter : args.getFilters()) {
body.addFilters(ObjectFilterMapper.toGrpcMessage(filter));
}
var request = Service.SearchRequest.newBuilder()
.setBody(body.build());
var sessionToken = getOrCreateSession(args, ctx);
var protoToken = RequestConstructor.createObjectTokenContext(
sessionToken,
address,
frostfs.session.Types.ObjectSessionContext.Verb.SEARCH,
getContext().getKey()
);
RequestConstructor.addMetaHeader(request, args.getXHeaders(), protoToken);
sign(request, getContext().getKey());
return request.build();
}
private Service.PutRequest createInitPutRequest(frostfs.object.Types.Header header,
PrmObjectPutBase args,
CallContext ctx) {
var address = Types.Address.newBuilder()
.setContainerId(header.getContainerId())
.build();
var init = Service.PutRequest.Body.Init.newBuilder()
.setHeader(header)
.build();
var body = Service.PutRequest.Body.newBuilder()
.setInit(init)
.build();
var request = Service.PutRequest.newBuilder()
.setBody(body);
var sessionToken = getOrCreateSession(args, ctx);
var protoToken = RequestConstructor.createObjectTokenContext(
sessionToken,
address,
frostfs.session.Types.ObjectSessionContext.Verb.PUT,
getContext().getKey()
);
RequestConstructor.addMetaHeader(request, args.getXHeaders(), protoToken);
sign(request, getContext().getKey());
return request.build();
}
private Service.PutSingleRequest createPutSingleRequest(frostfs.object.Types.Object grpcObject,
PrmObjectSinglePut args,
CallContext ctx) {
var address = Types.Address.newBuilder()
.setContainerId(grpcObject.getHeader().getContainerId())
.build();
var body = Service.PutSingleRequest.Body.newBuilder()
.setObject(grpcObject)
.build();
var request = Service.PutSingleRequest.newBuilder()
.setBody(body);
var sessionToken = getOrCreateSession(args, ctx);
var protoToken = RequestConstructor.createObjectTokenContext(
sessionToken,
address,
frostfs.session.Types.ObjectSessionContext.Verb.PUT,
getContext().getKey()
);
RequestConstructor.addMetaHeader(request, args.getXHeaders(), protoToken);
sign(request, getContext().getKey());
return request.build();
}
private Service.GetRangeRequest createGetRangeRequest(PrmRangeGet args, CallContext ctx) {
var address = Types.Address.newBuilder()
.setContainerId(ContainerIdMapper.toGrpcMessage(args.getContainerId()))
.setObjectId(ObjectIdMapper.toGrpcMessage(args.getObjectId()))
.build();
var body = Service.GetRangeRequest.Body.newBuilder()
.setAddress(address)
.setRange(RangeMapper.toGrpcMessage(args.getRange()))
.setRaw(args.isRaw())
.build();
var request = Service.GetRangeRequest.newBuilder()
.setBody(body);
var sessionToken = getOrCreateSession(args, ctx);
var protoToken = RequestConstructor.createObjectTokenContext(
sessionToken,
address,
frostfs.session.Types.ObjectSessionContext.Verb.RANGE,
getContext().getKey()
);
RequestConstructor.addMetaHeader(request, args.getXHeaders(), protoToken);
sign(request, getContext().getKey());
return request.build();
}
private Service.GetRangeHashRequest createGetRangeHashRequest(PrmRangeHashGet args, CallContext ctx) {
var address = Types.Address.newBuilder()
.setContainerId(ContainerIdMapper.toGrpcMessage(args.getContainerId()))
.setObjectId(ObjectIdMapper.toGrpcMessage(args.getObjectId()))
.build();
var body = Service.GetRangeHashRequest.Body.newBuilder()
.setAddress(address)
.setType(Types.ChecksumType.SHA256)
.setSalt(ByteString.copyFrom(args.getSalt()))
.addAllRanges(RangeMapper.toGrpcMessages(args.getRanges()))
.build();
var request = Service.GetRangeHashRequest.newBuilder()
.setBody(body);
var sessionToken = getOrCreateSession(args, ctx);
var protoToken = RequestConstructor.createObjectTokenContext(
sessionToken,
address,
frostfs.session.Types.ObjectSessionContext.Verb.RANGEHASH,
getContext().getKey()
);
RequestConstructor.addMetaHeader(request, args.getXHeaders(), protoToken);
sign(request, getContext().getKey());
return request.build();
}
private Service.PatchRequest.Builder createInitPatchRequest(PrmObjectPatch args, CallContext ctx) {
var address = AddressMapper.toGrpcMessage(args.getAddress());
var body = Service.PatchRequest.Body.newBuilder()
.setAddress(address)
.setReplaceAttributes(args.isReplaceAttributes())
.addAllNewAttributes(ObjectAttributeMapper.toGrpcMessages(args.getNewAttributes()))
.build();
var request = Service.PatchRequest.newBuilder()
.setBody(body);
var protoToken = RequestConstructor.createObjectTokenContext(
getOrCreateSession(args, ctx),
request.getBody().getAddress(),
frostfs.session.Types.ObjectSessionContext.Verb.PATCH,
getContext().getKey()
);
RequestConstructor.addMetaHeader(request, args.getXHeaders(), protoToken);
sign(request, getContext().getKey());
return request;
}
}
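The client-cut logic in putClientCutObject boils down to ceiling division of the payload over the network's MaxObjectSize, plus one link object tying the parts together. A worked example with invented sizes:

class PartCountExample {
    public static void main(String[] args) {
        long fullLength = 150L << 20;   // 150 MiB payload
        long maxObjectSize = 64L << 20; // network MaxObjectSize
        long parts = (fullLength + maxObjectSize - 1) / maxObjectSize; // ceiling division
        System.out.println(parts + " part objects + 1 link object"); // 3 part objects + 1 link object
    }
}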


@ -1,126 +0,0 @@
package info.frostfs.sdk.services.impl;
import com.google.protobuf.ByteString;
import frostfs.object.Types;
import info.frostfs.sdk.dto.object.ObjectFrostFS;
import info.frostfs.sdk.dto.object.ObjectHeader;
import info.frostfs.sdk.dto.object.ObjectId;
import info.frostfs.sdk.dto.object.Split;
import info.frostfs.sdk.jdo.ClientEnvironment;
import info.frostfs.sdk.mappers.netmap.VersionMapper;
import info.frostfs.sdk.mappers.object.ObjectHeaderMapper;
import info.frostfs.sdk.mappers.object.ObjectIdMapper;
import info.frostfs.sdk.mappers.object.OwnerIdMapper;
import info.frostfs.sdk.services.ContextAccessor;
import info.frostfs.sdk.services.ToolsClient;
import org.apache.commons.collections4.CollectionUtils;
import java.util.stream.Collectors;
import static info.frostfs.sdk.Helper.getSha256;
import static info.frostfs.sdk.tools.RequestSigner.signData;
import static java.util.Objects.isNull;
import static java.util.Objects.nonNull;
public class ObjectToolsImpl extends ContextAccessor implements ToolsClient {
public ObjectToolsImpl(ClientEnvironment context) {
super(context);
}
private static frostfs.refs.Types.Checksum sha256Checksum(byte[] data) {
return frostfs.refs.Types.Checksum.newBuilder()
.setType(frostfs.refs.Types.ChecksumType.SHA256)
.setSum(ByteString.copyFrom(getSha256(data)))
.build();
}
@Override
public ObjectId calculateObjectId(ObjectHeader header) {
var grpcHeader = createHeader(header, new byte[]{});
if (nonNull(header.getSplit())) {
grpcHeader = updateSplitValues(grpcHeader, header.getSplit());
}
return ObjectIdMapper.toModel(
frostfs.refs.Types.ObjectID.newBuilder().setValue(getSha256(grpcHeader)).build()
);
}
public Types.Object createObject(ObjectFrostFS objectFrostFS) {
objectFrostFS.getHeader().setOwnerId(getContext().getOwnerId());
objectFrostFS.getHeader().setVersion(getContext().getVersion());
objectFrostFS.getHeader().setPayloadLength(objectFrostFS.getPayload().length);
var grpcHeader = ObjectHeaderMapper.toGrpcMessage(objectFrostFS.getHeader()).toBuilder()
.setPayloadHash(sha256Checksum(objectFrostFS.getPayload()))
.build();
var split = objectFrostFS.getHeader().getSplit();
if (nonNull(split)) {
grpcHeader = updateSplitValues(grpcHeader, split);
}
var objectId = frostfs.refs.Types.ObjectID.newBuilder().setValue(getSha256(grpcHeader)).build();
var sig = frostfs.refs.Types.Signature.newBuilder()
.setKey(ByteString.copyFrom(getContext().getKey().getPublicKeyByte()))
.setSign(ByteString.copyFrom(signData(getContext().getKey(), objectId.toByteArray())));
return Types.Object.newBuilder()
.setHeader(grpcHeader)
.setObjectId(objectId)
.setPayload(ByteString.copyFrom(objectFrostFS.getPayload()))
.setSignature(sig)
.build();
}
public Types.Header updateSplitValues(Types.Header grpcHeader, Split split) {
if (isNull(split)) {
return grpcHeader;
}
var grpcSplit = grpcHeader.getSplit().toBuilder()
.setSplitId(ByteString.copyFrom(split.getSplitId().toBinary()));
if (CollectionUtils.isNotEmpty(split.getChildren())) {
var grpcChildren = split.getChildren().stream()
.map(ObjectIdMapper::toGrpcMessage)
.collect(Collectors.toList());
grpcSplit.addAllChildren(grpcChildren);
}
if (nonNull(split.getParentHeader())) {
var grpcParentHeader = createHeader(split.getParentHeader(), new byte[]{});
var parentObjectId = frostfs.refs.Types.ObjectID.newBuilder().setValue(getSha256(grpcParentHeader)).build();
var signature = frostfs.refs.Types.Signature.newBuilder()
.setKey(ByteString.copyFrom(getContext().getKey().getPublicKeyByte()))
.setSign(ByteString.copyFrom(signData(getContext().getKey(), parentObjectId.toByteArray())))
.build();
grpcSplit
.setParent(parentObjectId)
.setParentHeader(grpcParentHeader)
.setParentSignature(signature);
split.setParent(ObjectIdMapper.toModel(parentObjectId));
}
if (nonNull(split.getPrevious())) {
grpcSplit.setPrevious(ObjectIdMapper.toGrpcMessage(split.getPrevious())).build();
}
return grpcHeader.toBuilder().setSplit(grpcSplit.build()).build();
}
private Types.Header createHeader(ObjectHeader header, byte[] payload) {
var grpcHeader = ObjectHeaderMapper.toGrpcMessage(header).toBuilder()
.setOwnerId(OwnerIdMapper.toGrpcMessage(getContext().getOwnerId()))
.setVersion(VersionMapper.toGrpcMessage(getContext().getVersion()));
if (payload != null) {
grpcHeader.setPayloadHash(sha256Checksum(payload));
}
return grpcHeader.build();
}
}
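calculateObjectId above derives the identifier purely client-side, as the SHA-256 of the protobuf-marshalled header (with owner, version, and payload hash filled in). A usage sketch:

import info.frostfs.sdk.dto.object.ObjectHeader;
import info.frostfs.sdk.dto.object.ObjectId;
import info.frostfs.sdk.services.ToolsClient;

class ObjectIdExample {
    static ObjectId idOf(ToolsClient tools, ObjectHeader header) {
        // Purely local computation: the same header always yields the same id.
        return tools.calculateObjectId(header);
    }
}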


@ -1,72 +0,0 @@
package info.frostfs.sdk.services.impl;
import frostfs.session.Service;
import frostfs.session.SessionServiceGrpc;
import frostfs.session.Types;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.ClientEnvironment;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.session.PrmSessionCreate;
import info.frostfs.sdk.mappers.object.OwnerIdMapper;
import info.frostfs.sdk.mappers.session.SessionMapper;
import info.frostfs.sdk.services.ContextAccessor;
import info.frostfs.sdk.services.SessionClient;
import info.frostfs.sdk.tools.RequestConstructor;
import info.frostfs.sdk.tools.Verifier;
import static info.frostfs.sdk.tools.RequestSigner.sign;
import static info.frostfs.sdk.utils.DeadLineUtil.deadLineAfter;
public class SessionClientImpl extends ContextAccessor implements SessionClient {
private final SessionServiceGrpc.SessionServiceBlockingStub serviceBlockingStub;
public SessionClientImpl(ClientEnvironment clientEnvironment) {
super(clientEnvironment);
this.serviceBlockingStub = SessionServiceGrpc.newBlockingStub(getContext().getChannel());
}
@Override
public SessionToken createSession(PrmSessionCreate args, CallContext ctx) {
var sessionToken = createSessionInternal(args, ctx);
var token = SessionMapper.serialize(sessionToken);
return new SessionToken(token);
}
public Types.SessionToken createSessionInternal(PrmSessionCreate args, CallContext ctx) {
var body = Service.CreateRequest.Body.newBuilder()
.setOwnerId(OwnerIdMapper.toGrpcMessage(getContext().getOwnerId()))
.setExpiration(args.getExpiration())
.build();
var request = Service.CreateRequest.newBuilder()
.setBody(body);
RequestConstructor.addMetaHeader(request);
sign(request, getContext().getKey());
return createSession(request.build(), ctx);
}
private Types.SessionToken createSession(Service.CreateRequest request, CallContext ctx) {
var service = deadLineAfter(serviceBlockingStub, ctx.getTimeout(), ctx.getTimeUnit());
var response = service.create(request);
Verifier.checkResponse(response);
var lifetime = Types.SessionToken.Body.TokenLifetime.newBuilder()
.setExp(request.getBody().getExpiration())
.setIat(response.getMetaHeader().getEpoch())
.setNbf(response.getMetaHeader().getEpoch())
.build();
var body = Types.SessionToken.Body.newBuilder()
.setId(response.getBody().getId())
.setSessionKey(response.getBody().getSessionKey())
.setOwnerId(request.getBody().getOwnerId())
.setLifetime(lifetime)
.build();
return Types.SessionToken.newBuilder()
.setBody(body)
.build();
}
}
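A session-opening sketch against this client (PrmSessionCreate is injected to avoid assuming its constructor):

import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.session.PrmSessionCreate;
import info.frostfs.sdk.services.SessionClient;

class SessionOpenExample {
    static SessionToken open(SessionClient client, PrmSessionCreate args, CallContext ctx) {
        // The returned token is already serialized; its lifetime's iat/nbf come from the
        // current epoch and its exp from args.getExpiration().
        return client.createSession(args, ctx);
    }
}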

Some files were not shown because too many files have changed in this diff.