Compare commits

...

No commits in common. "master" and "empty" have entirely different histories.

257 changed files with 2 additions and 16416 deletions

View file

@ -1,21 +0,0 @@
# Forgejo Actions workflow: enforce DCO sign-off on every pull request.
name: DCO
on: [pull_request]

jobs:
  dco:
    name: DCO
    runs-on: ubuntu-latest
    steps:
      # Full history is required so the checker can inspect every commit in the PR.
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Setup Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.21'
      # dco-go validates Signed-off-by trailers on all commits since the base ref.
      - name: Run commit format checker
        uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
        with:
          from: 'origin/${{ github.event.pull_request.base.ref }}'

View file

@ -1,22 +0,0 @@
# Forgejo Actions workflow: publish Maven packages for version tags.
on:
  push:
  workflow_dispatch:

jobs:
  image:
    name: Publish Maven packages
    runs-on: docker
    container: git.frostfs.info/truecloudlab/env:openjdk-11-maven-3.8.6
    steps:
      - name: Clone git repo
        uses: actions/checkout@v3
      - name: Publish release packages
        run: mvn clean --batch-mode --update-snapshots deploy
        # Deploy only when the ref is a version tag (refs/tags/v*), whether it
        # arrived via push or was started manually with workflow_dispatch.
        if: >-
          startsWith(github.ref, 'refs/tags/v') &&
          (github.event_name == 'workflow_dispatch' || github.event_name == 'push')
        env:
          MAVEN_REGISTRY: TrueCloudLab
          MAVEN_REGISTRY_USER: ${{ secrets.MAVEN_REGISTRY_USER }}
          MAVEN_REGISTRY_PASSWORD: ${{ secrets.MAVEN_REGISTRY_PASSWORD }}

View file

@ -1,12 +0,0 @@
# Forgejo Actions workflow: run the Maven verify phase on every pull request.
name: Verify code phase
on: [pull_request]

jobs:
  verify-code:
    name: Verify code
    runs-on: docker
    container: git.frostfs.info/truecloudlab/env:openjdk-11-maven-3.8.6
    steps:
      - uses: actions/checkout@v3
      - name: Run the Maven verify phase
        run: mvn --batch-mode --update-snapshots verify

41
.gitignore vendored
View file

@ -1,41 +0,0 @@
### Maven ###
target/
!.mvn/wrapper/maven-wrapper.jar
!**/src/main/**/target/
!**/src/test/**/target/
**/.flattened-pom.xml
### IntelliJ IDEA ###
.idea/modules.xml
.idea/jarRepositories.xml
.idea/compiler.xml
.idea/libraries/
.idea/
*.iws
*.iml
*.ipr
### Eclipse ###
.apt_generated
.classpath
.factorypath
.project
.settings
.springBeans
.sts4-cache
### NetBeans ###
/nbproject/private/
/nbbuild/
/dist/
/nbdist/
/.nb-gradle/
build/
!**/src/main/**/build/
!**/src/test/**/build/
### VS Code ###
.vscode/
### Mac OS ###
.DS_Store

View file

@ -1,37 +0,0 @@
# Changelog
## [0.9.0] - 2025-03-05
### Added
- APE rule deserializer
## [0.8.0] - 2025-03-04
### Added
- Creating client via wallet and password
## [0.7.0] - 2025-02-20
### Added
- Expanding the parameters for creating a container
### Fixed
- Creating a session for working with objects
## [0.6.0] - 2025-02-13
### Added
- APE rules serializer
## [0.5.0] - 2025-02-11
### Fixed
- Loading large objects in chunks
- .gitignore
- pom revision

View file

@ -1,3 +0,0 @@
.* @orikik
.forgejo/.* @potyarkin
Makefile @potyarkin

View file

@ -1,156 +0,0 @@
# Contribution guide
First, thank you for contributing! We love and encourage pull requests from
everyone. Please follow the guidelines:
- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-sdk-java/issues) and
[pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-sdk-java/pulls) for existing
discussions.
- Open an issue first, to discuss a new feature or enhancement.
- Write tests and make sure the test suite passes locally and on CI.
- Open a pull request and reference the relevant issue(s).
- Make sure your commits are logically separated and have good comments
explaining the details of your change.
- After receiving feedback, amend your commits or add new ones as
appropriate.
- **Have fun!**
## Development Workflow
Start by forking the `frostfs-sdk-java` repository, make changes in a branch and then
send a pull request. We encourage pull requests to discuss code changes. Here
are the steps in details:
### Set up your git repository
Fork the [frostfs-sdk-java
upstream](https://git.frostfs.info/repo/fork/346) source repository
to your own personal repository. Copy the URL of your fork (you will need it for
the `git clone` command below).
```sh
$ git clone https://git.frostfs.info/<username>/frostfs-sdk-java.git
```
### Set up git remote as ``upstream``
```sh
$ cd frostfs-sdk-java
$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-sdk-java.git
$ git fetch upstream
$ git merge upstream/master
...
```
### Create your feature branch
Before making code changes, make sure you create a separate branch for these
changes. Maybe you will find it convenient to name a branch in
`<type>/<Issue>-<changes_topic>` format.
```
$ git checkout -b feature/123-something_awesome
```
### Test your changes
After your code changes, make sure
- To add test cases for the new code.
- To run `mvn clean verify`
- To squash your commits into a single commit or a series of logically separated
commits with `git rebase -i`. It's okay to force update your pull request.
- To run `mvn clean package` successfully.
### Commit changes
After verification, commit your changes. There is a [great
post](https://chris.beams.io/posts/git-commit/) on how to write useful commit
messages. Try following this template:
```
[#Issue] <component> Summary
Description
<Macros>
<Sign-Off>
```
```
$ git commit -ams '[#123] Add some feature'
```
### Push to the branch
Push your locally committed changes to the remote origin (your fork)
```
$ git push origin feature/123-something_awesome
```
### Create a Pull Request
Pull requests can be created via Forgejo. Refer to [this
document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for
detailed steps on how to create a pull request. After a Pull Request gets peer
reviewed and approved, it will be merged.
## DCO Sign off
All authors to the project retain copyright to their work. However, to ensure
that they are only submitting work that they have rights to, we require
everyone to acknowledge this by signing their work.
Any copyright notices in this repository should specify the authors as "the
contributors".
To sign your work, just add a line like this at the end of your commit message:
```
Signed-off-by: Samii Sakisaka <samii@frostfs.info>
```
This can be easily done with the `--signoff` option to `git commit`.
By doing this you state that you can certify the following (from [The Developer
Certificate of Origin](https://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
1 Letterman Drive
Suite D4700
San Francisco, CA, 94129
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```

74
LICENSE
View file

@ -1,74 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
Copyright 2024 TrueCloudLab
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

165
README.md
View file

@ -1,164 +1,3 @@
# frostfs-sdk-java
# WIP area: this repo is just a fork!
Java implementation of FrostFS SDK
## Prerequisites
### Get the key for your wallet
1. Get the address
```bash
cat <path_to_your_wallet> | jq .accounts[0].address | tr -d '"'
```
2. Get the key
```bash
neo-go wallet export -w <path_to_your_wallet> -d <address_from_p1>
```
## Example usage
### Container operations
```java
import info.frostfs.sdk.FrostFSClient;
import info.frostfs.sdk.dto.container.Container;
import info.frostfs.sdk.dto.netmap.PlacementPolicy;
import info.frostfs.sdk.dto.netmap.Replica;
import info.frostfs.sdk.jdo.ClientSettings;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerCreate;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerDelete;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerGet;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerGetAll;
public class ContainerExample {
public void example() {
var callContext = new CallContext();
ClientSettings clientSettings = new ClientSettings(<your_key>, <your_host>);
FrostFSClient frostFSClient = new FrostFSClient(clientSettings);
// Create container
var placementPolicy = new PlacementPolicy(new Replica[]{new Replica(3)}, true, 1);
var prmContainerCreate = new PrmContainerCreate(new Container(placementPolicy));
var containerId = frostFSClient.createContainer(prmContainerCreate, callContext);
// Get container
var prmContainerGet = new PrmContainerGet(containerId);
var container = frostFSClient.getContainer(prmContainerGet, callContext);
// List containers
var containerIds = frostFSClient.listContainers(new PrmContainerGetAll(), callContext);
// Delete container
var prmContainerDelete = new PrmContainerDelete(containerId);
frostFSClient.deleteContainer(prmContainerDelete, callContext);
}
}
```
### Object operations
```java
import info.frostfs.sdk.dto.object.*;
import info.frostfs.sdk.enums.ObjectType;
import info.frostfs.sdk.jdo.ClientSettings;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.object.*;
import org.apache.commons.lang3.ArrayUtils;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import static java.util.Objects.isNull;
public class ObjectExample {
public void example() {
CallContext callContext = new CallContext();
ClientSettings clientSettings = new ClientSettings(<your_key>, <your_host>);
FrostFSClient frostFSClient = new FrostFSClient(clientSettings);
// Put object
ObjectId objectId;
try (FileInputStream file = new FileInputStream("/path/to/file/cat.jpg")) {
var attribute = new ObjectAttribute("Filename", "cat.jpg");
var cat = new ObjectHeader(containerId, ObjectType.REGULAR, attribute);
var prmObjectPut = PrmObjectPut.builder().objectHeader(cat).build();
var writer = frostFSClient.putObject(prmObjectPut, callContext);
writer.write(file.readAllBytes());
objectId = writer.complete();
} catch (IOException e) {
throw new RuntimeException(e);
}
// Get object
var prmObjectGet = new PrmObjectGet(containerId, objectId);
ObjectFrostFS object = frostFSClient.getObject(prmObjectGet, callContext);
var reader = object.getObjectReader();
var chunk = reader.readChunk();
var length = chunk.length;
byte[] buffer = null;
while (length > 0) {
buffer = isNull(buffer) ? chunk : ArrayHelper.concat(buffer, chunk);
chunk = object.getObjectReader().readChunk();
length = ArrayUtils.isEmpty(chunk) ? 0 : chunk.length;
}
try (FileOutputStream fos = new FileOutputStream("/path/to/file/newCat.jpg")) {
fos.write(buffer);
} catch (Exception ignored) {
}
// Get object header
var prmObjectHeadGet = new PrmObjectHeadGet(containerId, objectId);
var objectHeader = frostFSClient.getObjectHead(prmObjectHeadGet, callContext);
// Search regular objects
var prmObjectSearch = new PrmObjectSearch(containerId, new ObjectFilter.FilterByRootObject());
var objectIds = frostFSClient.searchObjects(prmObjectSearch, callContext);
// Delete object
var prmObjectDelete = new PrmObjectDelete(containerId, objectId);
frostFSClient.deleteObject(prmObjectDelete, callContext);
}
}
```
### Pool init
```java
import info.frostfs.sdk.jdo.ECDsa;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.pool.NodeParameters;
import info.frostfs.sdk.jdo.pool.PoolInitParameters;
import info.frostfs.sdk.pool.Pool;
public class PoolExample {
public static void example() {
CallContext callContext = new CallContext();
//Init
var nodeParam1 = new NodeParameters(1, <your_host1>, 1);
var nodeParam2 = new NodeParameters(1, <your_host2>, 1);
var nodeParam3 = new NodeParameters(1, <your_host3>, 1);
var nodeParam4 = new NodeParameters(1, <your_host4>, 1);
PoolInitParameters initParameters = new PoolInitParameters();
initParameters.setKey(new ECDsa(<your_key>));
initParameters.setNodeParams(new NodeParameters[]{nodeParam1, nodeParam2, nodeParam3, nodeParam4});
Pool pool = new Pool(initParameters);
//Dial (Required!)
pool.dial(callContext);
}
}
```
Useful things may be published only in [other branches](../../../branches)

View file

@ -1,84 +0,0 @@
<!DOCTYPE module PUBLIC
        "-//Puppy Crawl//DTD Check Configuration 1.3//EN"
        "http://www.puppycrawl.com/dtds/configuration_1_3.dtd">
<!-- Checkstyle configuration for the frostfs-sdk-java modules. -->
<module name="Checker">
    <property name="charset" value="UTF-8"/>
    <property name="severity" value="error"/>
    <property name="fileExtensions" value="java, properties, xml"/>
    <module name="SuppressWarningsFilter"/>
    <module name="FileTabCharacter">
        <property name="eachLine" value="true"/>
    </module>
    <module name="NewlineAtEndOfFile"/>
    <!-- Checks for Size Violations. -->
    <!-- See http://checkstyle.sf.net/config_sizes.html -->
    <module name="LineLength">
        <property name="max" value="120"/>
    </module>
    <module name="TreeWalker">
        <property name="tabWidth" value="4"/>
        <module name="Regexp">
            <property name="message" value="Blank line at end of the block is not allowed"/>
            <property name="format" value="^\s*$^\s*\}"/>
            <property name="ignoreComments" value="true"/>
            <property name="illegalPattern" value="true"/>
        </module>
        <module name="HideUtilityClassConstructor"/>
        <module name="SuppressWarningsHolder"/>
        <!-- Checks for Naming Conventions. -->
        <!-- See http://checkstyle.sf.net/config_naming.html -->
        <module name="AbbreviationAsWordInName">
            <property name="tokens" value="METHOD_DEF,CLASS_DEF"/>
            <property name="ignoreStatic" value="false"/>
        </module>
        <!-- Checks for imports -->
        <!-- See http://checkstyle.sf.net/config_import.html -->
        <module name="IllegalImport"> <!-- defaults to sun.* packages -->
        </module>
        <module name="RedundantImport"/>
        <module name="UnusedImports"/>
        <module name="ConstantName">
            <property name="applyToPrivate" value="false"/>
        </module>
        <module name="WhitespaceAround">
            <property name="allowEmptyConstructors" value="true"/>
            <property name="allowEmptyMethods" value="true"/>
        </module>
        <module name="MethodParamPad"/>
        <module name="EmptyForInitializerPad"/>
        <module name="MissingOverride"/>
        <module name="ParameterNumber">
            <property name="ignoreOverriddenMethods" value="true"/>
        </module>
        <!-- Checks for blocks. You know, those {}'s -->
        <!-- See http://checkstyle.sf.net/config_blocks.html -->
        <module name="AvoidNestedBlocks"/>
        <module name="EmptyBlock"/>
        <module name="LeftCurly"/>
        <module name="NeedBraces"/>
        <module name="RightCurly"/>
        <!-- Checks for common coding problems -->
        <!-- See http://checkstyle.sf.net/config_coding.html -->
        <module name="MagicNumber">
            <property name="ignoreHashCodeMethod" value="true"/>
            <property name="ignoreAnnotation" value="true"/>
            <property name="ignoreFieldDeclaration" value="true"/>
            <property name="ignoreNumbers" value="-1, 0, 1, 2, 4"/>
        </module>
        <module name="RequireThis"/>
        <module name="DeclarationOrder"/>
    </module>
</module>

View file

@ -1,63 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xmlns="http://maven.apache.org/POM/4.0.0"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <!-- Child module of the frostfs-sdk-java aggregator; the version is
         inherited via the ${revision} CI-friendly property. -->
    <parent>
        <groupId>info.frostfs.sdk</groupId>
        <artifactId>frostfs-sdk-java</artifactId>
        <version>${revision}</version>
    </parent>

    <artifactId>client</artifactId>

    <properties>
        <maven.compiler.source>11</maven.compiler.source>
        <maven.compiler.target>11</maven.compiler.target>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    </properties>

    <dependencies>
        <!-- Sibling SDK modules, same reactor version -->
        <dependency>
            <groupId>info.frostfs.sdk</groupId>
            <artifactId>cryptography</artifactId>
            <version>${revision}</version>
        </dependency>
        <dependency>
            <groupId>info.frostfs.sdk</groupId>
            <artifactId>models</artifactId>
            <version>${revision}</version>
        </dependency>
        <dependency>
            <groupId>info.frostfs.sdk</groupId>
            <artifactId>exceptions</artifactId>
            <version>${revision}</version>
        </dependency>
        <dependency>
            <groupId>commons-codec</groupId>
            <artifactId>commons-codec</artifactId>
            <version>1.17.0</version>
        </dependency>
        <!-- Prometheus instrumentation -->
        <dependency>
            <groupId>io.prometheus</groupId>
            <artifactId>simpleclient</artifactId>
            <version>0.16.0</version>
        </dependency>
        <dependency>
            <groupId>io.prometheus</groupId>
            <artifactId>simpleclient_hotspot</artifactId>
            <version>0.16.0</version>
        </dependency>
        <dependency>
            <groupId>io.prometheus</groupId>
            <artifactId>simpleclient_common</artifactId>
            <version>0.16.0</version>
        </dependency>
        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-api</artifactId>
            <version>2.0.16</version>
        </dependency>
    </dependencies>
</project>

View file

@ -1,248 +0,0 @@
package info.frostfs.sdk;
import frostfs.accounting.Types;
import info.frostfs.sdk.dto.ape.Chain;
import info.frostfs.sdk.dto.container.Container;
import info.frostfs.sdk.dto.container.ContainerId;
import info.frostfs.sdk.dto.netmap.NetmapSnapshot;
import info.frostfs.sdk.dto.netmap.NodeInfo;
import info.frostfs.sdk.dto.netmap.Version;
import info.frostfs.sdk.dto.object.ObjectFrostFS;
import info.frostfs.sdk.dto.object.ObjectHeader;
import info.frostfs.sdk.dto.object.ObjectId;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.exceptions.ProcessFrostFSException;
import info.frostfs.sdk.jdo.ClientEnvironment;
import info.frostfs.sdk.jdo.ClientSettings;
import info.frostfs.sdk.jdo.ECDsa;
import info.frostfs.sdk.jdo.NetworkSettings;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.ape.PrmApeChainAdd;
import info.frostfs.sdk.jdo.parameters.ape.PrmApeChainList;
import info.frostfs.sdk.jdo.parameters.ape.PrmApeChainRemove;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerCreate;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerDelete;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerGet;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerGetAll;
import info.frostfs.sdk.jdo.parameters.object.*;
import info.frostfs.sdk.jdo.parameters.object.patch.PrmObjectPatch;
import info.frostfs.sdk.jdo.parameters.object.patch.PrmRangeGet;
import info.frostfs.sdk.jdo.parameters.object.patch.PrmRangeHashGet;
import info.frostfs.sdk.jdo.parameters.session.PrmSessionCreate;
import info.frostfs.sdk.jdo.result.ObjectHeaderResult;
import info.frostfs.sdk.pool.SessionCache;
import info.frostfs.sdk.pool.WrapperPrm;
import info.frostfs.sdk.services.CommonClient;
import info.frostfs.sdk.services.impl.*;
import info.frostfs.sdk.services.impl.interceptor.Configuration;
import info.frostfs.sdk.services.impl.interceptor.MonitoringClientInterceptor;
import info.frostfs.sdk.services.impl.rwhelper.ObjectWriter;
import info.frostfs.sdk.services.impl.rwhelper.RangeReader;
import info.frostfs.sdk.utils.Validator;
import io.grpc.Channel;
import io.grpc.ClientInterceptors;
import io.grpc.ManagedChannel;
import org.apache.commons.lang3.StringUtils;
import java.util.List;
import static info.frostfs.sdk.constants.ErrorConst.VERSION_UNSUPPORTED_TEMPLATE;
import static info.frostfs.sdk.tools.GrpcClient.initGrpcChannel;
import static java.util.Objects.nonNull;
public class FrostFSClient implements CommonClient {
private static final MonitoringClientInterceptor MONITORING_CLIENT_INTERCEPTOR =
MonitoringClientInterceptor.create(Configuration.allMetrics());
private final ContainerClientImpl containerClientImpl;
private final ObjectClientImpl objectClientImpl;
private final ApeManagerClientImpl apeManagerClient;
private final NetmapClientImpl netmapClientImpl;
private final SessionClientImpl sessionClientImpl;
private final ObjectToolsImpl objectToolsImpl;
private final AccountingClientImpl accountingClient;
private final ManagedChannel channel;
/**
 * Builds a client from user-facing settings.
 * Uses the caller-supplied gRPC channel when one is set, otherwise opens a new
 * one from the settings; derives the signing key from either a WIF string or a
 * wallet + password pair. Throws if the connected node's version is unsupported.
 */
public FrostFSClient(ClientSettings clientSettings) {
    Validator.validate(clientSettings);
    // Reuse an externally managed channel when provided.
    this.channel = nonNull(clientSettings.getChannel())
            ? clientSettings.getChannel()
            : initGrpcChannel(clientSettings);
    // Blank WIF means wallet-based credentials were supplied instead.
    var ecdsa = StringUtils.isBlank(clientSettings.getWif())
            ? new ECDsa(clientSettings.getWallet(), clientSettings.getPassword())
            : new ECDsa(clientSettings.getWif());
    // All calls go through the Prometheus monitoring interceptor.
    Channel interceptChannel = ClientInterceptors.intercept(channel, MONITORING_CLIENT_INTERCEPTOR);
    ClientEnvironment clientEnvironment = new ClientEnvironment(
            ecdsa, interceptChannel, new Version(), this, new SessionCache(0)
    );
    Validator.validate(clientEnvironment);
    this.containerClientImpl = new ContainerClientImpl(clientEnvironment);
    this.objectClientImpl = new ObjectClientImpl(clientEnvironment);
    this.apeManagerClient = new ApeManagerClientImpl(clientEnvironment);
    this.netmapClientImpl = new NetmapClientImpl(clientEnvironment);
    this.sessionClientImpl = new SessionClientImpl(clientEnvironment);
    this.objectToolsImpl = new ObjectToolsImpl(clientEnvironment);
    this.accountingClient = new AccountingClientImpl(clientEnvironment);
    checkFrostFSVersionSupport(clientEnvironment.getVersion());
}
/**
 * Builds a client for pool usage: the pool supplies the node address, the
 * signing key, and a shared session cache. Always opens a fresh gRPC channel.
 * Throws if the connected node's version is unsupported.
 */
public FrostFSClient(WrapperPrm prm, SessionCache cache) {
    this.channel = initGrpcChannel(prm.getAddress());
    // All calls go through the Prometheus monitoring interceptor.
    Channel interceptChannel = ClientInterceptors.intercept(channel, MONITORING_CLIENT_INTERCEPTOR);
    ClientEnvironment clientEnvironment =
            new ClientEnvironment(prm.getKey(), interceptChannel, new Version(), this, cache);
    Validator.validate(clientEnvironment);
    this.containerClientImpl = new ContainerClientImpl(clientEnvironment);
    this.objectClientImpl = new ObjectClientImpl(clientEnvironment);
    this.apeManagerClient = new ApeManagerClientImpl(clientEnvironment);
    this.netmapClientImpl = new NetmapClientImpl(clientEnvironment);
    this.sessionClientImpl = new SessionClientImpl(clientEnvironment);
    this.objectToolsImpl = new ObjectToolsImpl(clientEnvironment);
    this.accountingClient = new AccountingClientImpl(clientEnvironment);
    checkFrostFSVersionSupport(clientEnvironment.getVersion());
}
/**
 * Asks the connected node for its local info and fails fast when the node's
 * reported version does not support {@code version} (this client's version).
 *
 * @param version the SDK-side version to check against the node
 * @throws ProcessFrostFSException when the node version is unsupported
 */
private void checkFrostFSVersionSupport(Version version) {
    var nodeVersion = netmapClientImpl.getLocalNodeInfo(new CallContext()).getVersion();
    if (nodeVersion.isSupported(version)) {
        return;
    }
    throw new ProcessFrostFSException(String.format(VERSION_UNSUPPORTED_TEMPLATE, nodeVersion));
}
/** Fetches a single container; delegates to {@code containerClientImpl}. */
@Override
public Container getContainer(PrmContainerGet args, CallContext ctx) {
return containerClientImpl.getContainer(args, ctx);
}
/** Lists container IDs owned by the caller; delegates to {@code containerClientImpl}. */
@Override
public List<ContainerId> listContainers(PrmContainerGetAll args, CallContext ctx) {
return containerClientImpl.listContainers(args, ctx);
}
/** Creates a container and returns its ID; delegates to {@code containerClientImpl}. */
@Override
public ContainerId createContainer(PrmContainerCreate args, CallContext ctx) {
return containerClientImpl.createContainer(args, ctx);
}
/** Deletes a container; delegates to {@code containerClientImpl}. */
@Override
public void deleteContainer(PrmContainerDelete args, CallContext ctx) {
containerClientImpl.deleteContainer(args, ctx);
}
/** Fetches an object's header (and/or split info); delegates to {@code objectClientImpl}. */
@Override
public ObjectHeaderResult getObjectHead(PrmObjectHeadGet args, CallContext ctx) {
return objectClientImpl.getObjectHead(args, ctx);
}
/** Fetches a full object; delegates to {@code objectClientImpl}. */
@Override
public ObjectFrostFS getObject(PrmObjectGet args, CallContext ctx) {
return objectClientImpl.getObject(args, ctx);
}
/** Opens a streaming object upload; delegates to {@code objectClientImpl}. */
@Override
public ObjectWriter putObject(PrmObjectPut args, CallContext ctx) {
return objectClientImpl.putObject(args, ctx);
}
/** Uploads an object with client-side splitting; delegates to {@code objectClientImpl}. */
@Override
public ObjectId putClientCutObject(PrmObjectClientCutPut args, CallContext ctx) {
return objectClientImpl.putClientCutObject(args, ctx);
}
/** Uploads a prepared object in a single request; delegates to {@code objectClientImpl}. */
@Override
public ObjectId putSingleObject(PrmObjectSinglePut args, CallContext ctx) {
return objectClientImpl.putSingleObject(args, ctx);
}
/** Deletes an object; delegates to {@code objectClientImpl}. */
@Override
public void deleteObject(PrmObjectDelete args, CallContext ctx) {
objectClientImpl.deleteObject(args, ctx);
}
/** Searches objects by filters; delegates to {@code objectClientImpl}. */
@Override
public Iterable<ObjectId> searchObjects(PrmObjectSearch args, CallContext ctx) {
return objectClientImpl.searchObjects(args, ctx);
}
/** Reads a byte range of an object's payload; delegates to {@code objectClientImpl}. */
@Override
public RangeReader getRange(PrmRangeGet args, CallContext ctx) {
return objectClientImpl.getRange(args, ctx);
}
/** Computes hashes over payload ranges; delegates to {@code objectClientImpl}. */
@Override
public byte[][] getRangeHash(PrmRangeHashGet args, CallContext ctx) {
return objectClientImpl.getRangeHash(args, ctx);
}
/** Patches an object's payload/attributes; delegates to {@code objectClientImpl}. */
@Override
public ObjectId patchObject(PrmObjectPatch args, CallContext ctx) {
return objectClientImpl.patchObject(args, ctx);
}
/** Adds an APE rule chain; delegates to {@code apeManagerClient}. */
@Override
public byte[] addChain(PrmApeChainAdd args, CallContext ctx) {
return apeManagerClient.addChain(args, ctx);
}
/** Removes an APE rule chain; delegates to {@code apeManagerClient}. */
@Override
public void removeChain(PrmApeChainRemove args, CallContext ctx) {
apeManagerClient.removeChain(args, ctx);
}
/** Lists APE rule chains for a target; delegates to {@code apeManagerClient}. */
@Override
public List<Chain> listChains(PrmApeChainList args, CallContext ctx) {
return apeManagerClient.listChains(args, ctx);
}
/** Fetches the current network-map snapshot; delegates to {@code netmapClientImpl}. */
@Override
public NetmapSnapshot getNetmapSnapshot(CallContext ctx) {
return netmapClientImpl.getNetmapSnapshot(ctx);
}
/** Fetches info about the node this client is connected to; delegates to {@code netmapClientImpl}. */
@Override
public NodeInfo getLocalNodeInfo(CallContext ctx) {
return netmapClientImpl.getLocalNodeInfo(ctx);
}
/** Fetches network-wide settings; delegates to {@code netmapClientImpl}. */
@Override
public NetworkSettings getNetworkSettings(CallContext ctx) {
return netmapClientImpl.getNetworkSettings(ctx);
}
/** Creates a session token; delegates to {@code sessionClientImpl}. */
@Override
public SessionToken createSession(PrmSessionCreate args, CallContext ctx) {
return sessionClientImpl.createSession(args, ctx);
}
/** Creates a session token in protobuf form (internal use); delegates to {@code sessionClientImpl}. */
public frostfs.session.Types.SessionToken createSessionInternal(PrmSessionCreate args, CallContext ctx) {
return sessionClientImpl.createSessionInternal(args, ctx);
}
/** Computes the object ID for a header locally (no network call); delegates to {@code objectToolsImpl}. */
@Override
public ObjectId calculateObjectId(ObjectHeader header) {
return objectToolsImpl.calculateObjectId(header);
}
/** Fetches the account balance; delegates to {@code accountingClient}. */
@Override
public Types.Decimal getBalance(CallContext ctx) {
return accountingClient.getBalance(ctx);
}
/**
 * Probes the connection by issuing a balance request; the result is discarded
 * and {@code null} is always returned.
 * NOTE(review): presumably the return value is reserved for an error/status
 * string by the interface contract — confirm against the declaring interface.
 */
@Override
public String dial(CallContext ctx) {
accountingClient.getBalance(ctx);
return null;
}
/**
 * Initiates an orderly shutdown of the underlying gRPC channel.
 * NOTE(review): shutdown() is asynchronous and termination is not awaited here.
 */
public void close() {
channel.shutdown();
}
}

View file

@ -1,14 +0,0 @@
package info.frostfs.sdk.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Validation marker: at least one of the listed fields of the annotated type
 * must be non-empty. Checked at runtime by the SDK's {@code Validator}.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.TYPE, ElementType.ANNOTATION_TYPE})
public @interface AtLeastOneIsFilled {
// Error template; %s is replaced with the comma-joined field names.
String message() default "At least one of the fields (%s) must be filled in";
// Names of the fields, one of which must be filled.
String[] fields();
}

View file

@ -1,12 +0,0 @@
package info.frostfs.sdk.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Container annotation allowing several {@link AtLeastOneIsFilled} constraints
 * on one type; each inner constraint is validated independently.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.TYPE, ElementType.ANNOTATION_TYPE})
public @interface ComplexAtLeastOneIsFilled {
AtLeastOneIsFilled[] value();
}

View file

@ -1,11 +0,0 @@
package info.frostfs.sdk.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/** Validation marker: the annotated field must not be null, empty, or whitespace-only. */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.FIELD)
public @interface NotBlank {
}

View file

@ -1,11 +0,0 @@
package info.frostfs.sdk.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/** Validation marker: the annotated field must not be null. */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.FIELD)
public @interface NotNull {
}

View file

@ -1,11 +0,0 @@
package info.frostfs.sdk.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/** Validation marker: the annotated field's own constraints must be validated recursively. */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.FIELD)
public @interface Validate {
}

View file

@ -1,10 +0,0 @@
package info.frostfs.sdk.constants;
/** Cryptography-related constants used for signing. Not instantiable. */
public class CryptoConst {
// JCA algorithm name: raw ECDSA over a pre-hashed input, P1363 (r||s) encoding.
public static final String SIGNATURE_ALGORITHM = "NONEwithECDSAinP1363Format";
// RFC 6979 deterministic ECDSA signature length in bytes (r||s).
public static final int RFC6979_SIGNATURE_SIZE = 64;
// Signature-with-recovery length in bytes.
public static final int HASH_SIGNATURE_SIZE = 65;
private CryptoConst() {
}
}

View file

@ -1,14 +0,0 @@
package info.frostfs.sdk.constants;
/** Default tuning values for the connection pool. Not instantiable. */
public class PoolConst {
public static final int DEFAULT_SESSION_TOKEN_EXPIRATION_DURATION = 100; // in epochs
public static final int DEFAULT_ERROR_THRESHOLD = 100;
public static final int DEFAULT_GRACEFUL_CLOSE_ON_SWITCH_TIMEOUT = 10; // Seconds
public static final int DEFAULT_REBALANCE_INTERVAL = 15; // Seconds
public static final int DEFAULT_HEALTHCHECK_TIMEOUT = 4; // Seconds
public static final int DEFAULT_DIAL_TIMEOUT = 5; // Seconds
public static final int DEFAULT_STREAM_TIMEOUT = 10; // Seconds
private PoolConst() {
}
}

View file

@ -1,30 +0,0 @@
package info.frostfs.sdk.constants;
/** Constants for (de)serializing APE rule chains in the Neo wire format. Not instantiable. */
public class RuleConst {
// Serialization format version written as the first byte.
public static final byte VERSION = 0;
public static final int BYTE_SIZE = 1;
public static final int U_INT_8_SIZE = BYTE_SIZE;
public static final int BOOL_SIZE = BYTE_SIZE;
// Sentinel length encoding a null slice.
public static final long NULL_SLICE = -1L;
public static final int NULL_SLICE_SIZE = 1;
public static final byte BYTE_TRUE = 1;
public static final byte BYTE_FALSE = 0;
// maxSliceLen taken from
// https://github.com/neo-project/neo/blob/38218bbee5bbe8b33cd8f9453465a19381c9a547/src/Neo/IO/Helper.cs#L77
public static final int MAX_SLICE_LENGTH = 0x1000000;
// Maximum byte length of a varint encoding.
public static final int MAX_VAR_INT_LENGTH = 10;
public static final int CHAIN_MARSHAL_VERSION = 0;
// Masks/offsets for 7-bit (LEB128-style) varint encoding.
public static final long OFFSET127 = 0x7f;
public static final long OFFSET128 = 0x80;
public static final int UNSIGNED_SERIALIZE_SIZE = 7;
private RuleConst() {
}
}

View file

@ -1,23 +0,0 @@
package info.frostfs.sdk.enums;
/** Health states a pooled connection can be in. */
public enum HealthyStatus {
// status HEALTHY is set when connection is ready to be used by the pool.
HEALTHY(1),
// status UNHEALTHY_ON_REQUEST is set when communication after dialing to the
// endpoint is failed due to immediate or accumulated errors, connection is
// available and pool should close it before re-establishing connection once again.
UNHEALTHY_ON_REQUEST(2),
// status UNHEALTHY_ON_DIAL is set when dialing to the endpoint is failed,
// so there is no connection to the endpoint, and pool should not close it
// before re-establishing connection once again.
UNHEALTHY_ON_DIAL(3),
;
// Numeric code for the status; stored atomically by the status monitor.
public final int value;
HealthyStatus(int value) {
this.value = value;
}
}

View file

@ -1,29 +0,0 @@
package info.frostfs.sdk.enums;
/**
 * Enumerates SDK methods for which per-method statistics are collected; each
 * constant carries the method's display name.
 * NOTE(review): the first two names use lowerCamelCase while the rest are
 * UpperCamelCase — these strings are runtime values (likely metric labels), so
 * the inconsistency is preserved here; confirm intent before normalizing.
 */
public enum MethodIndex {
METHOD_BALANCE_GET("balanceGet"),
METHOD_CONTAINER_PUT("containerPut"),
METHOD_CONTAINER_GET("ContainerGet"),
METHOD_CONTAINER_LIST("ContainerList"),
METHOD_CONTAINER_DELETE("ContainerDelete"),
METHOD_ENDPOINT_INFO("EndpointInfo"),
METHOD_NETWORK_INFO("NetworkInfo"),
METHOD_NETMAP_SNAPSHOT("NetMapSnapshot"),
METHOD_OBJECT_PUT("ObjectPut"),
METHOD_OBJECT_DELETE("ObjectDelete"),
METHOD_OBJECT_GET("ObjectGet"),
METHOD_OBJECT_HEAD("ObjectHead"),
METHOD_OBJECT_RANGE("ObjectRange"),
METHOD_OBJECT_PATCH("ObjectPatch"),
METHOD_SESSION_CREATE("SessionCreate"),
METHOD_APE_MANAGER_ADD_CHAIN("APEManagerAddChain"),
METHOD_APE_MANAGER_REMOVE_CHAIN("APEManagerRemoveChain"),
METHOD_APE_MANAGER_LIST_CHAINS("APEManagerListChains"),
;
// Display name associated with the method.
public final String methodName;
MethodIndex(String methodName) {
this.methodName = methodName;
}
}

View file

@ -1,6 +0,0 @@
package info.frostfs.sdk.enums;
/** Expected terminal condition when polling for an operation's completion. */
public enum WaitExpects {
EXISTS,
REMOVED
}

View file

@ -1,19 +0,0 @@
package info.frostfs.sdk.exceptions;
import info.frostfs.sdk.dto.response.ResponseStatus;
import lombok.Getter;
/**
 * Thrown when a FrostFS response carries a non-success status; wraps the
 * {@link ResponseStatus} when available (may be null for the message-only form).
 */
@Getter
public class ResponseFrostFSException extends FrostFSException {
private final ResponseStatus status;
public ResponseFrostFSException(ResponseStatus status) {
super(status.toString());
this.status = status;
}
public ResponseFrostFSException(String message) {
super(message);
this.status = null;
}
}

View file

@ -1,57 +0,0 @@
package info.frostfs.sdk.jdo;
import info.frostfs.sdk.FrostFSClient;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.annotations.Validate;
import info.frostfs.sdk.dto.netmap.Version;
import info.frostfs.sdk.dto.object.OwnerId;
import info.frostfs.sdk.pool.SessionCache;
import io.grpc.Channel;
import lombok.Getter;
import lombok.Setter;
import org.apache.commons.lang3.StringUtils;
import static info.frostfs.sdk.Helper.getHexString;
import static info.frostfs.sdk.pool.Pool.formCacheKey;
/**
 * Shared per-client state handed to every service client implementation:
 * signing key, owner derived from it, gRPC channel, SDK version, a back-pointer
 * to the owning {@link FrostFSClient}, and the session cache.
 */
@Getter
@Setter
public class ClientEnvironment {
@NotNull
private final OwnerId ownerId;
@NotNull
private final Version version;
@NotNull
@Validate
private final ECDsa key;
@NotNull
private final Channel channel;
@NotNull
private final FrostFSClient frostFSClient;
// Cache key for session lookups; lazily built in getSessionKey().
private String sessionKey;
// Endpoint authority string, taken from the channel.
private String address;
private NetworkSettings networkSettings;
private SessionCache sessionCache;
public ClientEnvironment(ECDsa key, Channel channel, Version version, FrostFSClient frostFSClient,
SessionCache sessionCache) {
this.key = key;
// Owner is derived from the key's account address.
this.ownerId = new OwnerId(key.getAccount().getAddress());
this.version = version;
this.channel = channel;
this.frostFSClient = frostFSClient;
this.sessionCache = sessionCache;
this.address = channel.authority();
}
/**
 * Returns the session cache key (address + hex public key), building it on
 * first use. NOTE(review): lazy init is not synchronized — confirm this is
 * only reached from a single thread per environment.
 */
public String getSessionKey() {
if (StringUtils.isBlank(sessionKey)) {
this.sessionKey = formCacheKey(address, getHexString(key.getPublicKeyByte()));
}
return sessionKey;
}
}

View file

@ -1,61 +0,0 @@
package info.frostfs.sdk.jdo;
import info.frostfs.sdk.annotations.AtLeastOneIsFilled;
import info.frostfs.sdk.annotations.ComplexAtLeastOneIsFilled;
import io.grpc.ChannelCredentials;
import io.grpc.ManagedChannel;
import lombok.Getter;
import lombok.experimental.FieldNameConstants;
import java.io.File;
/**
 * Connection settings for a single client. Validation requires at least one of
 * {host, channel} for transport and one of {wif, wallet} for credentials.
 */
@Getter
@FieldNameConstants
@ComplexAtLeastOneIsFilled(value = {
@AtLeastOneIsFilled(fields = {ClientSettings.Fields.host, ClientSettings.Fields.channel}),
@AtLeastOneIsFilled(fields = {ClientSettings.Fields.wif, ClientSettings.Fields.wallet}),
})
public class ClientSettings {
private String wif;
private File wallet;
private String password;
private String host;
private ChannelCredentials credentials;
private ManagedChannel channel;
/** WIF key + host endpoint. */
public ClientSettings(String wif, String host) {
this.wif = wif;
this.host = host;
}
/** WIF key + host endpoint with explicit channel credentials. */
public ClientSettings(String wif, String host, ChannelCredentials credentials) {
this.wif = wif;
this.host = host;
this.credentials = credentials;
}
/** WIF key + caller-managed gRPC channel. */
public ClientSettings(String wif, ManagedChannel channel) {
this.wif = wif;
this.channel = channel;
}
/** NEP-6 wallet file + password + host endpoint. */
public ClientSettings(File wallet, String password, String host) {
this.wallet = wallet;
this.password = password;
this.host = host;
}
/** NEP-6 wallet file + password + host with explicit channel credentials. */
public ClientSettings(File wallet, String password, String host, ChannelCredentials credentials) {
this.wallet = wallet;
this.password = password;
this.host = host;
this.credentials = credentials;
}
/** NEP-6 wallet file + password + caller-managed gRPC channel. */
public ClientSettings(File wallet, String password, ManagedChannel channel) {
this.wallet = wallet;
this.password = password;
this.channel = channel;
}
}

View file

@ -1,73 +0,0 @@
package info.frostfs.sdk.jdo;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.exceptions.FrostFSException;
import info.frostfs.sdk.exceptions.ValidationFrostFSException;
import io.neow3j.wallet.Account;
import io.neow3j.wallet.nep6.NEP6Account;
import io.neow3j.wallet.nep6.NEP6Wallet;
import lombok.Getter;
import org.apache.commons.lang3.StringUtils;
import java.io.File;
import java.io.FileInputStream;
import java.security.PrivateKey;
import java.util.Optional;
import static info.frostfs.sdk.KeyExtension.loadPrivateKey;
import static info.frostfs.sdk.constants.ErrorConst.WALLET_IS_INVALID;
import static info.frostfs.sdk.constants.ErrorConst.WIF_IS_INVALID;
import static info.frostfs.sdk.constants.FieldConst.EMPTY_STRING;
import static io.neow3j.wallet.Wallet.OBJECT_MAPPER;
import static java.util.Objects.isNull;
/**
 * Holds an EC key pair loaded either from a WIF string or a NEP-6 wallet file,
 * exposing raw key bytes, a JCA {@link PrivateKey}, and the neow3j account.
 */
@Getter
public class ECDsa {
@NotNull
private final byte[] publicKeyByte;
@NotNull
private final byte[] privateKeyByte;
@NotNull
private final PrivateKey privateKey;
@NotNull
private final Account account;
/**
 * Loads the key pair from a WIF-encoded private key.
 *
 * @throws ValidationFrostFSException when {@code wif} is null or empty
 */
public ECDsa(String wif) {
if (StringUtils.isEmpty(wif)) {
throw new ValidationFrostFSException(WIF_IS_INVALID);
}
this.account = Account.fromWIF(wif);
this.privateKeyByte = account.getECKeyPair().getPrivateKey().getBytes();
// Compressed (33-byte) SEC1 encoding.
this.publicKeyByte = account.getECKeyPair().getPublicKey().getEncoded(true);
this.privateKey = loadPrivateKey(privateKeyByte);
}
/**
 * Loads the key pair from a NEP-6 wallet file, preferring the wallet's default
 * account and falling back to the first one; decrypts with {@code password}
 * (empty string when null).
 *
 * @throws ValidationFrostFSException when {@code walletFile} is null
 * @throws FrostFSException when reading or decrypting the wallet fails
 */
public ECDsa(File walletFile, String password) {
if (isNull(walletFile)) {
throw new ValidationFrostFSException(WALLET_IS_INVALID);
}
try (var walletStream = new FileInputStream(walletFile)) {
NEP6Wallet nep6Wallet = OBJECT_MAPPER.readValue(walletStream, NEP6Wallet.class);
Optional<NEP6Account> defaultAccount = nep6Wallet.getAccounts().stream()
.filter(NEP6Account::getDefault)
.findFirst();
var account = defaultAccount.map(Account::fromNEP6Account)
.orElseGet(() -> Account.fromNEP6Account(nep6Wallet.getAccounts().get(0)));
account.decryptPrivateKey(isNull(password) ? EMPTY_STRING : password);
this.account = account;
this.privateKeyByte = account.getECKeyPair().getPrivateKey().getBytes();
this.publicKeyByte = account.getECKeyPair().getPublicKey().getEncoded(true);
this.privateKey = loadPrivateKey(privateKeyByte);
// NOTE(review): wrapping loses the original cause/stack trace — consider a
// cause-preserving constructor if FrostFSException supports one.
} catch (Exception exp) {
throw new FrostFSException(exp.getMessage());
}
}
}

View file

@ -1,28 +0,0 @@
package info.frostfs.sdk.jdo;
import lombok.Getter;
import lombok.Setter;
import java.util.HashMap;
import java.util.Map;
/**
 * Mutable snapshot of network-wide configuration values reported by a node;
 * settings without a dedicated field land in {@code unnamedSettings}.
 */
@Getter
@Setter
public class NetworkSettings {
private Long auditFee;
private Long basicIncomeRate;
private Long containerFee;
private Long containerAliasFee;
private Long innerRingCandidateFee;
private Long withdrawFee;
private Long epochDuration;
private Long iRCandidateFee;
private Long maxObjectSize;
private Long maxECDataCount;
private Long maxECParityCount;
private Long withdrawalFee;
private Boolean homomorphicHashingDisabled;
private Boolean maintenanceModeAllowed;
// Catch-all for settings not mapped to a named field above.
private Map<String, Object> unnamedSettings = new HashMap<>();
}

View file

@ -1,13 +0,0 @@
package info.frostfs.sdk.jdo;
import info.frostfs.sdk.dto.object.ObjectId;
import lombok.AllArgsConstructor;
import lombok.Getter;
/** Result of an object upload: the assigned ID and the stored payload size. */
@Getter
@AllArgsConstructor
public class PutObjectResult {
private final ObjectId objectId;
private final int objectSize;
}

View file

@ -1,23 +0,0 @@
package info.frostfs.sdk.jdo.parameters;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.concurrent.TimeUnit;
import static info.frostfs.sdk.constants.AppConst.DEFAULT_GRPC_TIMEOUT;
/**
 * Per-call deadline settings for gRPC requests; the no-arg form uses the
 * SDK default timeout in seconds.
 */
@Getter
@Builder
@AllArgsConstructor
public class CallContext {
private final long timeout;
private final TimeUnit timeUnit;
public CallContext() {
this.timeout = DEFAULT_GRPC_TIMEOUT;
this.timeUnit = TimeUnit.SECONDS;
}
}

View file

@ -1,26 +0,0 @@
package info.frostfs.sdk.jdo.parameters;
import lombok.AllArgsConstructor;
import lombok.Getter;
import java.time.Duration;
import java.time.LocalDateTime;
/**
 * Polling parameters for wait-until-done operations: overall timeout and the
 * interval between polls (defaults: 120s / 5s).
 */
@Getter
@AllArgsConstructor
public class PrmWait {
private static final Duration DEFAULT_TIMEOUT = Duration.ofSeconds(120);
private static final Duration DEFAULT_POLL_INTERVAL = Duration.ofSeconds(5);
private final Duration timeout;
private final Duration pollInterval;
public PrmWait() {
this.timeout = DEFAULT_TIMEOUT;
this.pollInterval = DEFAULT_POLL_INTERVAL;
}
/** Absolute wall-clock moment (now + timeout) after which polling should stop. */
public LocalDateTime getDeadline() {
return LocalDateTime.now().plus(timeout);
}
}

View file

@ -1,27 +0,0 @@
package info.frostfs.sdk.jdo.parameters.ape;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.ape.Chain;
import info.frostfs.sdk.dto.chain.ChainTarget;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
/** Parameters for adding an APE rule chain to a target. */
@Getter
@Builder
@AllArgsConstructor
public class PrmApeChainAdd {
@NotNull
private Chain chain;
@NotNull
private ChainTarget chainTarget;
// Optional extra request headers.
private Map<String, String> xHeaders;
public PrmApeChainAdd(Chain chain, ChainTarget chainTarget) {
this.chain = chain;
this.chainTarget = chainTarget;
}
}

View file

@ -1,23 +0,0 @@
package info.frostfs.sdk.jdo.parameters.ape;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.chain.ChainTarget;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
/** Parameters for listing the APE rule chains of a target. */
@Getter
@Builder
@AllArgsConstructor
public class PrmApeChainList {
@NotNull
private ChainTarget chainTarget;
// Optional extra request headers.
private Map<String, String> xHeaders;
public PrmApeChainList(ChainTarget chainTarget) {
this.chainTarget = chainTarget;
}
}

View file

@ -1,27 +0,0 @@
package info.frostfs.sdk.jdo.parameters.ape;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.chain.ChainTarget;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
/** Parameters for removing an APE rule chain (by raw chain ID) from a target. */
@Getter
@Builder
@AllArgsConstructor
public class PrmApeChainRemove {
@NotNull
private byte[] chainId;
@NotNull
private ChainTarget chainTarget;
// Optional extra request headers.
private Map<String, String> xHeaders;
public PrmApeChainRemove(byte[] chainId, ChainTarget chainTarget) {
this.chainId = chainId;
this.chainTarget = chainTarget;
}
}

View file

@ -1,33 +0,0 @@
package info.frostfs.sdk.jdo.parameters.container;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.container.Container;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.parameters.PrmWait;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
/** Parameters for creating a container, optionally waiting for completion. */
@Getter
@Builder
@AllArgsConstructor
public class PrmContainerCreate implements SessionContext {
@NotNull
private Container container;
// Optional polling settings for waiting until the container exists.
private PrmWait waitParams;
private SessionToken sessionToken;
private Map<String, String> xHeaders;
public PrmContainerCreate(Container container, PrmWait waitParams) {
this.container = container;
this.waitParams = waitParams;
}
public PrmContainerCreate(Container container) {
this.container = container;
}
}

View file

@ -1,33 +0,0 @@
package info.frostfs.sdk.jdo.parameters.container;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.container.ContainerId;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.parameters.PrmWait;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
/** Parameters for deleting a container, optionally waiting for completion. */
@Getter
@Builder
@AllArgsConstructor
public class PrmContainerDelete implements SessionContext {
@NotNull
private ContainerId containerId;
// Optional polling settings for waiting until the container is removed.
private PrmWait waitParams;
private SessionToken sessionToken;
private Map<String, String> xHeaders;
public PrmContainerDelete(ContainerId containerId, PrmWait waitParams) {
this.containerId = containerId;
this.waitParams = waitParams;
}
public PrmContainerDelete(ContainerId containerId) {
this.containerId = containerId;
}
}

View file

@ -1,23 +0,0 @@
package info.frostfs.sdk.jdo.parameters.container;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.container.ContainerId;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
/** Parameters for fetching a single container by ID. */
@Getter
@Builder
@AllArgsConstructor
public class PrmContainerGet {
@NotNull
private ContainerId containerId;
// Optional extra request headers.
private Map<String, String> xHeaders;
public PrmContainerGet(ContainerId containerId) {
this.containerId = containerId;
}
}

View file

@ -1,16 +0,0 @@
package info.frostfs.sdk.jdo.parameters.container;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import lombok.NoArgsConstructor;
import java.util.Map;
/** Parameters for listing all containers of the caller (headers only). */
@Getter
@Builder
@AllArgsConstructor
@NoArgsConstructor
public class PrmContainerGetAll {
private Map<String, String> xHeaders;
}

View file

@ -1,28 +0,0 @@
package info.frostfs.sdk.jdo.parameters.object;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.object.ObjectHeader;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.io.InputStream;
import java.util.Map;
/**
 * Parameters for uploading an object with client-side splitting: the payload
 * stream is cut into chunks sized by {@code bufferMaxSize} or the supplied
 * {@code customerBuffer}.
 */
@Getter
@Builder
@AllArgsConstructor
public class PrmObjectClientCutPut implements PrmObjectPutBase, SessionContext {
// Mutable per-upload progress state, always present.
@NotNull
private final PutObjectContext putObjectContext = new PutObjectContext();
@NotNull
private ObjectHeader objectHeader;
@NotNull
private InputStream payload;
private int bufferMaxSize;
// Optional caller-provided buffer to reuse instead of allocating one.
private byte[] customerBuffer;
private SessionToken sessionToken;
private Map<String, String> xHeaders;
}

View file

@ -1,30 +0,0 @@
package info.frostfs.sdk.jdo.parameters.object;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.container.ContainerId;
import info.frostfs.sdk.dto.object.ObjectId;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
/** Parameters for deleting an object addressed by container ID + object ID. */
@Getter
@Builder
@AllArgsConstructor
public class PrmObjectDelete implements SessionContext {
@NotNull
private ContainerId containerId;
@NotNull
private ObjectId objectId;
private SessionToken sessionToken;
private Map<String, String> xHeaders;
public PrmObjectDelete(ContainerId containerId, ObjectId objectId) {
this.containerId = containerId;
this.objectId = objectId;
}
}

View file

@ -1,30 +0,0 @@
package info.frostfs.sdk.jdo.parameters.object;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.container.ContainerId;
import info.frostfs.sdk.dto.object.ObjectId;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
/** Parameters for fetching an object addressed by container ID + object ID. */
@Getter
@Builder
@AllArgsConstructor
public class PrmObjectGet implements SessionContext {
@NotNull
private ContainerId containerId;
@NotNull
private ObjectId objectId;
private SessionToken sessionToken;
private Map<String, String> xHeaders;
public PrmObjectGet(ContainerId containerId, ObjectId objectId) {
this.containerId = containerId;
this.objectId = objectId;
}
}

View file

@ -1,31 +0,0 @@
package info.frostfs.sdk.jdo.parameters.object;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.container.ContainerId;
import info.frostfs.sdk.dto.object.ObjectId;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
/**
 * Parameters for fetching an object's header; {@code raw} requests the header
 * without resolving split (virtual) objects.
 */
@Getter
@Builder
@AllArgsConstructor
public class PrmObjectHeadGet implements SessionContext {
@NotNull
private ContainerId containerId;
@NotNull
private ObjectId objectId;
private boolean raw;
private SessionToken sessionToken;
private Map<String, String> xHeaders;
public PrmObjectHeadGet(ContainerId containerId, ObjectId objectId) {
this.containerId = containerId;
this.objectId = objectId;
}
}

View file

@ -1,28 +0,0 @@
package info.frostfs.sdk.jdo.parameters.object;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.object.ObjectHeader;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
/** Parameters for a streaming object upload (payload written via the returned writer). */
@Getter
@Builder
@AllArgsConstructor
public class PrmObjectPut implements PrmObjectPutBase, SessionContext {
// Mutable per-upload progress state, always present.
@NotNull
private final PutObjectContext putObjectContext = new PutObjectContext();
@NotNull
private ObjectHeader objectHeader;
private SessionToken sessionToken;
private Map<String, String> xHeaders;
public PrmObjectPut(ObjectHeader objectHeader) {
this.objectHeader = objectHeader;
}
}

View file

@ -1,12 +0,0 @@
package info.frostfs.sdk.jdo.parameters.object;
import info.frostfs.sdk.dto.object.ObjectHeader;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import java.util.Map;
/** Common contract of object-upload parameter types: header plus optional extra headers. */
public interface PrmObjectPutBase extends SessionContext {
ObjectHeader getObjectHeader();
Map<String, String> getXHeaders();
}

View file

@ -1,30 +0,0 @@
package info.frostfs.sdk.jdo.parameters.object;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.container.ContainerId;
import info.frostfs.sdk.dto.object.ObjectFilter;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
/** Parameters for searching objects in a container by attribute filters. */
@Getter
@Builder
@AllArgsConstructor
public class PrmObjectSearch implements SessionContext {
@NotNull
private ContainerId containerId;
@NotNull
private ObjectFilter<?>[] filters;
private SessionToken sessionToken;
private Map<String, String> xHeaders;
public PrmObjectSearch(ContainerId containerId, ObjectFilter<?>... filters) {
this.containerId = containerId;
this.filters = filters;
}
}

View file

@ -1,26 +0,0 @@
package info.frostfs.sdk.jdo.parameters.object;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.object.ObjectFrostFS;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
/** Parameters for uploading a fully-prepared object in one request. */
@Getter
@Builder
@AllArgsConstructor
public class PrmObjectSinglePut implements SessionContext {
@NotNull
private ObjectFrostFS objectFrostFS;
private SessionToken sessionToken;
private Map<String, String> xHeaders;
public PrmObjectSinglePut(ObjectFrostFS objectFrostFS) {
this.objectFrostFS = objectFrostFS;
}
}

View file

@ -1,14 +0,0 @@
package info.frostfs.sdk.jdo.parameters.object;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;
/** Mutable per-upload progress state shared across chunked put operations. */
@Getter
@Setter
@NoArgsConstructor
public class PutObjectContext {
// Cached network max-object-size to avoid re-fetching per chunk.
private int maxObjectSizeCache;
private long currentStreamPosition;
private long fullLength;
}

View file

@ -1,42 +0,0 @@
package info.frostfs.sdk.jdo.parameters.object.patch;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.object.ObjectAttribute;
import info.frostfs.sdk.dto.object.patch.Address;
import info.frostfs.sdk.dto.object.patch.Range;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.io.InputStream;
import java.util.List;
import java.util.Map;
/**
 * Parameters for patching an object: replace the byte {@code range} at
 * {@code address} with data read from {@code payload}, streamed in chunks of at
 * most {@code maxChunkLength}; optionally add or replace attributes.
 */
@Getter
@Builder
@AllArgsConstructor
public class PrmObjectPatch implements SessionContext {
@NotNull
private Address address;
@NotNull
private Range range;
@NotNull
private InputStream payload;
private List<ObjectAttribute> newAttributes;
// When true, newAttributes replace existing ones instead of being appended.
private boolean replaceAttributes;
private int maxChunkLength;
private SessionToken sessionToken;
private Map<String, String> xHeaders;
public PrmObjectPatch(Address address, Range range, InputStream payload, int maxChunkLength) {
this.address = address;
this.range = range;
this.payload = payload;
this.maxChunkLength = maxChunkLength;
}
}

View file

@ -1,35 +0,0 @@
package info.frostfs.sdk.jdo.parameters.object.patch;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.container.ContainerId;
import info.frostfs.sdk.dto.object.ObjectId;
import info.frostfs.sdk.dto.object.patch.Range;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
/**
 * Parameters for reading a payload byte range of an object; {@code raw}
 * requests the range without resolving split (virtual) objects.
 */
@Getter
@Builder
@AllArgsConstructor
public class PrmRangeGet implements SessionContext {
@NotNull
private ContainerId containerId;
@NotNull
private ObjectId objectId;
@NotNull
private Range range;
private boolean raw;
private SessionToken sessionToken;
private Map<String, String> xHeaders;
public PrmRangeGet(ContainerId containerId, ObjectId objectId, Range range) {
this.containerId = containerId;
this.objectId = objectId;
this.range = range;
}
}

View file

@ -1,38 +0,0 @@
package info.frostfs.sdk.jdo.parameters.object.patch;
import info.frostfs.sdk.annotations.NotNull;
import info.frostfs.sdk.dto.container.ContainerId;
import info.frostfs.sdk.dto.object.ObjectId;
import info.frostfs.sdk.dto.object.patch.Range;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.List;
import java.util.Map;
/** Parameters for hashing one or more payload ranges of an object, salted with {@code salt}. */
@Getter
@Builder
@AllArgsConstructor
public class PrmRangeHashGet implements SessionContext {
@NotNull
private ContainerId containerId;
@NotNull
private ObjectId objectId;
@NotNull
private List<Range> ranges;
@NotNull
private byte[] salt;
private SessionToken sessionToken;
private Map<String, String> xHeaders;
public PrmRangeHashGet(ContainerId containerId, ObjectId objectId, List<Range> ranges, byte[] salt) {
this.containerId = containerId;
this.objectId = objectId;
this.ranges = ranges;
this.salt = salt;
}
}

View file

@ -1,19 +0,0 @@
package info.frostfs.sdk.jdo.parameters.session;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import java.util.Map;
/** Parameters for creating a session token with the given expiration epoch. */
@Getter
@Builder
@AllArgsConstructor
public class PrmSessionCreate {
private long expiration; //-1 is max
private Map<String, String> xHeaders;
public PrmSessionCreate(long expiration) {
this.expiration = expiration;
}
}

View file

@ -1,7 +0,0 @@
package info.frostfs.sdk.jdo.parameters.session;
import info.frostfs.sdk.dto.session.SessionToken;
/** Marks parameter objects that can carry an optional session token. */
public interface SessionContext {
SessionToken getSessionToken();
}

View file

@ -1,12 +0,0 @@
package info.frostfs.sdk.jdo.pool;
import lombok.AllArgsConstructor;
import lombok.Getter;
/** Describes one pool node: selection priority, endpoint address, and weight. */
@Getter
@AllArgsConstructor
public class NodeParameters {
private final int priority;
private final String address;
private final double weight;
}

View file

@ -1,44 +0,0 @@
package info.frostfs.sdk.jdo.pool;
import info.frostfs.sdk.jdo.ECDsa;
import info.frostfs.sdk.pool.ClientWrapper;
import io.grpc.ClientInterceptors;
import io.netty.channel.ChannelOption;
import lombok.Getter;
import lombok.Setter;
import org.slf4j.Logger;
import java.util.ArrayList;
import java.util.Collection;
import java.util.function.Function;
/**
 * Configuration for constructing a connection {@code Pool}: signing key,
 * timeouts (units per the consuming code), node list, error threshold, an
 * optional custom client factory, and logging.
 */
@Getter
@Setter
public class PoolInitParameters {
private ECDsa key;
private long nodeDialTimeout;
private long nodeStreamTimeout;
private long healthCheckTimeout;
private long clientRebalanceInterval;
private long sessionExpirationDuration;
// Error count after which a connection is marked unhealthy.
private int errorThreshold;
private NodeParameters[] nodeParams;
private ChannelOption<?>[] dialOptions;
// Factory producing a client wrapper for a given node address.
private Function<String, ClientWrapper> clientBuilder;
private long gracefulCloseOnSwitchTimeout;
private Logger logger;
// NOTE(review): element type is io.grpc.ClientInterceptors (the static utility
// class), not ClientInterceptor — likely a typo; confirm before changing the API.
private Collection<ClientInterceptors> interceptors = new ArrayList<>();
}

View file

@ -1,15 +0,0 @@
package info.frostfs.sdk.jdo.result;
import info.frostfs.sdk.dto.object.ObjectHeader;
import info.frostfs.sdk.dto.object.SplitInfo;
import lombok.Builder;
import lombok.Getter;
import lombok.Setter;
/**
 * Result of an object-head request: either the header itself or,
 * for a split (chunked) object, its split info.
 */
@Builder
@Getter
@Setter
public class ObjectHeaderResult {
    // Header of the requested object; may be absent when only split info is returned.
    private ObjectHeader headerInfo;

    // Split metadata for large objects stored in parts.
    private SplitInfo splitInfo;
}

View file

@ -1,27 +0,0 @@
package info.frostfs.sdk.pool;
/**
 * Health and usage statistics of a single pooled connection.
 */
public interface ClientStatus {
    /** Checks if the connection can handle requests. */
    boolean isHealthy();

    /** Checks if the connection was created (dial succeeded at least once). */
    boolean isDialed();

    /** Marks the client as unhealthy. */
    void setUnhealthy();

    /** Returns the address of the endpoint. */
    String getAddress();

    /**
     * Returns the current error rate.
     * After a specific threshold the connection is considered unhealthy.
     * Pool.startRebalance can make this connection healthy again.
     */
    int getCurrentErrorRate();

    /** Returns the number of all errors that have ever happened. */
    long getOverallErrorRate();

    /** Returns statistics for all used methods. */
    StatusSnapshot[] getMethodsStatus();
}

View file

@ -1,114 +0,0 @@
package info.frostfs.sdk.pool;
import info.frostfs.sdk.enums.HealthyStatus;
import info.frostfs.sdk.enums.MethodIndex;
import info.frostfs.sdk.utils.FrostFSMessages;
import lombok.Getter;
import lombok.Setter;
import org.slf4j.Logger;
import java.util.Arrays;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantLock;
/**
 * Tracks the health state and error statistics of one pooled connection.
 *
 * <p>The health state is stored as a {@code HealthyStatus} value code in an
 * {@link AtomicInteger}; error counters are guarded by {@code lock}.
 */
@Getter
@Setter
public class ClientStatusMonitor implements ClientStatus {
    private final ReentrantLock lock = new ReentrantLock();
    private final Logger logger;
    private final AtomicInteger healthy = new AtomicInteger();
    private final String address;
    private final MethodStatus[] methods;

    // Consecutive-error count that flips the client to unhealthy.
    private int errorThreshold;
    private int currentErrorCount;
    private long overallErrorCount;

    public ClientStatusMonitor(Logger logger, String address) {
        this.logger = logger;
        this.healthy.set(HealthyStatus.HEALTHY.value);
        this.address = address;
        // One statistics slot per known API method.
        this.methods = Arrays.stream(MethodIndex.values())
                .map(t -> new MethodStatus(t.methodName))
                .toArray(MethodStatus[]::new);
    }

    @Override
    public boolean isHealthy() {
        return healthy.get() == HealthyStatus.HEALTHY.value;
    }

    @Override
    public boolean isDialed() {
        return healthy.get() != HealthyStatus.UNHEALTHY_ON_DIAL.value;
    }

    public void setHealthy() {
        // FIX: was HealthyStatus.HEALTHY.ordinal(); every other transition in this
        // class stores the enum's `value` code, and isHealthy() compares against
        // `value`, so storing the ordinal would leave the client reported unhealthy
        // whenever ordinal != value.
        healthy.set(HealthyStatus.HEALTHY.value);
    }

    @Override
    public void setUnhealthy() {
        healthy.set(HealthyStatus.UNHEALTHY_ON_REQUEST.value);
    }

    public void setUnhealthyOnDial() {
        healthy.set(HealthyStatus.UNHEALTHY_ON_DIAL.value);
    }

    /**
     * Records one error; when the consecutive-error count reaches the threshold,
     * marks the client unhealthy, resets the counter, and logs (outside the lock).
     */
    public void incErrorRate() {
        boolean thresholdReached;
        lock.lock();
        try {
            currentErrorCount++;
            overallErrorCount++;
            thresholdReached = currentErrorCount >= errorThreshold;
            if (thresholdReached) {
                setUnhealthy();
                currentErrorCount = 0;
            }
        } finally {
            lock.unlock();
        }
        if (thresholdReached && logger != null) {
            FrostFSMessages.errorThresholdReached(logger, address, errorThreshold);
        }
    }

    @Override
    public int getCurrentErrorRate() {
        lock.lock();
        try {
            return currentErrorCount;
        } finally {
            lock.unlock();
        }
    }

    @Override
    public long getOverallErrorRate() {
        lock.lock();
        try {
            return overallErrorCount;
        } finally {
            lock.unlock();
        }
    }

    @Override
    public StatusSnapshot[] getMethodsStatus() {
        StatusSnapshot[] result = new StatusSnapshot[methods.length];
        for (int i = 0; i < result.length; i++) {
            result[i] = methods[i].getSnapshot();
        }
        return result;
    }
}

View file

@ -1,131 +0,0 @@
package info.frostfs.sdk.pool;
import info.frostfs.sdk.FrostFSClient;
import info.frostfs.sdk.enums.MethodIndex;
import info.frostfs.sdk.exceptions.ResponseFrostFSException;
import info.frostfs.sdk.exceptions.ValidationFrostFSException;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.utils.WaitUtil;
import lombok.AccessLevel;
import lombok.Getter;
import org.apache.commons.lang3.StringUtils;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import static info.frostfs.sdk.constants.ErrorConst.POOL_CLIENT_UNHEALTHY;
/**
 * A pooled FrostFS client plus its health monitor. Handles dialing,
 * error accounting, and transparent restart of an unhealthy client.
 */
@Getter
public class ClientWrapper extends ClientStatusMonitor {
    // Guards swaps of the `client` reference; excluded from Lombok getters.
    @Getter(value = AccessLevel.NONE)
    private final Lock lock = new ReentrantLock();

    private final SessionCache sessionCache;
    private final WrapperPrm wrapperPrm;
    private FrostFSClient client;

    public ClientWrapper(WrapperPrm wrapperPrm, Pool pool) {
        super(wrapperPrm.getLogger(), wrapperPrm.getAddress());
        this.wrapperPrm = wrapperPrm;
        setErrorThreshold(wrapperPrm.getErrorThreshold());
        this.sessionCache = pool.getSessionCache();
        this.client = new FrostFSClient(wrapperPrm, sessionCache);
    }

    /** Returns the wrapped client while healthy, or null otherwise. */
    public FrostFSClient getClient() {
        lock.lock();
        try {
            if (isHealthy()) {
                return client;
            }
            return null;
        } finally {
            lock.unlock();
        }
    }

    /**
     * Dials the underlying client.
     *
     * @throws ValidationFrostFSException if this wrapper is currently unhealthy
     */
    public void dial(CallContext ctx) {
        FrostFSClient client = getClient();
        if (client == null) {
            throw new ValidationFrostFSException(POOL_CLIENT_UNHEALTHY);
        }
        client.dial(ctx);
    }

    /**
     * Folds an exception into the error statistics. Response errors only count
     * toward the threshold for server-side status codes; all other exceptions
     * always count.
     */
    public void handleError(Exception exp) {
        if (exp instanceof ResponseFrostFSException && ((ResponseFrostFSException) exp).getStatus() != null) {
            switch (((ResponseFrostFSException) exp).getStatus().getCode()) {
                case INTERNAL:
                case WRONG_MAGIC_NUMBER:
                case SIGNATURE_VERIFICATION_FAILURE:
                case NODE_UNDER_MAINTENANCE:
                    incErrorRate();
            }
            return;
        }
        incErrorRate();
    }

    // Closes the old client after the configured grace period, giving in-flight
    // requests a chance to finish. Runs synchronously on the calling thread.
    private void scheduleGracefulClose() {
        if (client == null) {
            return;
        }
        WaitUtil.sleep(wrapperPrm.getGracefulCloseOnSwitchTimeout());
        client.close();
    }

    /**
     * Probes the client with a local-node-info call; on failure, gracefully
     * closes a previously-dialed client and rebuilds it.
     *
     * @return a completed future: true when the health state changed
     */
    public CompletableFuture<Boolean> restartIfUnhealthy(CallContext ctx) {
        try {
            client.getLocalNodeInfo(ctx);
            return CompletableFuture.completedFuture(false);
        } catch (Exception ignored) {
            // Probe failed — fall through to restart below.
        }
        if (isDialed()) {
            scheduleGracefulClose();
        }
        return CompletableFuture.completedFuture(restartClient(ctx));
    }

    // Rebuilds the underlying client; returns true when the state changed
    // to unhealthy, false when the client ends up healthy.
    private boolean restartClient(CallContext ctx) {
        FrostFSClient newClient = null;
        try {
            newClient = new FrostFSClient(wrapperPrm, sessionCache);
            var error = newClient.dial(ctx);
            if (StringUtils.isNotBlank(error)) {
                setUnhealthyOnDial();
                newClient.close();
                return true;
            }
            // FIX: release the lock in a finally block — the original called
            // lock.unlock() on the happy path only, leaking the lock if the
            // swap path was interrupted between lock() and unlock().
            lock.lock();
            try {
                client = newClient;
            } finally {
                lock.unlock();
            }
        } catch (Exception exp) {
            // Best-effort: keep the previous client; the probe below decides health.
            if (newClient != null) {
                newClient.close();
            }
        }
        try {
            client.getLocalNodeInfo(ctx);
        } catch (Exception exp) {
            setUnhealthy();
            return true;
        }
        setHealthy();
        return false;
    }

    /** Records one completed request of the given method with its elapsed time. */
    public void incRequests(long elapsed, MethodIndex method) {
        var methodStat = getMethods()[method.ordinal()];
        methodStat.incRequests(elapsed);
    }
}

View file

@ -1,54 +0,0 @@
package info.frostfs.sdk.pool;
import java.util.concurrent.locks.ReentrantLock;
/**
 * One priority group of pooled clients plus the weighted sampler
 * used to pick among them.
 */
class InnerPool {
    private static final int ATTEMPTS_COUNT = 3;

    private final ReentrantLock lock = new ReentrantLock();
    private final ClientWrapper[] clients;
    private Sampler sampler;

    InnerPool(Sampler sampler, ClientWrapper[] clients) {
        this.sampler = sampler;
        this.clients = clients;
    }

    Sampler getSampler() {
        return sampler;
    }

    void setSampler(Sampler sampler) {
        this.sampler = sampler;
    }

    ClientWrapper[] getClients() {
        return clients;
    }

    /**
     * Picks a healthy client, or returns null when none is available.
     * With a single client it is checked directly; otherwise the sampler
     * is consulted up to ATTEMPTS_COUNT draws per client.
     */
    ClientWrapper connection() {
        lock.lock();
        try {
            if (clients.length == 1) {
                ClientWrapper only = clients[0];
                return only.isHealthy() ? only : null;
            }
            int maxAttempts = ATTEMPTS_COUNT * clients.length;
            for (int attempt = 0; attempt < maxAttempts; attempt++) {
                ClientWrapper candidate = clients[sampler.next()];
                if (candidate.isHealthy()) {
                    return candidate;
                }
            }
            return null;
        } finally {
            lock.unlock();
        }
    }
}

View file

@ -1,31 +0,0 @@
package info.frostfs.sdk.pool;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.Setter;
import java.util.concurrent.locks.ReentrantLock;
/**
 * Per-method request statistics: a named, lock-guarded running snapshot
 * of total elapsed time and request count.
 */
@Setter
@Getter
public class MethodStatus {
    @Getter(AccessLevel.NONE)
    private final ReentrantLock lock = new ReentrantLock();

    // API method name this entry accounts for.
    private final String name;
    private StatusSnapshot snapshot;

    public MethodStatus(String name) {
        this.name = name;
        this.snapshot = new StatusSnapshot();
    }

    // Adds one request and its elapsed time to the snapshot, under the lock.
    void incRequests(long elapsed) {
        lock.lock();
        try {
            snapshot.setAllTime(snapshot.getAllTime() + elapsed);
            snapshot.setAllRequests(snapshot.getAllRequests() + 1);
        } finally {
            lock.unlock();
        }
    }
}

View file

@ -1,13 +0,0 @@
package info.frostfs.sdk.pool;
import lombok.Getter;
import lombok.Setter;
/**
 * Statistics of a single pool node, as exposed by Pool.statistic().
 */
@Getter
@Setter
public class NodeStatistic {
    // Endpoint address of the node.
    private String address;
    // Per-method snapshots (time and request counts).
    private StatusSnapshot[] methods;
    // Total errors ever observed on this node.
    private long overallErrors;
    // Errors counted toward the current unhealthy threshold.
    private int currentErrors;
}

View file

@ -1,19 +0,0 @@
package info.frostfs.sdk.pool;
import lombok.Getter;
import java.util.ArrayList;
import java.util.List;
/**
 * All node addresses and weights that share one priority group,
 * built by Pool from individual NodeParameters entries.
 */
@Getter
public class NodesParam {
    private final int priority;
    // Parallel lists: address.get(i) has weight weight.get(i).
    private final List<String> address;
    private final List<Double> weight;

    public NodesParam(int priority) {
        this.priority = priority;
        this.address = new ArrayList<>();
        this.weight = new ArrayList<>();
    }
}

View file

@ -1,558 +0,0 @@
package info.frostfs.sdk.pool;
import frostfs.refs.Types;
import info.frostfs.sdk.dto.ape.Chain;
import info.frostfs.sdk.dto.container.Container;
import info.frostfs.sdk.dto.container.ContainerId;
import info.frostfs.sdk.dto.netmap.NetmapSnapshot;
import info.frostfs.sdk.dto.netmap.NodeInfo;
import info.frostfs.sdk.dto.object.ObjectFrostFS;
import info.frostfs.sdk.dto.object.ObjectHeader;
import info.frostfs.sdk.dto.object.ObjectId;
import info.frostfs.sdk.dto.object.OwnerId;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.exceptions.FrostFSException;
import info.frostfs.sdk.exceptions.SessionExpiredFrostFSException;
import info.frostfs.sdk.exceptions.SessionNotFoundFrostFSException;
import info.frostfs.sdk.exceptions.ValidationFrostFSException;
import info.frostfs.sdk.jdo.ECDsa;
import info.frostfs.sdk.jdo.NetworkSettings;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.ape.PrmApeChainAdd;
import info.frostfs.sdk.jdo.parameters.ape.PrmApeChainList;
import info.frostfs.sdk.jdo.parameters.ape.PrmApeChainRemove;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerCreate;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerDelete;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerGet;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerGetAll;
import info.frostfs.sdk.jdo.parameters.object.*;
import info.frostfs.sdk.jdo.parameters.object.patch.PrmObjectPatch;
import info.frostfs.sdk.jdo.parameters.object.patch.PrmRangeGet;
import info.frostfs.sdk.jdo.parameters.object.patch.PrmRangeHashGet;
import info.frostfs.sdk.jdo.parameters.session.PrmSessionCreate;
import info.frostfs.sdk.jdo.pool.NodeParameters;
import info.frostfs.sdk.jdo.pool.PoolInitParameters;
import info.frostfs.sdk.jdo.result.ObjectHeaderResult;
import info.frostfs.sdk.services.CommonClient;
import info.frostfs.sdk.services.impl.rwhelper.ObjectWriter;
import info.frostfs.sdk.services.impl.rwhelper.RangeReader;
import info.frostfs.sdk.utils.FrostFSMessages;
import info.frostfs.sdk.utils.WaitUtil;
import lombok.Getter;
import org.slf4j.Logger;
import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Function;
import static info.frostfs.sdk.Helper.getHexString;
import static info.frostfs.sdk.constants.ErrorConst.*;
import static info.frostfs.sdk.constants.PoolConst.*;
/**
 * A client pool over multiple FrostFS nodes grouped by priority.
 *
 * <p>On {@link #dial(CallContext)} the pool connects every configured node,
 * creates a session per node, and builds one {@link InnerPool} per priority
 * group. Every {@code CommonClient} call is then served by the first healthy
 * connection found, preferring lower priority values.
 */
@Getter
public class Pool implements CommonClient {
    private final ReentrantLock lock = new ReentrantLock();
    private final ECDsa key;
    private final SessionCache sessionCache;
    private final long sessionTokenDuration;
    private final RebalanceParameters rebalanceParams;
    private final Function<String, ClientWrapper> clientBuilder;
    private final Logger logger;

    private InnerPool[] innerPools;
    // NOTE(review): ownerID/ownerId/disposedValue/clientStatus are never read or
    // written in this class; presumably reserved or accessed via Lombok getters
    // by callers — confirm before removing.
    private Types.OwnerID ownerID;
    private OwnerId ownerId;
    private boolean disposedValue;
    private long maxObjectSize;
    private ClientStatus clientStatus;

    /**
     * @param options pool configuration; must be non-null with a non-null key
     * @throws ValidationFrostFSException when required options are missing
     */
    public Pool(PoolInitParameters options) {
        if (options == null || options.getKey() == null) {
            throw new ValidationFrostFSException(
                    String.format(
                            PARAMS_ARE_MISSING_TEMPLATE,
                            String.join(
                                    FIELDS_DELIMITER_COMMA, PoolInitParameters.class.getName(), ECDsa.class.getName()
                            )
                    )
            );
        }

        List<NodesParam> nodesParams = adjustNodeParams(options.getNodeParams());
        SessionCache cache = new SessionCache(options.getSessionExpirationDuration());
        fillDefaultInitParams(options, this);

        this.key = options.getKey();
        this.sessionCache = cache;
        this.logger = options.getLogger();
        this.sessionTokenDuration = options.getSessionExpirationDuration();
        this.rebalanceParams = new RebalanceParameters(
                nodesParams.toArray(new NodesParam[0]),
                options.getHealthCheckTimeout(),
                options.getClientRebalanceInterval(),
                options.getSessionExpirationDuration());
        this.clientBuilder = options.getClientBuilder();
    }

    // Groups node parameters by priority (ascending) and normalizes the weights
    // within each group so they sum to 1.
    private static List<NodesParam> adjustNodeParams(NodeParameters[] nodeParams) {
        if (nodeParams == null || nodeParams.length == 0) {
            throw new ValidationFrostFSException(POOL_PEERS_IS_MISSING);
        }

        Map<Integer, NodesParam> nodesParamsDict = new HashMap<>(nodeParams.length);
        for (NodeParameters nodeParam : nodeParams) {
            var nodesParam = nodesParamsDict
                    .computeIfAbsent(nodeParam.getPriority(), k -> new NodesParam(nodeParam.getPriority()));
            nodesParam.getAddress().add(nodeParam.getAddress());
            nodesParam.getWeight().add(nodeParam.getWeight());
        }

        List<NodesParam> nodesParams = new ArrayList<>(nodesParamsDict.values());
        nodesParams.sort(Comparator.comparingInt(NodesParam::getPriority));

        for (NodesParam nodes : nodesParams) {
            double[] newWeights = adjustWeights(nodes.getWeight().stream().mapToDouble(Double::doubleValue).toArray());
            nodes.getWeight().clear();
            for (double weight : newWeights) {
                nodes.getWeight().add(weight);
            }
        }
        return nodesParams;
    }

    // Normalizes weights to sum to 1; an all-zero input stays all-zero.
    private static double[] adjustWeights(double[] weights) {
        double[] adjusted = new double[weights.length];
        double sum = Arrays.stream(weights).sum();
        if (sum > 0) {
            for (int i = 0; i < weights.length; i++) {
                adjusted[i] = weights[i] / sum;
            }
        }
        return adjusted;
    }

    // Replaces unset (zero/negative) options with pool defaults and installs the
    // default client builder when none was supplied.
    private static void fillDefaultInitParams(PoolInitParameters parameters, Pool pool) {
        // FIX: the original checked/set sessionExpirationDuration twice with the
        // identical condition; the duplicate block has been removed.
        if (parameters.getSessionExpirationDuration() == 0) {
            parameters.setSessionExpirationDuration(DEFAULT_SESSION_TOKEN_EXPIRATION_DURATION);
        }
        if (parameters.getErrorThreshold() == 0) {
            parameters.setErrorThreshold(DEFAULT_ERROR_THRESHOLD);
        }
        if (parameters.getClientRebalanceInterval() <= 0) {
            parameters.setClientRebalanceInterval(DEFAULT_REBALANCE_INTERVAL);
        }
        if (parameters.getGracefulCloseOnSwitchTimeout() <= 0) {
            parameters.setGracefulCloseOnSwitchTimeout(DEFAULT_GRACEFUL_CLOSE_ON_SWITCH_TIMEOUT);
        }
        if (parameters.getHealthCheckTimeout() <= 0) {
            parameters.setHealthCheckTimeout(DEFAULT_HEALTHCHECK_TIMEOUT);
        }
        if (parameters.getNodeDialTimeout() <= 0) {
            parameters.setNodeDialTimeout(DEFAULT_DIAL_TIMEOUT);
        }
        if (parameters.getNodeStreamTimeout() <= 0) {
            parameters.setNodeStreamTimeout(DEFAULT_STREAM_TIMEOUT);
        }
        if (parameters.getClientBuilder() == null) {
            parameters.setClientBuilder(address -> {
                WrapperPrm wrapperPrm = new WrapperPrm();
                wrapperPrm.setAddress(address);
                wrapperPrm.setKey(parameters.getKey());
                wrapperPrm.setLogger(parameters.getLogger());
                wrapperPrm.setDialTimeout(parameters.getNodeDialTimeout());
                wrapperPrm.setStreamTimeout(parameters.getNodeStreamTimeout());
                wrapperPrm.setErrorThreshold(parameters.getErrorThreshold());
                wrapperPrm.setGracefulCloseOnSwitchTimeout(parameters.getGracefulCloseOnSwitchTimeout());
                wrapperPrm.setInterceptors(parameters.getInterceptors());
                return new ClientWrapper(wrapperPrm, pool);
            });
        }
    }

    // Creates a session on the given client whose expiration is the current
    // epoch duration plus `duration`, saturating at Long.MAX_VALUE.
    private static SessionToken initSessionForDuration(CallContext ctx, ClientWrapper cw, long duration) {
        var client = cw.getClient();
        NetworkSettings networkInfo = client.getNetworkSettings(ctx);
        long epoch = networkInfo.getEpochDuration();
        long exp = (Long.MAX_VALUE - epoch < duration) ? Long.MAX_VALUE : (epoch + duration);
        return client.createSession(new PrmSessionCreate(exp), ctx);
    }

    // NOTE(review): plain concatenation with no delimiter — two different
    // (address, key) pairs could theoretically collide; confirm acceptable.
    public static String formCacheKey(String address, String key) {
        return address + key;
    }

    /**
     * Connects every configured node and initializes sessions.
     *
     * @return null on success, or an error message when no node is healthy
     */
    @Override
    public String dial(CallContext ctx) {
        InnerPool[] inner = new InnerPool[rebalanceParams.getNodesParams().length];
        boolean atLeastOneHealthy = false;
        int i = 0;

        for (NodesParam nodeParams : rebalanceParams.getNodesParams()) {
            // Weight and address lists are built in parallel, so their sizes match.
            ClientWrapper[] clients = new ClientWrapper[nodeParams.getWeight().size()];
            for (int j = 0; j < nodeParams.getAddress().size(); j++) {
                ClientWrapper client = clients[j] = clientBuilder.apply(nodeParams.getAddress().get(j));
                boolean dialed = false;
                try {
                    client.dial(ctx);
                    dialed = true;

                    SessionToken token = initSessionForDuration(
                            ctx, client, rebalanceParams.getSessionExpirationDuration()
                    );
                    String cacheKey = formCacheKey(
                            nodeParams.getAddress().get(j),
                            getHexString(key.getPublicKeyByte())
                    );
                    sessionCache.setValue(cacheKey, token);
                    atLeastOneHealthy = true;
                } catch (ValidationFrostFSException exp) {
                    break;
                } catch (Exception exp) {
                    if (!dialed) {
                        client.setUnhealthyOnDial();
                    } else {
                        client.setUnhealthy();
                    }
                    if (logger != null) {
                        FrostFSMessages
                                .sessionCreationError(logger, client.getWrapperPrm().getAddress(), exp.getMessage());
                    }
                }
            }

            Sampler sampler = new Sampler(nodeParams.getWeight().stream().mapToDouble(Double::doubleValue).toArray());
            inner[i] = new InnerPool(sampler, clients);
            i++;
        }

        if (!atLeastOneHealthy) {
            return POOL_NODES_UNHEALTHY;
        }

        this.innerPools = inner;
        NetworkSettings networkSettings = getNetworkSettings(ctx);
        this.maxObjectSize = networkSettings.getMaxObjectSize();
        startRebalance(ctx);
        return null;
    }

    // Returns the first healthy connection, preferring lower-priority groups.
    private ClientWrapper connection() {
        for (InnerPool pool : innerPools) {
            ClientWrapper client = pool.connection();
            if (client != null) {
                return client;
            }
        }
        throw new FrostFSException(POOL_CLIENTS_UNHEALTHY);
    }

    /** Closes every dialed client in the pool. */
    public void close() {
        if (innerPools != null) {
            for (InnerPool innerPool : innerPools) {
                for (ClientWrapper client : innerPool.getClients()) {
                    if (client.isDialed()) {
                        client.getClient().close();
                    }
                }
            }
        }
    }

    // NOTE(review): each async task sleeps once and runs a single health pass;
    // despite the name there is no periodic loop visible here — confirm intent.
    public void startRebalance(CallContext ctx) {
        double[][] buffers = new double[rebalanceParams.getNodesParams().length][];
        for (int i = 0; i < rebalanceParams.getNodesParams().length; i++) {
            NodesParam parameters = rebalanceParams.getNodesParams()[i];
            buffers[i] = new double[parameters.getWeight().size()];
            CompletableFuture.runAsync(() -> {
                WaitUtil.sleep(rebalanceParams.getClientRebalanceInterval());
                updateNodesHealth(ctx, buffers);
            });
        }
    }

    // Runs one health pass over all inner pools in parallel and waits for completion.
    private void updateNodesHealth(CallContext ctx, double[][] buffers) {
        CompletableFuture<?>[] tasks = new CompletableFuture<?>[innerPools.length];
        for (int i = 0; i < innerPools.length; i++) {
            double[] bufferWeights = buffers[i];
            int finalI = i;
            tasks[i] = CompletableFuture.runAsync(() -> updateInnerNodesHealth(ctx, finalI, bufferWeights));
        }
        CompletableFuture.allOf(tasks).join();
    }

    // Probes/restarts every client of one inner pool, refreshes its weight buffer,
    // and rebuilds the sampler when any health state changed.
    private void updateInnerNodesHealth(CallContext ctx, int poolIndex, double[] bufferWeights) {
        if (poolIndex > innerPools.length - 1) {
            return;
        }
        InnerPool pool = innerPools[poolIndex];
        RebalanceParameters options = rebalanceParams;

        int[] healthyChanged = {0};
        CompletableFuture<?>[] tasks = new CompletableFuture<?>[pool.getClients().length];

        for (int j = 0; j < pool.getClients().length; j++) {
            ClientWrapper client = pool.getClients()[j];
            AtomicBoolean healthy = new AtomicBoolean(false);
            AtomicReference<String> error = new AtomicReference<>();
            AtomicBoolean changed = new AtomicBoolean(false);

            int finalJ = j;
            tasks[j] = client.restartIfUnhealthy(ctx).handle((unused, throwable) -> {
                if (throwable != null) {
                    error.set(throwable.getMessage());
                    bufferWeights[finalJ] = 0;
                    // Sessions of an unreachable node are no longer valid.
                    sessionCache.deleteByPrefix(client.getAddress());
                } else {
                    changed.set(unused);
                    healthy.set(true);
                    bufferWeights[finalJ] = options.getNodesParams()[poolIndex].getWeight().get(finalJ);
                }
                return null;
            }).thenRun(() -> {
                if (changed.get()) {
                    if (error.get() != null && logger != null) {
                        FrostFSMessages.healthChanged(logger, client.getAddress(), healthy.get(), error.get());
                    }
                    healthyChanged[0] = 1;
                }
            });
        }

        CompletableFuture.allOf(tasks).thenRun(() -> {
            if (healthyChanged[0] == 1) {
                double[] probabilities = adjustWeights(bufferWeights);
                lock.lock();
                try {
                    pool.setSampler(new Sampler(probabilities));
                } finally {
                    lock.unlock();
                }
            }
        });
    }

    // Evicts cached sessions of `address` when the error indicates the session
    // is gone; returns true when the error was session-related.
    private boolean checkSessionTokenErr(Exception error, String address) {
        if (error == null) {
            return false;
        }

        if (error instanceof SessionNotFoundFrostFSException || error instanceof SessionExpiredFrostFSException) {
            sessionCache.deleteByPrefix(address);
            return true;
        }
        return false;
    }

    /**
     * Collects per-node statistics across all inner pools.
     *
     * @throws ValidationFrostFSException when the pool was never dialed
     */
    public Statistic statistic() {
        if (innerPools == null) {
            throw new ValidationFrostFSException(POOL_NOT_DIALED);
        }

        Statistic statistics = new Statistic();
        for (InnerPool inner : innerPools) {
            int valueIndex = 0;
            // NOTE(review): slots of unhealthy clients stay null in this array,
            // so currentNodes may contain nulls — confirm callers expect that.
            String[] nodes = new String[inner.getClients().length];
            lock.lock();
            try {
                for (ClientWrapper client : inner.getClients()) {
                    if (client.isHealthy()) {
                        nodes[valueIndex] = client.getAddress();
                    }
                    NodeStatistic node = new NodeStatistic();
                    node.setAddress(client.getAddress());
                    node.setMethods(client.getMethodsStatus());
                    node.setOverallErrors(client.getOverallErrorRate());
                    node.setCurrentErrors(client.getCurrentErrorRate());

                    statistics.getNodes().add(node);
                    valueIndex++;

                    statistics.setOverallErrors(statistics.getOverallErrors() + node.getOverallErrors());
                }

                if (statistics.getCurrentNodes() == null || statistics.getCurrentNodes().length == 0) {
                    statistics.setCurrentNodes(nodes);
                }
            } finally {
                lock.unlock();
            }
        }
        return statistics;
    }

    // ---- CommonClient delegation: every call below is served by the first
    // ---- healthy connection returned by connection(). ----

    @Override
    public Container getContainer(PrmContainerGet args, CallContext ctx) {
        ClientWrapper client = connection();
        return client.getClient().getContainer(args, ctx);
    }

    @Override
    public List<ContainerId> listContainers(PrmContainerGetAll args, CallContext ctx) {
        ClientWrapper client = connection();
        return client.getClient().listContainers(args, ctx);
    }

    @Override
    public ContainerId createContainer(PrmContainerCreate args, CallContext ctx) {
        ClientWrapper client = connection();
        return client.getClient().createContainer(args, ctx);
    }

    @Override
    public void deleteContainer(PrmContainerDelete args, CallContext ctx) {
        ClientWrapper client = connection();
        client.getClient().deleteContainer(args, ctx);
    }

    @Override
    public ObjectHeaderResult getObjectHead(PrmObjectHeadGet args, CallContext ctx) {
        ClientWrapper client = connection();
        return client.getClient().getObjectHead(args, ctx);
    }

    @Override
    public ObjectFrostFS getObject(PrmObjectGet args, CallContext ctx) {
        ClientWrapper client = connection();
        return client.getClient().getObject(args, ctx);
    }

    @Override
    public ObjectWriter putObject(PrmObjectPut args, CallContext ctx) {
        ClientWrapper client = connection();
        return client.getClient().putObject(args, ctx);
    }

    @Override
    public ObjectId putClientCutObject(PrmObjectClientCutPut args, CallContext ctx) {
        ClientWrapper client = connection();
        return client.getClient().putClientCutObject(args, ctx);
    }

    @Override
    public ObjectId putSingleObject(PrmObjectSinglePut args, CallContext ctx) {
        ClientWrapper client = connection();
        return client.getClient().putSingleObject(args, ctx);
    }

    @Override
    public void deleteObject(PrmObjectDelete args, CallContext ctx) {
        ClientWrapper client = connection();
        client.getClient().deleteObject(args, ctx);
    }

    @Override
    public Iterable<ObjectId> searchObjects(PrmObjectSearch args, CallContext ctx) {
        ClientWrapper client = connection();
        return client.getClient().searchObjects(args, ctx);
    }

    @Override
    public RangeReader getRange(PrmRangeGet args, CallContext ctx) {
        ClientWrapper client = connection();
        return client.getClient().getRange(args, ctx);
    }

    @Override
    public byte[][] getRangeHash(PrmRangeHashGet args, CallContext ctx) {
        ClientWrapper client = connection();
        return client.getClient().getRangeHash(args, ctx);
    }

    @Override
    public ObjectId patchObject(PrmObjectPatch args, CallContext ctx) {
        ClientWrapper client = connection();
        return client.getClient().patchObject(args, ctx);
    }

    @Override
    public byte[] addChain(PrmApeChainAdd args, CallContext ctx) {
        ClientWrapper client = connection();
        return client.getClient().addChain(args, ctx);
    }

    @Override
    public void removeChain(PrmApeChainRemove args, CallContext ctx) {
        ClientWrapper client = connection();
        client.getClient().removeChain(args, ctx);
    }

    @Override
    public List<Chain> listChains(PrmApeChainList args, CallContext ctx) {
        ClientWrapper client = connection();
        return client.getClient().listChains(args, ctx);
    }

    @Override
    public NetmapSnapshot getNetmapSnapshot(CallContext ctx) {
        ClientWrapper client = connection();
        return client.getClient().getNetmapSnapshot(ctx);
    }

    @Override
    public NodeInfo getLocalNodeInfo(CallContext ctx) {
        ClientWrapper client = connection();
        return client.getClient().getLocalNodeInfo(ctx);
    }

    @Override
    public NetworkSettings getNetworkSettings(CallContext ctx) {
        ClientWrapper client = connection();
        return client.getClient().getNetworkSettings(ctx);
    }

    @Override
    public SessionToken createSession(PrmSessionCreate args, CallContext ctx) {
        ClientWrapper client = connection();
        return client.getClient().createSession(args, ctx);
    }

    @Override
    public ObjectId calculateObjectId(ObjectHeader header) {
        ClientWrapper client = connection();
        return client.getClient().calculateObjectId(header);
    }

    @Override
    public frostfs.accounting.Types.Decimal getBalance(CallContext ctx) {
        ClientWrapper client = connection();
        return client.getClient().getBalance(ctx);
    }
}

View file

@ -1,15 +0,0 @@
package info.frostfs.sdk.pool;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.Setter;
/**
 * Settings that drive the pool's health-check/rebalance pass.
 */
@Getter
@Setter
@AllArgsConstructor
public class RebalanceParameters {
    // Node groups (by priority) with their addresses and normalized weights.
    private NodesParam[] nodesParams;
    // Timeout for per-node health probes.
    private long nodeRequestTimeout;
    // Delay before the rebalance pass runs.
    private long clientRebalanceInterval;
    // Lifetime used when (re)creating sessions.
    private long sessionExpirationDuration;
}

View file

@ -1,15 +0,0 @@
package info.frostfs.sdk.pool;
import info.frostfs.sdk.enums.MethodIndex;
import lombok.Getter;
import lombok.Setter;
import java.time.Duration;
/**
 * Describes one completed request for statistics reporting:
 * the node that served it, the method, and the elapsed time.
 */
@Getter
@Setter
public class RequestInfo {
    private String address;
    private MethodIndex methodIndex;
    private Duration elapsed;
}

View file

@ -1,77 +0,0 @@
package info.frostfs.sdk.pool;
import java.util.ArrayList;
import java.util.Random;
/**
 * Weighted random index sampler using the alias method (Walker/Vose):
 * O(n) construction, O(1) sampling.
 */
class Sampler {
    private final Object lock = new Object();
    private final Random random = new Random();
    private final double[] probabilities;
    private final int[] alias;

    /**
     * Builds the probability/alias tables from the given normalized weights.
     *
     * <p>FIX: the constructor parameter previously shadowed the
     * {@code probabilities} field, and the final leftover loop wrote
     * {@code probabilities[l] = 1} into the <em>parameter</em> instead of the
     * field. Affected entries then kept their default 0.0 probability and an
     * unset alias of 0, biasing {@link #next()} toward index 0. The parameter
     * is renamed to {@code weights} and the field is written explicitly.
     */
    Sampler(double[] weights) {
        ArrayList<Integer> small = new ArrayList<>();
        ArrayList<Integer> large = new ArrayList<>();
        int n = weights.length;
        this.probabilities = new double[n];
        this.alias = new int[n];

        // Compute scaled probabilities and split indices by whether they are
        // below or above the uniform threshold.
        double[] p = new double[n];
        for (int i = 0; i < n; i++) {
            p[i] = weights[i] * n;
            if (p[i] < 1) {
                small.add(i);
            } else {
                large.add(i);
            }
        }

        // Pair each under-full column with an over-full one.
        while (!small.isEmpty() && !large.isEmpty()) {
            int l = small.remove(small.size() - 1);
            int g = large.remove(large.size() - 1);
            this.probabilities[l] = p[l];
            this.alias[l] = g;
            p[g] = p[g] + p[l] - 1;
            if (p[g] < 1) {
                small.add(g);
            } else {
                large.add(g);
            }
        }

        // Remaining columns (leftovers from floating-point rounding) are full.
        while (!large.isEmpty()) {
            int g = large.remove(large.size() - 1);
            this.probabilities[g] = 1;
        }
        while (!small.isEmpty()) {
            int l = small.remove(small.size() - 1);
            this.probabilities[l] = 1;
        }
    }

    /** Draws one index with probability proportional to its weight. */
    int next() {
        int n = alias.length;
        int i;
        double f;
        // Only the RNG is shared mutable state; the tables are immutable.
        synchronized (lock) {
            i = random.nextInt(n);
            f = random.nextDouble();
        }
        if (f < probabilities[i]) {
            return i;
        }
        return alias[i];
    }
}

View file

@ -1,37 +0,0 @@
package info.frostfs.sdk.pool;
import info.frostfs.sdk.dto.session.SessionToken;
import org.apache.commons.lang3.StringUtils;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
/**
 * Thread-safe cache of session tokens, keyed by address+publicKey
 * (see Pool.formCacheKey).
 */
public class SessionCache {
    private final ConcurrentMap<String, SessionToken> cache = new ConcurrentHashMap<>();
    // NOTE(review): tokenDuration is stored but never read in this class, and
    // currentEpoch is never used at all — confirm whether they can be removed.
    private final long tokenDuration;
    private long currentEpoch;

    public SessionCache(long sessionExpirationDuration) {
        this.tokenDuration = sessionExpirationDuration;
    }

    public boolean contains(String key) {
        return cache.containsKey(key);
    }

    // Returns the cached token, or null for a blank key or a cache miss.
    public SessionToken tryGetValue(String key) {
        return StringUtils.isBlank(key) ? null : cache.get(key);
    }

    // Stores the token; silently ignores a null key.
    public void setValue(String key, SessionToken value) {
        if (key != null) {
            cache.put(key, value);
        }
    }

    // Evicts every entry whose key starts with the given prefix
    // (used to drop all sessions of one node address).
    public void deleteByPrefix(String prefix) {
        cache.keySet().removeIf(key -> key.startsWith(prefix));
    }
}

View file

@ -1,15 +0,0 @@
package info.frostfs.sdk.pool;
import lombok.Getter;
import lombok.Setter;
import java.util.ArrayList;
import java.util.List;
/**
 * Aggregated pool statistics returned by Pool.statistic().
 */
@Getter
@Setter
public class Statistic {
    // Sum of overall error counts across all nodes.
    private long overallErrors;
    // Per-node statistics entries.
    private List<NodeStatistic> nodes = new ArrayList<>();
    // Addresses of currently healthy nodes (may contain null slots for
    // unhealthy clients — see Pool.statistic()).
    private String[] currentNodes;
}

View file

@ -1,13 +0,0 @@
package info.frostfs.sdk.pool;
import lombok.Getter;
import lombok.Setter;
/**
 * Running totals for one API method: cumulative elapsed time and request count.
 */
@Getter
@Setter
public class StatusSnapshot {
    private long allTime;
    private long allRequests;
}

View file

@ -1,22 +0,0 @@
package info.frostfs.sdk.pool;
import java.util.ArrayList;
import java.util.List;
/**
 * A simple LIFO list of integer indices.
 *
 * <p>NOTE(review): all members are private and nothing in the visible code
 * uses this class — it appears to be dead code; confirm before widening
 * visibility or deleting.
 */
class WorkList {
    private final List<Integer> elements = new ArrayList<>();

    private int getLength() {
        return elements.size();
    }

    private void add(int element) {
        elements.add(element);
    }

    // Pops the last element. List.remove(int) already returns the removed
    // element, so the previous separate get(...) call was redundant; behavior
    // (including IndexOutOfBoundsException on an empty list) is unchanged.
    private int remove() {
        return elements.remove(elements.size() - 1);
    }
}

View file

@ -1,26 +0,0 @@
package info.frostfs.sdk.pool;
import info.frostfs.sdk.jdo.ECDsa;
import io.grpc.ClientInterceptors;
import io.grpc.ManagedChannelBuilder;
import lombok.Getter;
import lombok.Setter;
import org.slf4j.Logger;
import java.util.Collection;
/**
 * Per-client configuration handed to a ClientWrapper/FrostFSClient,
 * derived from PoolInitParameters by the default client builder.
 */
@Getter
@Setter
public class WrapperPrm {
    private Logger logger;
    // Endpoint address this client connects to.
    private String address;
    // Signing key shared across the pool.
    private ECDsa key;
    private long dialTimeout;
    private long streamTimeout;
    // Consecutive-error count after which the client is marked unhealthy.
    private int errorThreshold;
    // Callbacks and channel options; not set by the default builder in
    // the visible code.
    private Runnable responseInfoCallback;
    private Runnable poolRequestInfoCallback;
    private ManagedChannelBuilder<?> grpcChannelOptions;
    // Grace period before closing a replaced client.
    private long gracefulCloseOnSwitchTimeout;
    // NOTE(review): element type mirrors PoolInitParameters — likely should be
    // io.grpc.ClientInterceptor rather than the ClientInterceptors utility
    // class; confirm.
    private Collection<ClientInterceptors> interceptors;
}

View file

@ -1,8 +0,0 @@
package info.frostfs.sdk.services;
import frostfs.accounting.Types;
import info.frostfs.sdk.jdo.parameters.CallContext;
/**
 * Accounting operations of the FrostFS API.
 */
public interface AccountingClient {
    // Returns the balance of the client's account.
    Types.Decimal getBalance(CallContext ctx);
}

View file

@ -1,17 +0,0 @@
package info.frostfs.sdk.services;
import info.frostfs.sdk.dto.ape.Chain;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.ape.PrmApeChainAdd;
import info.frostfs.sdk.jdo.parameters.ape.PrmApeChainList;
import info.frostfs.sdk.jdo.parameters.ape.PrmApeChainRemove;
import java.util.List;
/**
 * APE (Access Policy Engine) chain management operations.
 */
public interface ApeManagerClient {
    // Adds a policy chain; returns its identifier bytes.
    byte[] addChain(PrmApeChainAdd args, CallContext ctx);

    void removeChain(PrmApeChainRemove args, CallContext ctx);

    List<Chain> listChains(PrmApeChainList args, CallContext ctx);
}

View file

@ -1,9 +0,0 @@
package info.frostfs.sdk.services;
import info.frostfs.sdk.jdo.parameters.CallContext;
/**
 * The full FrostFS client surface: union of all service interfaces
 * plus connection establishment.
 */
public interface CommonClient extends
        AccountingClient, ApeManagerClient, ContainerClient, NetmapClient, ObjectClient, SessionClient, ToolsClient {
    // Establishes the connection; returns null on success or an error message
    // (see Pool.dial for the pooled implementation).
    String dial(CallContext ctx);
}

View file

@ -1,21 +0,0 @@
package info.frostfs.sdk.services;
import info.frostfs.sdk.dto.container.Container;
import info.frostfs.sdk.dto.container.ContainerId;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerCreate;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerDelete;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerGet;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerGetAll;
import java.util.List;
/**
 * Container management operations of the FrostFS API.
 */
public interface ContainerClient {
    Container getContainer(PrmContainerGet args, CallContext ctx);

    List<ContainerId> listContainers(PrmContainerGetAll args, CallContext ctx);

    ContainerId createContainer(PrmContainerCreate args, CallContext ctx);

    void deleteContainer(PrmContainerDelete args, CallContext ctx);
}

View file

@ -1,13 +0,0 @@
package info.frostfs.sdk.services;
import info.frostfs.sdk.jdo.ClientEnvironment;
import lombok.Getter;
/**
 * Base class holding the shared {@link ClientEnvironment} and exposing it
 * to the service implementations that extend it.
 */
public class ContextAccessor {
    private final ClientEnvironment context;

    public ContextAccessor(ClientEnvironment context) {
        this.context = context;
    }

    /** Returns the client environment shared by all service clients. */
    public ClientEnvironment getContext() {
        return context;
    }
}

View file

@ -1,14 +0,0 @@
package info.frostfs.sdk.services;
import info.frostfs.sdk.dto.netmap.NetmapSnapshot;
import info.frostfs.sdk.dto.netmap.NodeInfo;
import info.frostfs.sdk.jdo.NetworkSettings;
import info.frostfs.sdk.jdo.parameters.CallContext;
/**
 * Client for the FrostFS netmap service: network topology and settings.
 */
public interface NetmapClient {
    /** Fetches the current network map snapshot. */
    NetmapSnapshot getNetmapSnapshot(CallContext ctx);

    /** Fetches information about the node this client is connected to. */
    NodeInfo getLocalNodeInfo(CallContext ctx);

    /**
     * Fetches the network configuration; the implementation caches the
     * decoded settings on the client environment.
     */
    NetworkSettings getNetworkSettings(CallContext ctx);
}

View file

@ -1,34 +0,0 @@
package info.frostfs.sdk.services;
import info.frostfs.sdk.dto.object.ObjectFrostFS;
import info.frostfs.sdk.dto.object.ObjectId;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.object.*;
import info.frostfs.sdk.jdo.parameters.object.patch.PrmObjectPatch;
import info.frostfs.sdk.jdo.parameters.object.patch.PrmRangeGet;
import info.frostfs.sdk.jdo.parameters.object.patch.PrmRangeHashGet;
import info.frostfs.sdk.jdo.result.ObjectHeaderResult;
import info.frostfs.sdk.services.impl.rwhelper.ObjectWriter;
import info.frostfs.sdk.services.impl.rwhelper.RangeReader;
/**
 * Client for the FrostFS object service: read, write, delete, search and
 * patch objects.
 */
public interface ObjectClient {
    /** Fetches an object's header (and split info when the object is split). */
    ObjectHeaderResult getObjectHead(PrmObjectHeadGet args, CallContext ctx);

    /** Fetches an object; the payload is consumed via the model's reader. */
    ObjectFrostFS getObject(PrmObjectGet args, CallContext ctx);

    /** Opens a write stream for uploading one object's payload. */
    ObjectWriter putObject(PrmObjectPut args, CallContext ctx);

    /**
     * Uploads a payload with client-side cut: large payloads are split into
     * parts bounded by the network's maximum object size.
     */
    ObjectId putClientCutObject(PrmObjectClientCutPut args, CallContext ctx);

    /** Uploads a fully formed object in a single request. */
    ObjectId putSingleObject(PrmObjectSinglePut args, CallContext ctx);

    /** Deletes an object. */
    void deleteObject(PrmObjectDelete args, CallContext ctx);

    /** Searches for objects; ids are produced lazily from the result stream. */
    Iterable<ObjectId> searchObjects(PrmObjectSearch args, CallContext ctx);

    /** Reads a byte range of an object's payload. */
    RangeReader getRange(PrmRangeGet args, CallContext ctx);

    /** Computes hashes over the requested payload ranges, one per range. */
    byte[][] getRangeHash(PrmRangeHashGet args, CallContext ctx);

    /** Applies patches to an object's payload; returns the new object id. */
    ObjectId patchObject(PrmObjectPatch args, CallContext ctx);
}

View file

@ -1,9 +0,0 @@
package info.frostfs.sdk.services;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.session.PrmSessionCreate;
/**
 * Client for the FrostFS session service.
 */
public interface SessionClient {
    /**
     * Opens a new session with the node.
     *
     * @param args session creation parameters
     * @param ctx  call context (timeout settings) for the gRPC call
     * @return the token identifying the created session
     */
    SessionToken createSession(PrmSessionCreate args, CallContext ctx);
}

View file

@ -1,9 +0,0 @@
package info.frostfs.sdk.services;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.ClientEnvironment;
import info.frostfs.sdk.jdo.parameters.CallContext;
/**
 * Helper for reusing a session token across service calls.
 */
public interface SessionTools {
    /**
     * Returns a session token for the given environment, creating one first
     * when needed (the exact caching policy is defined by the implementation).
     */
    SessionToken getOrCreateSession(ClientEnvironment env, CallContext ctx);
}

View file

@ -1,8 +0,0 @@
package info.frostfs.sdk.services;
import info.frostfs.sdk.dto.object.ObjectHeader;
import info.frostfs.sdk.dto.object.ObjectId;
/**
 * Utility operations over FrostFS objects. Unlike the other client
 * interfaces these methods take no {@code CallContext}.
 */
public interface ToolsClient {
    /** Computes the object id for the given object header. */
    ObjectId calculateObjectId(ObjectHeader header);
}

View file

@ -1,48 +0,0 @@
package info.frostfs.sdk.services.impl;
import frostfs.accounting.AccountingServiceGrpc;
import frostfs.accounting.Service;
import frostfs.accounting.Types;
import info.frostfs.sdk.jdo.ClientEnvironment;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.mappers.object.OwnerIdMapper;
import info.frostfs.sdk.services.AccountingClient;
import info.frostfs.sdk.services.ContextAccessor;
import info.frostfs.sdk.tools.RequestConstructor;
import info.frostfs.sdk.tools.RequestSigner;
import info.frostfs.sdk.tools.Verifier;
import static info.frostfs.sdk.utils.DeadLineUtil.deadLineAfter;
/**
 * gRPC-backed implementation of {@link AccountingClient}. All calls go
 * through a blocking stub bound to the environment's channel; per-call
 * deadlines come from the {@link CallContext}.
 */
public class AccountingClientImpl extends ContextAccessor implements AccountingClient {
    private final AccountingServiceGrpc.AccountingServiceBlockingStub serviceBlockingStub;

    public AccountingClientImpl(ClientEnvironment clientEnvironment) {
        super(clientEnvironment);
        this.serviceBlockingStub = AccountingServiceGrpc.newBlockingStub(getContext().getChannel());
    }

    /**
     * Fetches the balance of the account belonging to this client's owner id.
     */
    @Override
    public Types.Decimal getBalance(CallContext ctx) {
        var boundStub = deadLineAfter(serviceBlockingStub, ctx.getTimeout(), ctx.getTimeUnit());
        var reply = boundStub.balance(buildBalanceRequest());
        Verifier.checkResponse(reply);
        return reply.getBody().getBalance();
    }

    /** Builds a signed balance request for this client's owner id. */
    private Service.BalanceRequest buildBalanceRequest() {
        var requestBuilder = Service.BalanceRequest.newBuilder()
                .setBody(Service.BalanceRequest.Body.newBuilder()
                        .setOwnerId(OwnerIdMapper.toGrpcMessage(getContext().getOwnerId()))
                        .build());
        // Attach the meta header first, then sign — same order as the
        // original request construction.
        RequestConstructor.addMetaHeader(requestBuilder);
        RequestSigner.sign(requestBuilder, getContext().getKey());
        return requestBuilder.build();
    }
}

View file

@ -1,123 +0,0 @@
package info.frostfs.sdk.services.impl;
import com.google.protobuf.ByteString;
import frostfs.ape.Types;
import frostfs.apemanager.APEManagerServiceGrpc;
import frostfs.apemanager.Service;
import info.frostfs.sdk.dto.ape.Chain;
import info.frostfs.sdk.jdo.ClientEnvironment;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.ape.PrmApeChainAdd;
import info.frostfs.sdk.jdo.parameters.ape.PrmApeChainList;
import info.frostfs.sdk.jdo.parameters.ape.PrmApeChainRemove;
import info.frostfs.sdk.mappers.chain.ChainTargetMapper;
import info.frostfs.sdk.services.ApeManagerClient;
import info.frostfs.sdk.services.ContextAccessor;
import info.frostfs.sdk.tools.RequestConstructor;
import info.frostfs.sdk.tools.RequestSigner;
import info.frostfs.sdk.tools.Verifier;
import info.frostfs.sdk.tools.ape.RuleDeserializer;
import info.frostfs.sdk.tools.ape.RuleSerializer;
import java.util.List;
import java.util.stream.Collectors;
import static info.frostfs.sdk.utils.DeadLineUtil.deadLineAfter;
import static info.frostfs.sdk.utils.Validator.validate;
/**
 * gRPC-backed implementation of {@link ApeManagerClient}: manages APE rule
 * chains through the APE manager service. Every request carries a meta
 * header and a signature produced with the environment's key.
 */
public class ApeManagerClientImpl extends ContextAccessor implements ApeManagerClient {
    private final APEManagerServiceGrpc.APEManagerServiceBlockingStub apeManagerServiceClient;

    public ApeManagerClientImpl(ClientEnvironment clientEnvironment) {
        super(clientEnvironment);
        this.apeManagerServiceClient = APEManagerServiceGrpc.newBlockingStub(getContext().getChannel());
    }

    /** Binds the stub to the deadline carried by the call context. */
    private APEManagerServiceGrpc.APEManagerServiceBlockingStub stubFor(CallContext ctx) {
        return deadLineAfter(apeManagerServiceClient, ctx.getTimeout(), ctx.getTimeUnit());
    }

    /**
     * Adds a rule chain to the given target and returns the created chain id.
     */
    @Override
    public byte[] addChain(PrmApeChainAdd args, CallContext ctx) {
        validate(args);
        var reply = stubFor(ctx).addChain(buildAddChainRequest(args));
        Verifier.checkResponse(reply);
        return reply.getBody().getChainId().toByteArray();
    }

    /**
     * Removes a rule chain from the given target.
     */
    @Override
    public void removeChain(PrmApeChainRemove args, CallContext ctx) {
        validate(args);
        var reply = stubFor(ctx).removeChain(buildRemoveChainRequest(args));
        Verifier.checkResponse(reply);
    }

    /**
     * Lists and decodes the rule chains attached to the given target.
     */
    @Override
    public List<Chain> listChains(PrmApeChainList args, CallContext ctx) {
        validate(args);
        var reply = stubFor(ctx).listChains(buildListChainsRequest(args));
        Verifier.checkResponse(reply);
        return reply.getBody().getChainsList().stream()
                .map(rawChain -> RuleDeserializer.deserialize(rawChain.getRaw().toByteArray()))
                .collect(Collectors.toList());
    }

    /** Builds a signed AddChain request carrying the serialized rule chain. */
    private Service.AddChainRequest buildAddChainRequest(PrmApeChainAdd args) {
        var serializedRule = RuleSerializer.serialize(args.getChain());
        var grpcChain = Types.Chain.newBuilder()
                .setRaw(ByteString.copyFrom(serializedRule))
                .build();
        var requestBuilder = Service.AddChainRequest.newBuilder()
                .setBody(Service.AddChainRequest.Body.newBuilder()
                        .setChain(grpcChain)
                        .setTarget(ChainTargetMapper.toGrpcMessage(args.getChainTarget()))
                        .build());
        // Meta header first, then signature — same order for every request.
        RequestConstructor.addMetaHeader(requestBuilder, args.getXHeaders());
        RequestSigner.sign(requestBuilder, getContext().getKey());
        return requestBuilder.build();
    }

    /** Builds a signed RemoveChain request for the given chain id and target. */
    private Service.RemoveChainRequest buildRemoveChainRequest(PrmApeChainRemove args) {
        var requestBuilder = Service.RemoveChainRequest.newBuilder()
                .setBody(Service.RemoveChainRequest.Body.newBuilder()
                        .setChainId(ByteString.copyFrom(args.getChainId()))
                        .setTarget(ChainTargetMapper.toGrpcMessage(args.getChainTarget()))
                        .build());
        RequestConstructor.addMetaHeader(requestBuilder, args.getXHeaders());
        RequestSigner.sign(requestBuilder, getContext().getKey());
        return requestBuilder.build();
    }

    /** Builds a signed ListChains request for the given target. */
    private Service.ListChainsRequest buildListChainsRequest(PrmApeChainList args) {
        var requestBuilder = Service.ListChainsRequest.newBuilder()
                .setBody(Service.ListChainsRequest.Body.newBuilder()
                        .setTarget(ChainTargetMapper.toGrpcMessage(args.getChainTarget()))
                        .build());
        RequestConstructor.addMetaHeader(requestBuilder, args.getXHeaders());
        RequestSigner.sign(requestBuilder, getContext().getKey());
        return requestBuilder.build();
    }
}

View file

@ -1,253 +0,0 @@
package info.frostfs.sdk.services.impl;
import frostfs.container.ContainerServiceGrpc;
import frostfs.container.Service;
import frostfs.refs.Types;
import info.frostfs.sdk.dto.container.Container;
import info.frostfs.sdk.dto.container.ContainerId;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.enums.StatusCode;
import info.frostfs.sdk.enums.WaitExpects;
import info.frostfs.sdk.exceptions.ResponseFrostFSException;
import info.frostfs.sdk.exceptions.TimeoutFrostFSException;
import info.frostfs.sdk.jdo.ClientEnvironment;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.PrmWait;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerCreate;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerDelete;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerGet;
import info.frostfs.sdk.jdo.parameters.container.PrmContainerGetAll;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import info.frostfs.sdk.mappers.container.ContainerIdMapper;
import info.frostfs.sdk.mappers.container.ContainerMapper;
import info.frostfs.sdk.mappers.netmap.VersionMapper;
import info.frostfs.sdk.mappers.object.OwnerIdMapper;
import info.frostfs.sdk.services.ContainerClient;
import info.frostfs.sdk.services.ContextAccessor;
import info.frostfs.sdk.tools.RequestConstructor;
import info.frostfs.sdk.tools.RequestSigner;
import info.frostfs.sdk.tools.Verifier;
import info.frostfs.sdk.utils.WaitUtil;
import java.time.LocalDateTime;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import static info.frostfs.sdk.constants.AttributeConst.DISABLE_HOMOMORPHIC_HASHING_ATTRIBUTE;
import static info.frostfs.sdk.utils.DeadLineUtil.deadLineAfter;
import static info.frostfs.sdk.utils.Validator.validate;
import static java.util.Objects.isNull;
import static java.util.Objects.nonNull;
/**
 * gRPC-backed implementation of {@link ContainerClient}. Container creation
 * and deletion are not immediately observable on the node, so both poll the
 * container's state until the expected condition holds or the wait deadline
 * from {@link PrmWait} expires.
 */
public class ContainerClientImpl extends ContextAccessor implements ContainerClient {
    private final ContainerServiceGrpc.ContainerServiceBlockingStub serviceBlockingStub;
    private final SessionToolsImpl sessionTools;

    public ContainerClientImpl(ClientEnvironment clientEnvironment) {
        super(clientEnvironment);
        this.serviceBlockingStub = ContainerServiceGrpc.newBlockingStub(clientEnvironment.getChannel());
        this.sessionTools = new SessionToolsImpl(clientEnvironment);
    }

    /**
     * Returns the session token carried by {@code sessionContext}, or obtains
     * one from the shared session tools when none was supplied.
     */
    public SessionToken getOrCreateSession(SessionContext sessionContext, CallContext ctx) {
        return isNull(sessionContext.getSessionToken())
                ? sessionTools.getOrCreateSession(getContext(), ctx)
                : sessionContext.getSessionToken();
    }

    /** Fetches a container by id. */
    @Override
    public Container getContainer(PrmContainerGet args, CallContext ctx) {
        validate(args);
        var request = createGetRequest(args);
        var service = deadLineAfter(serviceBlockingStub, ctx.getTimeout(), ctx.getTimeUnit());
        var response = service.get(request);
        Verifier.checkResponse(response);
        return ContainerMapper.toModel(response.getBody().getContainer());
    }

    /** Lists the ids of all containers owned by this client's owner id. */
    @Override
    public List<ContainerId> listContainers(PrmContainerGetAll args, CallContext ctx) {
        validate(args);
        var request = createListRequest(args);
        var service = deadLineAfter(serviceBlockingStub, ctx.getTimeout(), ctx.getTimeUnit());
        var response = service.list(request);
        Verifier.checkResponse(response);
        return response.getBody().getContainerIdsList().stream()
                .map(cid -> new ContainerId(cid.getValue().toByteArray()))
                .collect(Collectors.toList());
    }

    /**
     * Creates a container and blocks until it becomes observable on the node
     * (polling controlled by {@code args.getWaitParams()}).
     */
    @Override
    public ContainerId createContainer(PrmContainerCreate args, CallContext ctx) {
        validate(args);
        var request = createPutRequest(args, ctx);
        var service = deadLineAfter(serviceBlockingStub, ctx.getTimeout(), ctx.getTimeUnit());
        var response = service.put(request);
        Verifier.checkResponse(response);
        waitForContainer(WaitExpects.EXISTS, response.getBody().getContainerId(), args.getWaitParams());
        return new ContainerId(response.getBody().getContainerId().getValue().toByteArray());
    }

    /**
     * Deletes a container and blocks until it is no longer observable on the
     * node (polling controlled by {@code args.getWaitParams()}).
     */
    @Override
    public void deleteContainer(PrmContainerDelete args, CallContext ctx) {
        validate(args);
        var request = createDeleteRequest(args, ctx);
        var service = deadLineAfter(serviceBlockingStub, ctx.getTimeout(), ctx.getTimeUnit());
        var response = service.delete(request);
        Verifier.checkResponse(response);
        waitForContainer(WaitExpects.REMOVED, request.getBody().getContainerId(), args.getWaitParams());
    }

    /**
     * Polls the container's GET endpoint until the expected state is reached:
     * for EXISTS a successful GET ends the wait; for REMOVED a
     * CONTAINER_NOT_FOUND response ends it. Throws
     * {@link TimeoutFrostFSException} once the wait deadline passes.
     * NOTE(review): polling uses the raw stub, i.e. without the per-call
     * gRPC deadline used elsewhere — presumably intentional since PrmWait
     * carries its own deadline; confirm.
     */
    private void waitForContainer(WaitExpects expect, Types.ContainerID cid, PrmWait waitParams) {
        var request = createGetRequest(cid, null);
        // Fall back to default wait parameters when the caller gave none.
        waitParams = isNull(waitParams) ? new PrmWait() : waitParams;
        var deadLine = waitParams.getDeadline();
        while (true) {
            try {
                var response = serviceBlockingStub.get(request);
                Verifier.checkResponse(response);
                // Container is visible: done when waiting for existence.
                if (expect == WaitExpects.EXISTS) {
                    break;
                }
                if (LocalDateTime.now().isAfter(deadLine)) {
                    throw new TimeoutFrostFSException();
                }
                WaitUtil.sleep(waitParams.getPollInterval().toMillis());
            } catch (ResponseFrostFSException exp) {
                if (LocalDateTime.now().isAfter(deadLine)) {
                    throw new TimeoutFrostFSException();
                }
                // Any status other than "container not found" is a real error.
                if (exp.getStatus().getCode() != StatusCode.CONTAINER_NOT_FOUND) {
                    throw exp;
                }
                // Container is gone: done when waiting for removal.
                if (expect == WaitExpects.REMOVED) {
                    break;
                }
                WaitUtil.sleep(waitParams.getPollInterval().toMillis());
            }
        }
    }

    private Service.GetRequest createGetRequest(PrmContainerGet args) {
        var cid = ContainerIdMapper.toGrpcMessage(args.getContainerId());
        return createGetRequest(cid, args.getXHeaders());
    }

    // Builds a signed GET request for the given container id.
    private Service.GetRequest createGetRequest(Types.ContainerID cid, Map<String, String> xHeaders) {
        var body = Service.GetRequest.Body.newBuilder()
                .setContainerId(cid)
                .build();
        var request = Service.GetRequest.newBuilder()
                .setBody(body);
        RequestConstructor.addMetaHeader(request, xHeaders);
        RequestSigner.sign(request, getContext().getKey());
        return request.build();
    }

    // Builds a signed LIST request scoped to this client's owner id.
    private Service.ListRequest createListRequest(PrmContainerGetAll args) {
        var body = Service.ListRequest.Body.newBuilder()
                .setOwnerId(OwnerIdMapper.toGrpcMessage(getContext().getOwnerId()))
                .build();
        var request = Service.ListRequest.newBuilder()
                .setBody(body);
        RequestConstructor.addMetaHeader(request, args.getXHeaders());
        RequestSigner.sign(request, getContext().getKey());
        return request.build();
    }

    // Builds a signed PUT request: fills owner id and version from the client
    // environment when the container does not carry them, signs the container
    // with RFC 6979, and wraps the call in a container session token (PUT verb).
    private Service.PutRequest createPutRequest(PrmContainerCreate args, CallContext ctx) {
        syncContainerWithNetwork(args.getContainer(), ctx);
        var builder = ContainerMapper.toGrpcMessage(args.getContainer());
        if (!builder.hasOwnerId()) {
            builder.setOwnerId(OwnerIdMapper.toGrpcMessage(getContext().getOwnerId()));
        }
        if (!builder.hasVersion()) {
            builder.setVersion(VersionMapper.toGrpcMessage(getContext().getVersion()));
        }
        var container = builder.build();
        var body = Service.PutRequest.Body.newBuilder()
                .setContainer(container)
                .setSignature(RequestSigner.signRFC6979(getContext().getKey(), container))
                .build();
        var request = Service.PutRequest.newBuilder()
                .setBody(body);
        var sessionToken = getOrCreateSession(args, ctx);
        var protoToken = RequestConstructor.createContainerTokenContext(
                sessionToken,
                null,
                frostfs.session.Types.ContainerSessionContext.Verb.PUT,
                container.getOwnerId(),
                getContext().getKey()
        );
        RequestConstructor.addMetaHeader(request, args.getXHeaders(), protoToken);
        RequestSigner.sign(request, getContext().getKey());
        return request.build();
    }

    // Builds a signed DELETE request: the container id value is signed with
    // RFC 6979 and the call is wrapped in a container session token (DELETE verb).
    private Service.DeleteRequest createDeleteRequest(PrmContainerDelete args, CallContext ctx) {
        var cid = ContainerIdMapper.toGrpcMessage(args.getContainerId());
        var body = Service.DeleteRequest.Body.newBuilder()
                .setContainerId(cid)
                .setSignature(RequestSigner.signRFC6979(getContext().getKey(), cid.getValue()))
                .build();
        var request = Service.DeleteRequest.newBuilder()
                .setBody(body);
        var sessionToken = getOrCreateSession(args, ctx);
        var protoToken = RequestConstructor.createContainerTokenContext(
                sessionToken,
                null,
                frostfs.session.Types.ContainerSessionContext.Verb.DELETE,
                null,
                getContext().getKey()
        );
        RequestConstructor.addMetaHeader(request, args.getXHeaders(), protoToken);
        RequestSigner.sign(request, getContext().getKey());
        return request.build();
    }

    // Aligns the container's homomorphic-hashing attribute with the current
    // network settings: sets the disable attribute when the network disables
    // homomorphic hashing, removes it otherwise.
    private void syncContainerWithNetwork(Container container, CallContext callContext) {
        var settings = getContext().getFrostFSClient().getNetworkSettings(callContext);
        if (nonNull(settings.getHomomorphicHashingDisabled()) && settings.getHomomorphicHashingDisabled()) {
            container.getAttributes().put(DISABLE_HOMOMORPHIC_HASHING_ATTRIBUTE, Boolean.TRUE.toString());
        } else {
            container.getAttributes().remove(DISABLE_HOMOMORPHIC_HASHING_ATTRIBUTE, Boolean.TRUE.toString());
        }
    }
}

View file

@ -1,160 +0,0 @@
package info.frostfs.sdk.services.impl;
import frostfs.netmap.NetmapServiceGrpc;
import frostfs.netmap.Service;
import frostfs.netmap.Types;
import info.frostfs.sdk.dto.netmap.NetmapSnapshot;
import info.frostfs.sdk.dto.netmap.NodeInfo;
import info.frostfs.sdk.jdo.ClientEnvironment;
import info.frostfs.sdk.jdo.NetworkSettings;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.mappers.netmap.NetmapSnapshotMapper;
import info.frostfs.sdk.mappers.netmap.NodeInfoMapper;
import info.frostfs.sdk.services.ContextAccessor;
import info.frostfs.sdk.services.NetmapClient;
import info.frostfs.sdk.tools.RequestConstructor;
import info.frostfs.sdk.tools.Verifier;
import java.nio.charset.StandardCharsets;
import static info.frostfs.sdk.tools.RequestSigner.sign;
import static info.frostfs.sdk.utils.DeadLineUtil.deadLineAfter;
import static java.util.Objects.nonNull;
/**
 * gRPC-backed implementation of {@link NetmapClient}. Network settings are
 * decoded from the node's raw key/value configuration parameters and cached
 * on the client environment after the first fetch.
 */
public class NetmapClientImpl extends ContextAccessor implements NetmapClient {
    private final NetmapServiceGrpc.NetmapServiceBlockingStub netmapServiceClient;

    public NetmapClientImpl(ClientEnvironment clientEnvironment) {
        super(clientEnvironment);
        this.netmapServiceClient = NetmapServiceGrpc.newBlockingStub(getContext().getChannel());
    }

    /** Decodes a raw parameter value as a boolean: true iff any byte is non-zero. */
    private static boolean getBoolValue(byte[] bytes) {
        for (var byteValue : bytes) {
            if (byteValue != 0) {
                return true;
            }
        }
        return false;
    }

    /**
     * Decodes a raw parameter value as a little-endian integer
     * ({@code bytes[0]} is the least significant byte).
     *
     * <p>Bug fix: each byte is masked with {@code 0xFF} before accumulation.
     * Java bytes are signed, so the previous unmasked addition sign-extended
     * any byte >= 0x80 and corrupted the decoded value (e.g. the single
     * byte 0xF0 decoded as -16 instead of 240).
     */
    private static long getLongValue(byte[] bytes) {
        long val = 0;
        for (var i = bytes.length - 1; i >= 0; i--) {
            val = (val << Byte.SIZE) + (bytes[i] & 0xFF);
        }
        return val;
    }

    /**
     * Routes one raw network-config parameter into the matching typed field
     * of {@code settings}; unrecognized keys are preserved in the
     * unnamed-settings map with their raw byte values.
     */
    private static void setNetworksParam(Types.NetworkConfig.Parameter param, NetworkSettings settings) {
        var key = new String(param.getKey().toByteArray(), StandardCharsets.UTF_8);
        var valueBytes = param.getValue().toByteArray();
        switch (key) {
            case "AuditFee":
                settings.setAuditFee(getLongValue(valueBytes));
                break;
            case "BasicIncomeRate":
                settings.setBasicIncomeRate(getLongValue(valueBytes));
                break;
            case "ContainerFee":
                settings.setContainerFee(getLongValue(valueBytes));
                break;
            case "ContainerAliasFee":
                settings.setContainerAliasFee(getLongValue(valueBytes));
                break;
            case "EpochDuration":
                settings.setEpochDuration(getLongValue(valueBytes));
                break;
            case "InnerRingCandidateFee":
                settings.setIRCandidateFee(getLongValue(valueBytes));
                break;
            case "MaxECDataCount":
                settings.setMaxECDataCount(getLongValue(valueBytes));
                break;
            case "MaxECParityCount":
                settings.setMaxECParityCount(getLongValue(valueBytes));
                break;
            case "MaxObjectSize":
                settings.setMaxObjectSize(getLongValue(valueBytes));
                break;
            case "WithdrawFee":
                settings.setWithdrawalFee(getLongValue(valueBytes));
                break;
            case "HomomorphicHashingDisabled":
                settings.setHomomorphicHashingDisabled(getBoolValue(valueBytes));
                break;
            case "MaintenanceModeAllowed":
                settings.setMaintenanceModeAllowed(getBoolValue(valueBytes));
                break;
            default:
                settings.getUnnamedSettings().put(key, valueBytes);
                break;
        }
    }

    /**
     * Returns the cached network settings, fetching and decoding them from
     * the node on first use.
     */
    @Override
    public NetworkSettings getNetworkSettings(CallContext ctx) {
        if (nonNull(getContext().getNetworkSettings())) {
            return getContext().getNetworkSettings();
        }
        var info = getNetworkInfo(ctx);
        var settings = new NetworkSettings();
        for (var param : info.getBody().getNetworkInfo().getNetworkConfig().getParametersList()) {
            setNetworksParam(param, settings);
        }
        // Cache for subsequent calls.
        getContext().setNetworkSettings(settings);
        return settings;
    }

    /** Fetches information about the node this client is connected to. */
    @Override
    public NodeInfo getLocalNodeInfo(CallContext ctx) {
        var request = Service.LocalNodeInfoRequest.newBuilder();
        RequestConstructor.addMetaHeader(request);
        sign(request, getContext().getKey());
        var service = deadLineAfter(netmapServiceClient, ctx.getTimeout(), ctx.getTimeUnit());
        var response = service.localNodeInfo(request.build());
        Verifier.checkResponse(response);
        return NodeInfoMapper.toModel(response.getBody());
    }

    /** Fetches the raw network-info response (used by getNetworkSettings). */
    public Service.NetworkInfoResponse getNetworkInfo(CallContext ctx) {
        var request = Service.NetworkInfoRequest.newBuilder();
        RequestConstructor.addMetaHeader(request);
        sign(request, getContext().getKey());
        var service = deadLineAfter(netmapServiceClient, ctx.getTimeout(), ctx.getTimeUnit());
        var response = service.networkInfo(request.build());
        Verifier.checkResponse(response);
        return response;
    }

    /** Fetches the current network map snapshot. */
    @Override
    public NetmapSnapshot getNetmapSnapshot(CallContext ctx) {
        var request = Service.NetmapSnapshotRequest.newBuilder();
        RequestConstructor.addMetaHeader(request);
        sign(request, getContext().getKey());
        var service = deadLineAfter(netmapServiceClient, ctx.getTimeout(), ctx.getTimeUnit());
        var response = service.netmapSnapshot(request.build());
        Verifier.checkResponse(response);
        return NetmapSnapshotMapper.toModel(response);
    }
}

View file

@ -1,648 +0,0 @@
package info.frostfs.sdk.services.impl;
import com.google.common.collect.Iterables;
import com.google.protobuf.ByteString;
import frostfs.object.ObjectServiceGrpc;
import frostfs.object.Service;
import frostfs.refs.Types;
import info.frostfs.sdk.constants.AppConst;
import info.frostfs.sdk.dto.object.*;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.enums.ObjectType;
import info.frostfs.sdk.exceptions.ProcessFrostFSException;
import info.frostfs.sdk.jdo.ClientEnvironment;
import info.frostfs.sdk.jdo.PutObjectResult;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.object.*;
import info.frostfs.sdk.jdo.parameters.object.patch.PrmObjectPatch;
import info.frostfs.sdk.jdo.parameters.object.patch.PrmRangeGet;
import info.frostfs.sdk.jdo.parameters.object.patch.PrmRangeHashGet;
import info.frostfs.sdk.jdo.parameters.session.SessionContext;
import info.frostfs.sdk.jdo.result.ObjectHeaderResult;
import info.frostfs.sdk.mappers.container.ContainerIdMapper;
import info.frostfs.sdk.mappers.object.*;
import info.frostfs.sdk.mappers.object.patch.AddressMapper;
import info.frostfs.sdk.mappers.object.patch.RangeMapper;
import info.frostfs.sdk.services.ContextAccessor;
import info.frostfs.sdk.services.ObjectClient;
import info.frostfs.sdk.services.impl.rwhelper.*;
import info.frostfs.sdk.tools.RequestConstructor;
import info.frostfs.sdk.tools.Verifier;
import org.apache.commons.collections4.CollectionUtils;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import static info.frostfs.sdk.constants.ErrorConst.PROTO_MESSAGE_IS_EMPTY_TEMPLATE;
import static info.frostfs.sdk.tools.RequestSigner.sign;
import static info.frostfs.sdk.utils.DeadLineUtil.deadLineAfter;
import static info.frostfs.sdk.utils.Validator.validate;
import static java.util.Objects.isNull;
import static java.util.Objects.nonNull;
public class ObjectClientImpl extends ContextAccessor implements ObjectClient {
private final ObjectServiceGrpc.ObjectServiceBlockingStub objectServiceBlockingClient;
private final ObjectServiceGrpc.ObjectServiceStub objectServiceClient;
private final ObjectToolsImpl objectToolsImpl;
private final SessionToolsImpl sessionTools;
/**
 * Creates an object-service client bound to the environment's gRPC channel.
 * Builds both a blocking stub (unary calls) and an async stub (streaming
 * calls), plus helpers for object construction and session management.
 */
public ObjectClientImpl(ClientEnvironment clientEnvironment) {
    super(clientEnvironment);
    this.objectServiceBlockingClient = ObjectServiceGrpc.newBlockingStub(getContext().getChannel());
    this.objectServiceClient = ObjectServiceGrpc.newStub(getContext().getChannel());
    this.objectToolsImpl = new ObjectToolsImpl(clientEnvironment);
    this.sessionTools = new SessionToolsImpl(clientEnvironment);
}
/**
 * Returns the session token supplied in {@code sessionContext}, or obtains
 * one from the shared session tools when none was given.
 */
public SessionToken getOrCreateSession(SessionContext sessionContext, CallContext ctx) {
    return isNull(sessionContext.getSessionToken())
            ? sessionTools.getOrCreateSession(getContext(), ctx)
            : sessionContext.getSessionToken();
}
/**
 * Fetches an object's header together with its split info (when present)
 * via a unary HEAD call.
 */
@Override
public ObjectHeaderResult getObjectHead(PrmObjectHeadGet args, CallContext ctx) {
    validate(args);
    var headRequest = createHeadRequest(args, ctx);
    var boundService = deadLineAfter(objectServiceBlockingClient, ctx.getTimeout(), ctx.getTimeUnit());
    var headResponse = boundService.head(headRequest);
    Verifier.checkResponse(headResponse);
    var responseBody = headResponse.getBody();
    var resultBuilder = ObjectHeaderResult.builder();
    resultBuilder.headerInfo(ObjectHeaderMapper.toModel(responseBody.getHeader().getHeader()));
    resultBuilder.splitInfo(SplitInfoMapper.toModel(responseBody.getSplitInfo()));
    return resultBuilder.build();
}
/**
 * Fetches an object: builds the GET request and delegates to the streaming
 * read helper; the returned model carries a reader for the payload.
 */
@Override
public ObjectFrostFS getObject(PrmObjectGet args, CallContext ctx) {
    validate(args);
    var request = createGetRequest(args, ctx);
    return getObject(request, ctx);
}
/**
 * Deletes an object: validates the parameters, issues a unary DELETE and
 * checks the response status.
 */
@Override
public void deleteObject(PrmObjectDelete args, CallContext ctx) {
    validate(args);
    var deleteRequest = createDeleteRequest(args, ctx);
    var boundService = deadLineAfter(objectServiceBlockingClient, ctx.getTimeout(), ctx.getTimeUnit());
    var deleteResponse = boundService.delete(deleteRequest);
    Verifier.checkResponse(deleteResponse);
}
/**
 * Searches for objects matching the given filters. The returned Iterable is
 * lazy: each raw id from the result stream is mapped to an ObjectId on
 * iteration.
 */
@Override
public Iterable<ObjectId> searchObjects(PrmObjectSearch args, CallContext ctx) {
    validate(args);
    var request = createSearchRequest(args, ctx);
    var objectsIds = searchObjects(request, ctx);
    return Iterables.transform(objectsIds, input -> new ObjectId(input.getValue().toByteArray()));
}
/**
 * Opens an upload stream for one object and wraps it in an ObjectWriter
 * that the caller feeds the payload into.
 */
@Override
public ObjectWriter putObject(PrmObjectPut args, CallContext ctx) {
    validate(args);
    return new ObjectWriter(getContext(), args, getUploadStream(args, ctx));
}
/**
 * Client-side cut: splits a large payload into parts no bigger than the
 * network's maximum object size, uploads each part as its own object, then
 * uploads a link object tying the parts together. When the payload fits in
 * one object the method degenerates to a plain streamed PUT.
 *
 * @return the id of the parent (large) object, or of the single uploaded
 *         object when no split was needed
 */
@Override
public ObjectId putClientCutObject(PrmObjectClientCutPut args, CallContext ctx) {
    validate(args);
    var header = args.getObjectHeader();
    // Full payload length: from the header, or measured from the stream when
    // the header does not specify it.
    var fullLength = header.getPayloadLength() == 0 ? getStreamSize(args.getPayload()) : header.getPayloadLength();
    args.getPutObjectContext().setFullLength(fullLength);
    // Lazily cache the network's max object size on the put context.
    if (args.getPutObjectContext().getMaxObjectSizeCache() == 0) {
        var networkSettings = getContext().getFrostFSClient().getNetworkSettings(ctx);
        args.getPutObjectContext().setMaxObjectSizeCache(networkSettings.getMaxObjectSize().intValue());
    }
    var restBytes = fullLength - args.getPutObjectContext().getCurrentStreamPosition();
    var objectSize = restBytes > 0
            ? Math.min(args.getPutObjectContext().getMaxObjectSizeCache(), restBytes)
            : args.getPutObjectContext().getMaxObjectSizeCache();
    // define collection capacity
    var restPart = (restBytes % objectSize) > 0 ? 1 : 0;
    var objectsCount = fullLength > 0 ? (int) (restBytes / objectSize) + restPart : 0;
    List<ObjectId> sentObjectIds = new ArrayList<>(objectsCount);
    // keep attributes for the large object
    var attributes = args.getObjectHeader().getAttributes();
    Split split = new Split();
    // Part objects carry no attributes; they belong to the parent header.
    args.getObjectHeader().setAttributes(new ArrayList<>());
    // send all parts except the last one as separate Objects
    while (restBytes > (long) args.getPutObjectContext().getMaxObjectSizeCache()) {
        // Each part links back to the previous one through the split header.
        var previous = CollectionUtils.isNotEmpty(sentObjectIds)
                ? sentObjectIds.get(sentObjectIds.size() - 1)
                : null;
        split.setPrevious(previous);
        args.getObjectHeader().setSplit(split);
        var result = putMultipartStreamObject(args, ctx);
        sentObjectIds.add(result.getObjectId());
        restBytes -= result.getObjectSize();
    }
    // send the last part and create linkObject
    if (CollectionUtils.isNotEmpty(sentObjectIds)) {
        // Reconstruct the parent header with the original attributes and the
        // total payload length.
        var largeObjectHeader = new ObjectHeader(
                header.getContainerId(), ObjectType.REGULAR, attributes, fullLength, header.getVersion()
        );
        largeObjectHeader.setOwnerId(header.getOwnerId());
        split.setParentHeader(largeObjectHeader);
        var result = putMultipartStreamObject(args, ctx);
        sentObjectIds.add(result.getObjectId());
        var linkObject = new LinkObject(header.getContainerId(), split.getSplitId(), largeObjectHeader);
        linkObject.addChildren(sentObjectIds);
        putSingleObject(new PrmObjectSinglePut(linkObject), ctx);
        return split.getParent();
    }
    // We are here if the payload is placed to one Object. It means no cut action, just simple PUT.
    var singlePartResult = putMultipartStreamObject(args, ctx);
    return singlePartResult.getObjectId();
}
/**
 * Uploads a fully formed object in a single unary request.
 * NOTE(review): unlike the other public methods, this one does not call
 * validate(args) — confirm whether that is intentional.
 *
 * @return the id computed for the uploaded object
 */
@Override
public ObjectId putSingleObject(PrmObjectSinglePut args, CallContext ctx) {
    var grpcObject = objectToolsImpl.createObject(args.getObjectFrostFS());
    var request = createPutSingleRequest(grpcObject, args, ctx);
    var service = deadLineAfter(objectServiceBlockingClient, ctx.getTimeout(), ctx.getTimeUnit());
    var response = service.putSingle(request);
    Verifier.checkResponse(response);
    return new ObjectId(grpcObject.getObjectId().getValue().toByteArray());
}
/**
 * Requests a byte range of an object's payload and returns a reader over
 * the server's response stream.
 */
@Override
public RangeReader getRange(PrmRangeGet args, CallContext ctx) {
    validate(args);
    var request = createGetRangeRequest(args, ctx);
    var service = deadLineAfter(objectServiceBlockingClient, ctx.getTimeout(), ctx.getTimeUnit());
    return new RangeReader(service.getRange(request));
}
/**
 * Computes hashes over the requested payload ranges; returns one byte array
 * per requested range, in response order.
 */
@Override
public byte[][] getRangeHash(PrmRangeHashGet args, CallContext ctx) {
    validate(args);
    var hashRequest = createGetRangeHashRequest(args, ctx);
    var boundService = deadLineAfter(objectServiceBlockingClient, ctx.getTimeout(), ctx.getTimeUnit());
    var hashResponse = boundService.getRangeHash(hashRequest);
    Verifier.checkResponse(hashResponse);
    var hashList = hashResponse.getBody().getHashListList();
    var hashes = new byte[hashList.size()][];
    for (var i = 0; i < hashList.size(); i++) {
        hashes[i] = hashList.get(i).toByteArray();
    }
    return hashes;
}
/**
 * Streams patches over an object's payload and returns the id of the
 * patched object. The payload is read in chunks of
 * {@code args.getMaxChunkLength()} bytes; each chunk is sent as one patch
 * covering the corresponding source range, starting at
 * {@code args.getRange().getOffset()}.
 *
 * <p>Bug fix: the original code built the
 * {@code Service.PatchRequest.Body.Patch} message inside the loop but
 * discarded the result of {@code build()} — the patch was never attached to
 * the streamed request, so requests were signed and sent without any patch
 * data. The patch is now set on the request body before signing.
 */
@Override
public ObjectId patchObject(PrmObjectPatch args, CallContext ctx) {
    validate(args);
    var request = createInitPatchRequest(args);
    // Session token scoped to the PATCH verb for the target address.
    var protoToken = RequestConstructor.createObjectTokenContext(
            getOrCreateSession(args, ctx),
            request.getBody().getAddress(),
            frostfs.session.Types.ObjectSessionContext.Verb.PATCH,
            getContext().getKey()
    );
    var currentPos = args.getRange().getOffset();
    var chunkSize = args.getMaxChunkLength();
    byte[] chunkBuffer = new byte[chunkSize];
    var service = deadLineAfter(objectServiceClient, ctx.getTimeout(), ctx.getTimeUnit());
    PatchStreamer writer = new PatchStreamer(service);
    var bytesCount = readNBytes(args.getPayload(), chunkBuffer, chunkSize);
    while (bytesCount > 0) {
        var range = Service.Range.newBuilder()
                .setOffset(currentPos)
                .setLength(bytesCount)
                .build();
        var patch = Service.PatchRequest.Body.Patch.newBuilder()
                .setChunk(ByteString.copyFrom(chunkBuffer, 0, bytesCount))
                .setSourceRange(range)
                .build();
        // Fix: attach the patch to the request body (previously discarded).
        request.getBodyBuilder().setPatch(patch);
        currentPos += bytesCount;
        // Meta header and signature are refreshed for every streamed message.
        RequestConstructor.addMetaHeader(request, args.getXHeaders(), protoToken);
        sign(request, getContext().getKey());
        writer.write(request.build());
        bytesCount = readNBytes(args.getPayload(), chunkBuffer, chunkSize);
    }
    var response = writer.complete();
    Verifier.checkResponse(response);
    return ObjectIdMapper.toModel(response.getBody().getObjectId());
}
/**
 * Executes a streaming GET: reads the object header first, then attaches
 * the stream reader to the model so the caller can consume the payload.
 */
private ObjectFrostFS getObject(Service.GetRequest request, CallContext ctx) {
    var reader = getObjectInit(request, ctx);
    var grpcObject = reader.readHeader();
    var modelObject = ObjectFrostFSMapper.toModel(grpcObject);
    modelObject.setObjectReader(reader);
    return modelObject;
}
/**
 * Starts the GET stream. Rejects an empty (default-instance) request, since
 * that indicates it was never populated.
 *
 * @throws ProcessFrostFSException when the request serializes to zero bytes
 */
private ObjectReaderImpl getObjectInit(Service.GetRequest initRequest, CallContext ctx) {
    if (initRequest.getSerializedSize() == 0) {
        throw new ProcessFrostFSException(
                String.format(PROTO_MESSAGE_IS_EMPTY_TEMPLATE, initRequest.getClass().getName())
        );
    }
    var service = deadLineAfter(objectServiceBlockingClient, ctx.getTimeout(), ctx.getTimeUnit());
    return new ObjectReaderImpl(service.get(initRequest));
}
/**
 * Streams one object's payload in chunks over a PUT stream and returns the
 * resulting object id together with the number of payload bytes sent.
 * When the object size limit is 0 the cut is performed server-side, so the
 * loop only stops when the payload stream is exhausted.
 */
private PutObjectResult putMultipartStreamObject(PrmObjectClientCutPut args, CallContext ctx) {
    var chunkSize = args.getBufferMaxSize() > 0 ? args.getBufferMaxSize() : AppConst.OBJECT_CHUNK_SIZE;
    var restBytes =
            args.getPutObjectContext().getFullLength() - args.getPutObjectContext().getCurrentStreamPosition();
    // Do not allocate more than what is left to send.
    chunkSize = (int) Math.min(restBytes, chunkSize);
    byte[] chunkBuffer = args.getCustomerBuffer() != null
            ? args.getCustomerBuffer()
            : new byte[chunkSize];//todo change to pool
    var sentBytes = 0;
    // 0 means no limit from client, so server side cut is performed
    var objectLimitSize = args.getPutObjectContext().getMaxObjectSizeCache();
    var stream = getUploadStream(args, ctx);
    while (objectLimitSize == 0 || sentBytes < objectLimitSize) {
        // send chunks limited to default or user's settings
        var bufferSize = objectLimitSize > 0 ? Math.min(objectLimitSize - sentBytes, chunkSize) : chunkSize;
        var bytesCount = readNBytes(args.getPayload(), chunkBuffer, bufferSize);
        if (bytesCount == 0) {
            // Payload stream exhausted.
            break;
        }
        sentBytes += bytesCount;
        var body = Service.PutRequest.Body.newBuilder()
                .setChunk(ByteString.copyFrom(chunkBuffer, 0, bytesCount))
                .build();
        var chunkRequest = Service.PutRequest.newBuilder()
                .setBody(body)
                .clearVerifyHeader();
        RequestConstructor.addMetaHeader(chunkRequest, args.getXHeaders());
        sign(chunkRequest, getContext().getKey());
        stream.write(chunkRequest.build());
    }
    var response = stream.complete();
    Verifier.checkResponse(response);
    var objectId = new ObjectId(response.getBody().getObjectId().getValue().toByteArray());
    return new PutObjectResult(objectId, sentBytes);
}
/**
 * Prepares the object header (owner, version, split values) and opens a Put
 * stream by sending the init request.
 *
 * @param args put parameters carrying the object header
 * @param ctx  call options; supplies the gRPC deadline
 * @return a streamer ready to accept payload chunk requests
 */
private ObjectStreamer getUploadStream(PrmObjectPutBase args, CallContext ctx) {
var header = args.getObjectHeader();
// Owner and version come from the client environment, not from the caller.
header.setOwnerId(getContext().getOwnerId());
header.setVersion(getContext().getVersion());
var grpcHeader = ObjectHeaderMapper.toGrpcMessage(header);
// For split (multipart) objects, parent/children/previous links must be filled in.
if (nonNull(header.getSplit())) {
grpcHeader = objectToolsImpl.updateSplitValues(grpcHeader, header.getSplit());
}
var initRequest = createInitPutRequest(grpcHeader, args, ctx);
return putObjectInit(initRequest, ctx);
}
/**
 * Opens a client-streamed Put call and sends the init request as its first
 * message.
 *
 * @param initRequest the signed init request carrying the object header
 * @param ctx         call options; supplies the gRPC deadline
 * @throws ProcessFrostFSException if the request serializes to zero bytes
 */
private ObjectStreamer putObjectInit(Service.PutRequest initRequest, CallContext ctx) {
// Guard against sending an empty protobuf message to the server.
if (initRequest.getSerializedSize() == 0) {
throw new ProcessFrostFSException(
String.format(PROTO_MESSAGE_IS_EMPTY_TEMPLATE, initRequest.getClass().getName())
);
}
var service = deadLineAfter(objectServiceClient, ctx.getTimeout(), ctx.getTimeUnit());
ObjectStreamer writer = new ObjectStreamer(service);
writer.write(initRequest);
return writer;
}
/**
 * Drains every page produced by the search stream into a single list.
 * TODO: expose the results lazily (yield-style) instead of materialising them.
 */
private Iterable<Types.ObjectID> searchObjects(Service.SearchRequest request, CallContext ctx) {
    var searchReader = getSearchReader(request, ctx);
    List<Types.ObjectID> collected = new ArrayList<>();
    for (var page = searchReader.read(); CollectionUtils.isNotEmpty(page); page = searchReader.read()) {
        collected.addAll(page);
    }
    return collected;
}
/**
 * Starts a server-streamed Search call and wraps the response stream in a
 * {@link SearchReader}.
 *
 * @param initRequest the fully built and signed Search request
 * @param ctx         call options; supplies the gRPC deadline
 * @throws ProcessFrostFSException if the request serializes to zero bytes
 */
private SearchReader getSearchReader(Service.SearchRequest initRequest, CallContext ctx) {
// Guard against sending an empty protobuf message to the server.
if (initRequest.getSerializedSize() == 0) {
throw new ProcessFrostFSException(
String.format(PROTO_MESSAGE_IS_EMPTY_TEMPLATE, initRequest.getClass().getName())
);
}
var service = deadLineAfter(objectServiceBlockingClient, ctx.getTimeout(), ctx.getTimeUnit());
return new SearchReader(service.search(initRequest));
}
/**
 * Reads up to {@code size} bytes from the stream into the start of
 * {@code buffer}, converting any {@link IOException} into the SDK's unchecked
 * {@link ProcessFrostFSException}.
 *
 * @return the number of bytes actually read (0 when the stream is exhausted)
 */
private int readNBytes(InputStream inputStream, byte[] buffer, int size) {
    int read;
    try {
        read = inputStream.readNBytes(buffer, 0, size);
    } catch (IOException cause) {
        throw new ProcessFrostFSException(cause.getMessage());
    }
    return read;
}
/**
 * Returns {@link InputStream#available()} for the given stream, converting any
 * {@link IOException} into the SDK's unchecked {@link ProcessFrostFSException}.
 *
 * NOTE(review): available() is only an estimate of the bytes readable without
 * blocking, not the total stream length; for file/byte-array streams it often
 * equals the remaining size, but for network streams it may be 0 or partial.
 * Confirm callers only use this with in-memory/file-backed payloads.
 */
private long getStreamSize(InputStream inputStream) {
try {
return inputStream.available();
} catch (IOException exp) {
throw new ProcessFrostFSException(exp.getMessage());
}
}
/**
 * Builds, meta-headers and signs a Head request for the object identified by
 * the container id and object id in {@code args}, attaching a session token
 * scoped to the HEAD verb.
 */
private Service.HeadRequest createHeadRequest(PrmObjectHeadGet args, CallContext ctx) {
var address = Types.Address.newBuilder()
.setContainerId(ContainerIdMapper.toGrpcMessage(args.getContainerId()))
.setObjectId(ObjectIdMapper.toGrpcMessage(args.getObjectId()))
.build();
var body = Service.HeadRequest.Body.newBuilder()
.setAddress(address)
// raw=true asks the server not to assemble split objects.
.setRaw(args.isRaw())
.build();
var request = Service.HeadRequest.newBuilder()
.setBody(body);
// Session token is taken from cache or created, then bound to this address/verb.
var sessionToken = getOrCreateSession(args, ctx);
var protoToken = RequestConstructor.createObjectTokenContext(
sessionToken,
address,
frostfs.session.Types.ObjectSessionContext.Verb.HEAD,
getContext().getKey()
);
RequestConstructor.addMetaHeader(request, args.getXHeaders(), protoToken);
sign(request, getContext().getKey());
return request.build();
}
/**
 * Builds, meta-headers and signs a Get request for the object identified by
 * the container id and object id in {@code args}, attaching a session token
 * scoped to the GET verb.
 */
private Service.GetRequest createGetRequest(PrmObjectGet args, CallContext ctx) {
var address = Types.Address.newBuilder()
.setContainerId(ContainerIdMapper.toGrpcMessage(args.getContainerId()))
.setObjectId(ObjectIdMapper.toGrpcMessage(args.getObjectId()))
.build();
var body = Service.GetRequest.Body.newBuilder()
.setAddress(address)
.build();
var request = Service.GetRequest.newBuilder()
.setBody(body);
// Session token is taken from cache or created, then bound to this address/verb.
var sessionToken = getOrCreateSession(args, ctx);
var protoToken = RequestConstructor.createObjectTokenContext(
sessionToken,
address,
frostfs.session.Types.ObjectSessionContext.Verb.GET,
getContext().getKey()
);
RequestConstructor.addMetaHeader(request, args.getXHeaders(), protoToken);
sign(request, getContext().getKey());
return request.build();
}
/**
 * Builds, meta-headers and signs a Delete request for the object identified by
 * the container id and object id in {@code args}, attaching a session token
 * scoped to the DELETE verb.
 */
private Service.DeleteRequest createDeleteRequest(PrmObjectDelete args, CallContext ctx) {
var address = Types.Address.newBuilder()
.setContainerId(ContainerIdMapper.toGrpcMessage(args.getContainerId()))
.setObjectId(ObjectIdMapper.toGrpcMessage(args.getObjectId()))
.build();
var body = Service.DeleteRequest.Body.newBuilder()
.setAddress(address)
.build();
var request = Service.DeleteRequest.newBuilder()
.setBody(body);
// Session token is taken from cache or created, then bound to this address/verb.
var sessionToken = getOrCreateSession(args, ctx);
var protoToken = RequestConstructor.createObjectTokenContext(
sessionToken,
address,
frostfs.session.Types.ObjectSessionContext.Verb.DELETE,
getContext().getKey()
);
RequestConstructor.addMetaHeader(request, args.getXHeaders(), protoToken);
sign(request, getContext().getKey());
return request.build();
}
/**
 * Builds, meta-headers and signs a Search request over a container, copying
 * each caller-supplied object filter into the body, and attaching a session
 * token scoped to the SEARCH verb. The token address carries only the
 * container id (search is not bound to a single object).
 */
private Service.SearchRequest createSearchRequest(PrmObjectSearch args, CallContext ctx) {
var cid = ContainerIdMapper.toGrpcMessage(args.getContainerId());
// Address with container id only: used solely for the session token context.
var address = Types.Address.newBuilder()
.setContainerId(cid)
.build();
var body = Service.SearchRequest.Body.newBuilder()
.setContainerId(cid)
.setVersion(1);// TODO: clarify this param
for (ObjectFilter<?> filter : args.getFilters()) {
body.addFilters(ObjectFilterMapper.toGrpcMessage(filter));
}
var request = Service.SearchRequest.newBuilder()
.setBody(body.build());
var sessionToken = getOrCreateSession(args, ctx);
var protoToken = RequestConstructor.createObjectTokenContext(
sessionToken,
address,
frostfs.session.Types.ObjectSessionContext.Verb.SEARCH,
getContext().getKey()
);
RequestConstructor.addMetaHeader(request, args.getXHeaders(), protoToken);
sign(request, getContext().getKey());
return request.build();
}
/**
 * Builds, meta-headers and signs the init (header-only) Put request that opens
 * an upload stream, attaching a session token scoped to the PUT verb. The
 * token address carries only the container id taken from the header.
 */
private Service.PutRequest createInitPutRequest(frostfs.object.Types.Header header,
PrmObjectPutBase args,
CallContext ctx) {
var address = Types.Address.newBuilder()
.setContainerId(header.getContainerId())
.build();
// Init message carries the object header; payload chunks follow separately.
var init = Service.PutRequest.Body.Init.newBuilder()
.setHeader(header)
.build();
var body = Service.PutRequest.Body.newBuilder()
.setInit(init)
.build();
var request = Service.PutRequest.newBuilder()
.setBody(body);
var sessionToken = getOrCreateSession(args, ctx);
var protoToken = RequestConstructor.createObjectTokenContext(
sessionToken,
address,
frostfs.session.Types.ObjectSessionContext.Verb.PUT,
getContext().getKey()
);
RequestConstructor.addMetaHeader(request, args.getXHeaders(), protoToken);
sign(request, getContext().getKey());
return request.build();
}
/**
 * Builds, meta-headers and signs a PutSingle request that uploads a complete,
 * pre-assembled object in one message, attaching a session token scoped to the
 * PUT verb. The token address carries only the container id from the object's
 * header.
 */
private Service.PutSingleRequest createPutSingleRequest(frostfs.object.Types.Object grpcObject,
PrmObjectSinglePut args,
CallContext ctx) {
var address = Types.Address.newBuilder()
.setContainerId(grpcObject.getHeader().getContainerId())
.build();
var body = Service.PutSingleRequest.Body.newBuilder()
.setObject(grpcObject)
.build();
var request = Service.PutSingleRequest.newBuilder()
.setBody(body);
var sessionToken = getOrCreateSession(args, ctx);
var protoToken = RequestConstructor.createObjectTokenContext(
sessionToken,
address,
frostfs.session.Types.ObjectSessionContext.Verb.PUT,
getContext().getKey()
);
RequestConstructor.addMetaHeader(request, args.getXHeaders(), protoToken);
sign(request, getContext().getKey());
return request.build();
}
/**
 * Builds, meta-headers and signs a GetRange request for a byte range of the
 * addressed object, attaching a session token scoped to the RANGE verb.
 */
private Service.GetRangeRequest createGetRangeRequest(PrmRangeGet args, CallContext ctx) {
var address = Types.Address.newBuilder()
.setContainerId(ContainerIdMapper.toGrpcMessage(args.getContainerId()))
.setObjectId(ObjectIdMapper.toGrpcMessage(args.getObjectId()))
.build();
var body = Service.GetRangeRequest.Body.newBuilder()
.setAddress(address)
.setRange(RangeMapper.toGrpcMessage(args.getRange()))
// raw=true asks the server not to assemble split objects.
.setRaw(args.isRaw())
.build();
var request = Service.GetRangeRequest.newBuilder()
.setBody(body);
var sessionToken = getOrCreateSession(args, ctx);
var protoToken = RequestConstructor.createObjectTokenContext(
sessionToken,
address,
frostfs.session.Types.ObjectSessionContext.Verb.RANGE,
getContext().getKey()
);
RequestConstructor.addMetaHeader(request, args.getXHeaders(), protoToken);
sign(request, getContext().getKey());
return request.build();
}
/**
 * Builds, meta-headers and signs a GetRangeHash request that asks the server
 * for salted SHA-256 checksums of the given byte ranges, attaching a session
 * token scoped to the RANGEHASH verb.
 */
private Service.GetRangeHashRequest createGetRangeHashRequest(PrmRangeHashGet args, CallContext ctx) {
var address = Types.Address.newBuilder()
.setContainerId(ContainerIdMapper.toGrpcMessage(args.getContainerId()))
.setObjectId(ObjectIdMapper.toGrpcMessage(args.getObjectId()))
.build();
var body = Service.GetRangeHashRequest.Body.newBuilder()
.setAddress(address)
// Checksum type is fixed to SHA-256 by this client.
.setType(Types.ChecksumType.SHA256)
.setSalt(ByteString.copyFrom(args.getSalt()))
.addAllRanges(RangeMapper.toGrpcMessages(args.getRanges()))
.build();
var request = Service.GetRangeHashRequest.newBuilder()
.setBody(body);
var sessionToken = getOrCreateSession(args, ctx);
var protoToken = RequestConstructor.createObjectTokenContext(
sessionToken,
address,
frostfs.session.Types.ObjectSessionContext.Verb.RANGEHASH,
getContext().getKey()
);
RequestConstructor.addMetaHeader(request, args.getXHeaders(), protoToken);
sign(request, getContext().getKey());
return request.build();
}
/**
 * Builds the initial (address + attribute) part of a Patch request from the
 * supplied parameters and returns it as a builder so the caller can extend it.
 */
private Service.PatchRequest.Builder createInitPatchRequest(PrmObjectPatch args) {
    var patchBody = Service.PatchRequest.Body.newBuilder()
            .setAddress(AddressMapper.toGrpcMessage(args.getAddress()))
            .setReplaceAttributes(args.isReplaceAttributes())
            .addAllNewAttributes(ObjectAttributeMapper.toGrpcMessages(args.getNewAttributes()))
            .build();
    return Service.PatchRequest.newBuilder().setBody(patchBody);
}
}

View file

@ -1,126 +0,0 @@
package info.frostfs.sdk.services.impl;
import com.google.protobuf.ByteString;
import frostfs.object.Types;
import info.frostfs.sdk.dto.object.ObjectFrostFS;
import info.frostfs.sdk.dto.object.ObjectHeader;
import info.frostfs.sdk.dto.object.ObjectId;
import info.frostfs.sdk.dto.object.Split;
import info.frostfs.sdk.jdo.ClientEnvironment;
import info.frostfs.sdk.mappers.netmap.VersionMapper;
import info.frostfs.sdk.mappers.object.ObjectHeaderMapper;
import info.frostfs.sdk.mappers.object.ObjectIdMapper;
import info.frostfs.sdk.mappers.object.OwnerIdMapper;
import info.frostfs.sdk.services.ContextAccessor;
import info.frostfs.sdk.services.ToolsClient;
import org.apache.commons.collections4.CollectionUtils;
import java.util.stream.Collectors;
import static info.frostfs.sdk.Helper.getSha256;
import static info.frostfs.sdk.tools.RequestSigner.signData;
import static java.util.Objects.isNull;
import static java.util.Objects.nonNull;
/**
 * Client-side object tooling: computes object ids (SHA-256 of the marshalled
 * header), assembles signed gRPC objects from SDK models, and fills in the
 * split (multipart) linkage fields of an object header.
 */
public class ObjectToolsImpl extends ContextAccessor implements ToolsClient {
public ObjectToolsImpl(ClientEnvironment context) {
super(context);
}
// Wraps raw bytes in a protobuf SHA-256 checksum message.
private static frostfs.refs.Types.Checksum sha256Checksum(byte[] data) {
return frostfs.refs.Types.Checksum.newBuilder()
.setType(frostfs.refs.Types.ChecksumType.SHA256)
.setSum(ByteString.copyFrom(getSha256(data)))
.build();
}
/**
 * Computes the object id as the SHA-256 hash of the marshalled gRPC header
 * (with an empty payload hash and, if present, split values filled in).
 */
@Override
public ObjectId calculateObjectId(ObjectHeader header) {
var grpcHeader = createHeader(header, new byte[]{});
if (nonNull(header.getSplit())) {
grpcHeader = updateSplitValues(grpcHeader, header.getSplit());
}
return ObjectIdMapper.toModel(
frostfs.refs.Types.ObjectID.newBuilder().setValue(getSha256(grpcHeader)).build()
);
}
/**
 * Builds a complete signed gRPC object from the SDK model: stamps owner,
 * version and payload length onto the header, hashes the payload, derives the
 * object id from the header hash, and signs the id with the client key.
 */
public Types.Object createObject(ObjectFrostFS objectFrostFS) {
objectFrostFS.getHeader().setOwnerId(getContext().getOwnerId());
objectFrostFS.getHeader().setVersion(getContext().getVersion());
objectFrostFS.getHeader().setPayloadLength(objectFrostFS.getPayload().length);
var grpcHeader = ObjectHeaderMapper.toGrpcMessage(objectFrostFS.getHeader()).toBuilder()
.setPayloadHash(sha256Checksum(objectFrostFS.getPayload()))
.build();
var split = objectFrostFS.getHeader().getSplit();
if (nonNull(split)) {
grpcHeader = updateSplitValues(grpcHeader, split);
}
// Object id = SHA-256 over the marshalled header.
var objectId = frostfs.refs.Types.ObjectID.newBuilder().setValue(getSha256(grpcHeader)).build();
var sig = frostfs.refs.Types.Signature.newBuilder()
.setKey(ByteString.copyFrom(getContext().getKey().getPublicKeyByte()))
.setSign(ByteString.copyFrom(signData(getContext().getKey(), objectId.toByteArray())));
return Types.Object.newBuilder()
.setHeader(grpcHeader)
.setObjectId(objectId)
.setPayload(ByteString.copyFrom(objectFrostFS.getPayload()))
.setSignature(sig)
.build();
}
/**
 * Copies split (multipart) linkage into the gRPC header: split id, children,
 * previous part, and — when a parent header is present — the parent id,
 * header and signature. Side effect: stores the computed parent id back into
 * the SDK {@code split} model.
 */
public Types.Header updateSplitValues(Types.Header grpcHeader, Split split) {
if (isNull(split)) {
return grpcHeader;
}
var grpcSplit = grpcHeader.getSplit().toBuilder()
.setSplitId(ByteString.copyFrom(split.getSplitId().toBinary()));
if (CollectionUtils.isNotEmpty(split.getChildren())) {
var grpcChildren = split.getChildren().stream()
.map(ObjectIdMapper::toGrpcMessage)
.collect(Collectors.toList());
grpcSplit.addAllChildren(grpcChildren);
}
if (nonNull(split.getParentHeader())) {
// Parent id is derived the same way as a normal object id: SHA-256 of the
// marshalled parent header; then signed with the client key.
var grpcParentHeader = createHeader(split.getParentHeader(), new byte[]{});
var parentObjectId = frostfs.refs.Types.ObjectID.newBuilder().setValue(getSha256(grpcParentHeader)).build();
var signature = frostfs.refs.Types.Signature.newBuilder()
.setKey(ByteString.copyFrom(getContext().getKey().getPublicKeyByte()))
.setSign(ByteString.copyFrom(signData(getContext().getKey(), parentObjectId.toByteArray())))
.build();
grpcSplit
.setParent(parentObjectId)
.setParentHeader(grpcParentHeader)
.setParentSignature(signature);
split.setParent(ObjectIdMapper.toModel(parentObjectId));
}
if (nonNull(split.getPrevious())) {
grpcSplit.setPrevious(ObjectIdMapper.toGrpcMessage(split.getPrevious())).build();
}
return grpcHeader.toBuilder().setSplit(grpcSplit.build()).build();
}
// Maps the SDK header to gRPC, stamping owner/version from the environment
// and, when payload is non-null, its SHA-256 hash.
private Types.Header createHeader(ObjectHeader header, byte[] payload) {
var grpcHeader = ObjectHeaderMapper.toGrpcMessage(header).toBuilder()
.setOwnerId(OwnerIdMapper.toGrpcMessage(getContext().getOwnerId()))
.setVersion(VersionMapper.toGrpcMessage(getContext().getVersion()));
if (payload != null) {
grpcHeader.setPayloadHash(sha256Checksum(payload));
}
return grpcHeader.build();
}
}

View file

@ -1,72 +0,0 @@
package info.frostfs.sdk.services.impl;
import frostfs.session.Service;
import frostfs.session.SessionServiceGrpc;
import frostfs.session.Types;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.jdo.ClientEnvironment;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.session.PrmSessionCreate;
import info.frostfs.sdk.mappers.object.OwnerIdMapper;
import info.frostfs.sdk.mappers.session.SessionMapper;
import info.frostfs.sdk.services.ContextAccessor;
import info.frostfs.sdk.services.SessionClient;
import info.frostfs.sdk.tools.RequestConstructor;
import info.frostfs.sdk.tools.Verifier;
import static info.frostfs.sdk.tools.RequestSigner.sign;
import static info.frostfs.sdk.utils.DeadLineUtil.deadLineAfter;
/**
 * Session service client: creates sessions on the node and assembles the
 * resulting session token (id, session key, owner, lifetime) for local use.
 */
public class SessionClientImpl extends ContextAccessor implements SessionClient {
private final SessionServiceGrpc.SessionServiceBlockingStub serviceBlockingStub;
public SessionClientImpl(ClientEnvironment clientEnvironment) {
super(clientEnvironment);
this.serviceBlockingStub = SessionServiceGrpc.newBlockingStub(getContext().getChannel());
}
/**
 * Creates a session and returns it serialized as the SDK token type.
 */
@Override
public SessionToken createSession(PrmSessionCreate args, CallContext ctx) {
var sessionToken = createSessionInternal(args, ctx);
var token = SessionMapper.serialize(sessionToken);
return new SessionToken(token);
}
/**
 * Builds, meta-headers and signs a CreateRequest (owner id from the client
 * environment, expiration from {@code args}) and executes it.
 */
public Types.SessionToken createSessionInternal(PrmSessionCreate args, CallContext ctx) {
var body = Service.CreateRequest.Body.newBuilder()
.setOwnerId(OwnerIdMapper.toGrpcMessage(getContext().getOwnerId()))
.setExpiration(args.getExpiration())
.build();
var request = Service.CreateRequest.newBuilder()
.setBody(body);
RequestConstructor.addMetaHeader(request);
sign(request, getContext().getKey());
return createSession(request.build(), ctx);
}
/**
 * Executes the create call under the caller's deadline, validates the
 * response, and folds request + response fields into a full session token.
 * The token's iat/nbf are both set to the response epoch; exp comes from the
 * request.
 */
private Types.SessionToken createSession(Service.CreateRequest request, CallContext ctx) {
var service = deadLineAfter(serviceBlockingStub, ctx.getTimeout(), ctx.getTimeUnit());
var response = service.create(request);
Verifier.checkResponse(response);
var lifetime = Types.SessionToken.Body.TokenLifetime.newBuilder()
.setExp(request.getBody().getExpiration())
.setIat(response.getMetaHeader().getEpoch())
.setNbf(response.getMetaHeader().getEpoch())
.build();
var body = Types.SessionToken.Body.newBuilder()
.setId(response.getBody().getId())
.setSessionKey(response.getBody().getSessionKey())
.setOwnerId(request.getBody().getOwnerId())
.setLifetime(lifetime)
.build();
return Types.SessionToken.newBuilder()
.setBody(body)
.build();
}
}

View file

@ -1,35 +0,0 @@
package info.frostfs.sdk.services.impl;
import info.frostfs.sdk.dto.session.SessionToken;
import info.frostfs.sdk.exceptions.FrostFSException;
import info.frostfs.sdk.jdo.ClientEnvironment;
import info.frostfs.sdk.jdo.parameters.CallContext;
import info.frostfs.sdk.jdo.parameters.session.PrmSessionCreate;
import info.frostfs.sdk.services.ContextAccessor;
import info.frostfs.sdk.services.SessionTools;
import static info.frostfs.sdk.constants.ErrorConst.SESSION_CREATE_FAILED;
import static java.util.Objects.isNull;
/**
 * Session cache helper: returns the cached session token for the environment's
 * session key, creating and caching a new one when absent.
 */
public class SessionToolsImpl extends ContextAccessor implements SessionTools {
public SessionToolsImpl(ClientEnvironment clientEnvironment) {
super(clientEnvironment);
}
/**
 * Looks up the token in the environment's session cache; on a miss, creates a
 * session with expiration -1 via the FrostFS client and stores it in the cache.
 *
 * @throws FrostFSException if session creation returns null
 */
@Override
public SessionToken getOrCreateSession(ClientEnvironment env, CallContext ctx) {
var token = env.getSessionCache().tryGetValue(env.getSessionKey());
if (isNull(token)) {
token = env.getFrostFSClient().createSession(new PrmSessionCreate(-1), ctx);
if (isNull(token)) {
throw new FrostFSException(SESSION_CREATE_FAILED);
}
env.getSessionCache().setValue(env.getSessionKey(), token);
}
return token;
}
}

View file

@ -1,174 +0,0 @@
package info.frostfs.sdk.services.impl.interceptor;
import io.grpc.Metadata;
import io.grpc.Status;
import io.prometheus.client.CollectorRegistry;
import io.prometheus.client.Counter;
import io.prometheus.client.Histogram;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import static info.frostfs.sdk.services.impl.interceptor.Labels.*;
/**
 * Per-method Prometheus metrics for gRPC client calls: counters for started
 * and completed RPCs and for stream messages sent/received, plus an optional
 * latency histogram. Instances are produced by the inner {@link Factory} and
 * carry the label header keys and the {@link GrpcMethod} they describe.
 */
public class ClientMetrics {
// Label sets for request-side vs response-side metrics; response metrics
// additionally carry the status code under both "code" and "grpc_code".
private static final List<String> defaultRequestLabels =
Arrays.asList("grpc_target", "grpc_type", "grpc_service", "grpc_method");
private static final List<String> defaultResponseLabels =
Arrays.asList("grpc_target", "grpc_type", "grpc_service", "grpc_method", "code", "grpc_code");
private static final Counter.Builder rpcStartedBuilder =
Counter.build()
.namespace("grpc")
.subsystem("client")
.name("started")
.help("Total number of RPCs started on the client.");
private static final Counter.Builder rpcCompletedBuilder =
Counter.build()
.namespace("grpc")
.subsystem("client")
.name("completed")
.help("Total number of RPCs completed on the client, regardless of success or failure.");
private static final Histogram.Builder completedLatencySecondsBuilder =
Histogram.build()
.namespace("grpc")
.subsystem("client")
.name("completed_latency_seconds")
.help("Histogram of rpc response latency (in seconds) for completed rpcs.");
private static final Counter.Builder streamMessagesReceivedBuilder =
Counter.build()
.namespace("grpc")
.subsystem("client")
.name("msg_received")
.help("Total number of stream messages received from the server.");
private static final Counter.Builder streamMessagesSentBuilder =
Counter.build()
.namespace("grpc")
.subsystem("client")
.name("msg_sent")
.help("Total number of stream messages sent by the client.");
private final List<Metadata.Key<String>> labelHeaderKeys;
private final Counter rpcStarted;
private final Counter rpcCompleted;
private final Counter streamMessagesReceived;
private final Counter streamMessagesSent;
// Empty when latency histograms are disabled in the Configuration.
private final Optional<Histogram> completedLatencySeconds;
private final GrpcMethod method;
private ClientMetrics(
List<Metadata.Key<String>> labelHeaderKeys,
GrpcMethod method,
Counter rpcStarted,
Counter rpcCompleted,
Counter streamMessagesReceived,
Counter streamMessagesSent,
Optional<Histogram> completedLatencySeconds) {
this.labelHeaderKeys = labelHeaderKeys;
this.method = method;
this.rpcStarted = rpcStarted;
this.rpcCompleted = rpcCompleted;
this.streamMessagesReceived = streamMessagesReceived;
this.streamMessagesSent = streamMessagesSent;
this.completedLatencySeconds = completedLatencySeconds;
}
/** Increments the started-RPC counter with method and custom header labels. */
public void recordCallStarted(Metadata metadata) {
addLabels(rpcStarted, customLabels(metadata, labelHeaderKeys), method).inc();
}
/** Increments the completed-RPC counter; the status code fills both the "code" and "grpc_code" labels. */
public void recordClientHandled(Status.Code code, Metadata metadata) {
List<String> allLabels = new ArrayList<>();
// Added twice on purpose: defaultResponseLabels declares "code" and "grpc_code".
allLabels.add(code.toString());
allLabels.add(code.toString());
allLabels.addAll(customLabels(metadata, labelHeaderKeys));
addLabels(rpcCompleted, allLabels, method).inc();
}
/** Increments the stream-messages-sent counter. */
public void recordStreamMessageSent(Metadata metadata) {
addLabels(streamMessagesSent, customLabels(metadata, labelHeaderKeys), method).inc();
}
/** Increments the stream-messages-received counter. */
public void recordStreamMessageReceived(Metadata metadata) {
addLabels(streamMessagesReceived, customLabels(metadata, labelHeaderKeys), method).inc();
}
/**
 * Only has any effect if monitoring is configured to include latency histograms. Otherwise, this
 * does nothing.
 */
public void recordLatency(double latencySec, Metadata metadata) {
if (completedLatencySeconds.isEmpty()) {
return;
}
addLabels(completedLatencySeconds.get(), customLabels(metadata, labelHeaderKeys), method)
.observe(latencySec);
}
/**
 * Knows how to produce {@link ClientMetrics} instances for individual methods.
 */
static class Factory {
private final List<Metadata.Key<String>> labelHeaderKeys;
private final Counter rpcStarted;
private final Counter rpcCompleted;
private final Counter streamMessagesReceived;
private final Counter streamMessagesSent;
private final Optional<Histogram> completedLatencySeconds;
// Registers the collectors (once per Factory) on the configured registry,
// appending sanitized custom header names to the default label sets.
Factory(Configuration configuration) {
CollectorRegistry registry = configuration.getCollectorRegistry();
this.labelHeaderKeys = metadataKeys(configuration.getLabelHeaders());
this.rpcStarted =
rpcStartedBuilder
.labelNames(asArray(defaultRequestLabels, configuration.getSanitizedLabelHeaders()))
.register(registry);
this.rpcCompleted =
rpcCompletedBuilder
.labelNames(asArray(defaultResponseLabels, configuration.getSanitizedLabelHeaders()))
.register(registry);
this.streamMessagesReceived =
streamMessagesReceivedBuilder
.labelNames(asArray(defaultRequestLabels, configuration.getSanitizedLabelHeaders()))
.register(registry);
this.streamMessagesSent =
streamMessagesSentBuilder
.labelNames(asArray(defaultRequestLabels, configuration.getSanitizedLabelHeaders()))
.register(registry);
if (configuration.isIncludeLatencyHistograms()) {
this.completedLatencySeconds =
Optional.of(
ClientMetrics.completedLatencySecondsBuilder
.buckets(configuration.getLatencyBuckets())
.labelNames(
asArray(defaultRequestLabels, configuration.getSanitizedLabelHeaders()))
.register(registry));
} else {
this.completedLatencySeconds = Optional.empty();
}
}
/**
 * Creates a {@link ClientMetrics} for the supplied gRPC method.
 */
ClientMetrics createMetricsForMethod(GrpcMethod grpcMethod) {
return new ClientMetrics(
labelHeaderKeys,
grpcMethod,
rpcStarted,
rpcCompleted,
streamMessagesReceived,
streamMessagesSent,
completedLatencySeconds);
}
}
}

View file

@ -1,168 +0,0 @@
package info.frostfs.sdk.services.impl.interceptor;
import io.prometheus.client.CollectorRegistry;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
/**
 * Immutable configuration for gRPC client monitoring. Obtain a base instance
 * via {@link #cheapMetricsOnly()} or {@link #allMetrics()} and refine it with
 * the {@code with*} copy methods, each of which returns a new instance.
 */
public class Configuration {
// Default latency histogram buckets, in seconds.
private static final double[] DEFAULT_LATENCY_BUCKETS =
new double[]{.001, .005, .01, .05, 0.075, .1, .25, .5, 1, 2, 5, 10};
private final boolean isIncludeLatencyHistograms;
private final CollectorRegistry collectorRegistry;
private final double[] latencyBuckets;
// Header names whose request values become extra metric labels.
private final List<String> labelHeaders;
private final boolean isAddCodeLabelToHistograms;
private Configuration(
boolean isIncludeLatencyHistograms,
CollectorRegistry collectorRegistry,
double[] latencyBuckets,
List<String> labelHeaders,
boolean isAddCodeLabelToHistograms) {
this.isIncludeLatencyHistograms = isIncludeLatencyHistograms;
this.collectorRegistry = collectorRegistry;
this.latencyBuckets = latencyBuckets;
this.labelHeaders = labelHeaders;
this.isAddCodeLabelToHistograms = isAddCodeLabelToHistograms;
}
/**
 * Returns a {@link Configuration} for recording all cheap metrics about the rpcs.
 */
public static Configuration cheapMetricsOnly() {
return new Configuration(
false /* isIncludeLatencyHistograms */,
CollectorRegistry.defaultRegistry,
DEFAULT_LATENCY_BUCKETS,
new ArrayList<>(),
false /* isAddCodeLabelToHistograms */);
}
/**
 * Returns a {@link Configuration} for recording all metrics about the rpcs. This includes metrics
 * which might produce a lot of data, such as latency histograms.
 */
public static Configuration allMetrics() {
return new Configuration(
true /* isIncludeLatencyHistograms */,
CollectorRegistry.defaultRegistry,
DEFAULT_LATENCY_BUCKETS,
new ArrayList<>(),
false);
}
/**
 * Returns a copy {@link Configuration} with the difference that Prometheus metrics are recorded
 * using the supplied {@link CollectorRegistry}.
 */
public Configuration withCollectorRegistry(CollectorRegistry collectorRegistry) {
return new Configuration(
isIncludeLatencyHistograms,
collectorRegistry,
latencyBuckets,
labelHeaders,
isAddCodeLabelToHistograms);
}
/**
 * Returns a copy {@link Configuration} with the difference that the latency histogram values are
 * recorded with the specified set of buckets.
 */
public Configuration withLatencyBuckets(double[] buckets) {
return new Configuration(
isIncludeLatencyHistograms,
collectorRegistry,
buckets,
labelHeaders,
isAddCodeLabelToHistograms);
}
/**
 * Returns a copy {@link Configuration} that recognizes the given list of header names and uses
 * their value from each request as prometheus labels.
 *
 * <p>Since hyphens is a common character in header names, and since Prometheus does not allow
 * hyphens in label names, All hyphens in the list of header names will be converted to
 * underscores before being added as metric label names.
 *
 * <p>If one of the headers added here is absent in one of the requests, its metric value for that
 * request will be an empty string.
 *
 * <p>Example: {@code withLabelHeaders(Arrays.asList("User-Agent"))} will make all metrics carry a
 * label "User_Agent", with label value filled in from the value of the "User-Agent" header of
 * each request.
 */
public Configuration withLabelHeaders(List<String> headers) {
// Appends to (not replaces) any previously configured headers.
List<String> newHeaders = new ArrayList<>(labelHeaders);
newHeaders.addAll(headers);
return new Configuration(
isIncludeLatencyHistograms,
collectorRegistry,
latencyBuckets,
newHeaders,
isAddCodeLabelToHistograms);
}
/**
 * Returns a copy {@link Configuration} with the difference that status code label will be added
 * to latency histogram. If latency histogram itself is disabled, this takes no effect. Warning:
 * this will increase the number of histograms by a factor of actually happened codes (up to
 * {@link io.grpc.Status.Code} values count), which could lead to additional local memory usage
 * and load on prometheus (storage and memory usage, query-time complexity)
 */
public Configuration withCodeLabelInLatencyHistogram() {
return new Configuration(
isIncludeLatencyHistograms,
collectorRegistry,
latencyBuckets,
labelHeaders,
true /* isAddCodeLabelToHistograms */);
}
/**
 * Returns whether or not latency histograms for calls should be included.
 */
public boolean isIncludeLatencyHistograms() {
return isIncludeLatencyHistograms;
}
/**
 * Returns the {@link CollectorRegistry} used to record stats.
 */
public CollectorRegistry getCollectorRegistry() {
return collectorRegistry;
}
/**
 * Returns the histogram buckets to use for latency metrics.
 */
public double[] getLatencyBuckets() {
return latencyBuckets;
}
/**
 * Returns the configured list of headers to be used as labels.
 */
public List<String> getLabelHeaders() {
return labelHeaders;
}
/**
 * Returns whether or not status code label should be added to latency histogram.
 */
public boolean isAddCodeLabelToHistograms() {
return isAddCodeLabelToHistograms;
}
/**
 * Returns the sanitized version of the label headers, after turning all hyphens to underscores.
 */
public List<String> getSanitizedLabelHeaders() {
return labelHeaders.stream().map(h -> h.replaceAll("-", "_")).collect(Collectors.toList());
}
}

View file

@ -1,51 +0,0 @@
package info.frostfs.sdk.services.impl.interceptor;
import io.grpc.Channel;
import io.grpc.MethodDescriptor;
import io.grpc.MethodDescriptor.MethodType;
/**
 * Immutable descriptor of a gRPC method as used for metric labels: channel
 * target (authority), service name, method name, and call type.
 */
public class GrpcMethod {
private final String targetName;
private final String serviceName;
private final String methodName;
private final MethodType type;
private GrpcMethod(String targetName, String serviceName, String methodName, MethodType type) {
this.targetName = targetName;
this.serviceName = serviceName;
this.methodName = methodName;
this.type = type;
}
/** Extracts service/method names from the descriptor and the target from the channel authority. */
static GrpcMethod of(MethodDescriptor<?, ?> method, Channel channel) {
String serviceName = MethodDescriptor.extractFullServiceName(method.getFullMethodName());
// Full method names are of the form: "full.serviceName/MethodName". We extract the last part.
String methodName = method.getFullMethodName().substring(serviceName.length() + 1);
return new GrpcMethod(channel.authority(), serviceName, methodName, method.getType());
}
String targetName() {
return targetName;
}
String serviceName() {
return serviceName;
}
String methodName() {
return methodName;
}
String type() {
return type.toString();
}
/** True for client-streaming and bidi calls (client sends multiple messages). */
boolean streamsRequests() {
return type == MethodType.CLIENT_STREAMING || type == MethodType.BIDI_STREAMING;
}
/** True for server-streaming and bidi calls (server sends multiple messages). */
boolean streamsResponses() {
return type == MethodType.SERVER_STREAMING || type == MethodType.BIDI_STREAMING;
}
}

View file

@ -1,63 +0,0 @@
package info.frostfs.sdk.services.impl.interceptor;
import io.grpc.Metadata;
import io.grpc.Metadata.Key;
import io.prometheus.client.SimpleCollector;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
 * Static helpers for assembling Prometheus label values and gRPC metadata keys
 * used by the monitoring interceptor.
 */
public class Labels {
private Labels() {
}
/**
 * Merges two string lists into an array, maintaining order of first list then second list.
 */
static String[] asArray(List<String> firstList, List<String> secondList) {
List<String> list = new ArrayList<>(firstList);
list.addAll(secondList);
return list.toArray(new String[0]);
}
/**
 * Converts a list of strings to a list of grpc metadata keys.
 */
static List<Key<String>> metadataKeys(List<String> headerNames) {
List<Key<String>> keys = new ArrayList<>();
for (String name : headerNames) {
keys.add(Key.of(name, Metadata.ASCII_STRING_MARSHALLER));
}
return Collections.unmodifiableList(keys);
}
/**
 * Returns the ordered list of custom label values, by looking into metadata for values of
 * selected custom headers.
 */
static List<String> customLabels(Metadata metadata, List<Key<String>> labelHeaderKeys) {
List<String> labels = new ArrayList<>();
for (Key<String> key : labelHeaderKeys) {
if (metadata.containsKey(key)) {
labels.add(metadata.get(key));
} else {
// Absent headers contribute an empty label so label counts stay aligned.
labels.add("");
}
}
return Collections.unmodifiableList(labels);
}
/**
 * Adds standard labels, as well as custom ones, in order, to a given collector.
 */
static <T> T addLabels(SimpleCollector<T> collector, List<String> labels, GrpcMethod method) {
List<String> allLabels = new ArrayList<>();
// Order must match the declared label names: target, type, service, method, then custom.
allLabels.add(method.targetName());
allLabels.add(method.type());
allLabels.add(method.serviceName());
allLabels.add(method.methodName());
allLabels.addAll(labels);
return collector.labels(allLabels.toArray(new String[0]));
}
}

View file

@ -1,47 +0,0 @@
package info.frostfs.sdk.services.impl.interceptor;
import io.grpc.ClientCall;
import io.grpc.ForwardingClientCall;
import io.grpc.Metadata;
import java.time.Clock;
/**
 * A {@link ClientCall} decorator that records call-started and
 * stream-message-sent metrics, and installs a
 * {@link MonitoringClientCallListener} to capture the response side.
 *
 * @param <R> request message type
 * @param <S> response message type
 */
public class MonitoringClientCall<R, S> extends ForwardingClientCall.SimpleForwardingClientCall<R, S> {
    private final ClientMetrics clientMetrics;
    private final GrpcMethod grpcMethod;
    private final Configuration configuration;
    private final Clock clock;
    // Captured in start(); used to label per-message metrics in sendMessage().
    private Metadata requestMetadata;

    MonitoringClientCall(
            ClientCall<R, S> delegate,
            ClientMetrics clientMetrics,
            GrpcMethod grpcMethod,
            Configuration configuration,
            Clock clock) {
        super(delegate);
        this.clientMetrics = clientMetrics;
        this.grpcMethod = grpcMethod;
        this.configuration = configuration;
        this.clock = clock;
    }

    @Override
    public void start(Listener<S> delegate, Metadata metadata) {
        this.requestMetadata = metadata;
        clientMetrics.recordCallStarted(metadata);
        Listener<S> monitoringListener = new MonitoringClientCallListener<>(
                delegate, clientMetrics, grpcMethod, configuration, clock, metadata);
        super.start(monitoringListener, metadata);
    }

    @Override
    public void sendMessage(R requestMessage) {
        if (grpcMethod.streamsRequests()) {
            // requestMetadata may still be null if a message is sent before start().
            Metadata labels = requestMetadata;
            clientMetrics.recordStreamMessageSent(labels == null ? new Metadata() : labels);
        }
        super.sendMessage(requestMessage);
    }
}

View file

@ -1,61 +0,0 @@
package info.frostfs.sdk.services.impl.interceptor;
import io.grpc.ClientCall;
import io.grpc.ForwardingClientCallListener;
import io.grpc.Metadata;
import io.grpc.Status;
import java.time.Clock;
import java.time.Instant;
/**
 * A {@link ClientCall.Listener} decorator that records per-call completion
 * metrics (status code, optional latency histogram) and per-message metrics
 * for streaming responses.
 *
 * @param <S> response message type
 */
public class MonitoringClientCallListener<S> extends ForwardingClientCallListener<S> {
    private static final long MILLIS_PER_SECOND = 1000L;
    private final ClientCall.Listener<S> delegate;
    private final ClientMetrics clientMetrics;
    private final GrpcMethod grpcMethod;
    private final Configuration configuration;
    private final Clock clock;
    // Call start time, taken at listener construction; latency is measured from here.
    private final Instant startInstant;
    private final Metadata requestMetadata;

    MonitoringClientCallListener(
            ClientCall.Listener<S> delegate,
            ClientMetrics clientMetrics,
            GrpcMethod grpcMethod,
            Configuration configuration,
            Clock clock,
            Metadata requestMetadata) {
        this.delegate = delegate;
        this.clientMetrics = clientMetrics;
        this.grpcMethod = grpcMethod;
        this.configuration = configuration;
        this.clock = clock;
        this.startInstant = clock.instant();
        this.requestMetadata = requestMetadata;
    }

    @Override
    protected ClientCall.Listener<S> delegate() {
        return delegate;
    }

    @Override
    public void onClose(Status status, Metadata metadata) {
        clientMetrics.recordClientHandled(status.getCode(), requestMetadata);
        if (configuration.isIncludeLatencyHistograms()) {
            long elapsedMillis = clock.millis() - startInstant.toEpochMilli();
            double latencySec = elapsedMillis / (double) MILLIS_PER_SECOND;
            clientMetrics.recordLatency(latencySec, requestMetadata);
        }
        super.onClose(status, metadata);
    }

    @Override
    public void onMessage(S responseMessage) {
        if (grpcMethod.streamsResponses()) {
            clientMetrics.recordStreamMessageReceived(requestMetadata);
        }
        super.onMessage(responseMessage);
    }
}

View file

@ -1,33 +0,0 @@
package info.frostfs.sdk.services.impl.interceptor;
import io.grpc.*;
import java.time.Clock;
/**
 * A {@link ClientInterceptor} that wraps every outgoing call in a
 * {@link MonitoringClientCall}, recording Prometheus metrics per gRPC method.
 */
public class MonitoringClientInterceptor implements ClientInterceptor {
    private final Clock clock;
    private final Configuration configuration;
    private final ClientMetrics.Factory clientMetricsFactory;

    private MonitoringClientInterceptor(
            Clock clock, Configuration configuration, ClientMetrics.Factory clientMetricsFactory) {
        this.clock = clock;
        this.configuration = configuration;
        this.clientMetricsFactory = clientMetricsFactory;
    }

    /** Creates an interceptor using the system default-zone clock and a metrics factory derived from the configuration. */
    public static MonitoringClientInterceptor create(Configuration configuration) {
        return new MonitoringClientInterceptor(
                Clock.systemDefaultZone(), configuration, new ClientMetrics.Factory(configuration));
    }

    @Override
    public <R, S> ClientCall<R, S> interceptCall(
            MethodDescriptor<R, S> methodDescriptor, CallOptions callOptions, Channel channel) {
        GrpcMethod monitoredMethod = GrpcMethod.of(methodDescriptor, channel);
        ClientMetrics methodMetrics = clientMetricsFactory.createMetricsForMethod(monitoredMethod);
        ClientCall<R, S> delegateCall = channel.newCall(methodDescriptor, callOptions);
        return new MonitoringClientCall<>(delegateCall, methodMetrics, monitoredMethod, configuration, clock);
    }
}

View file

@ -1,65 +0,0 @@
package info.frostfs.sdk.services.impl.rwhelper;
import frostfs.object.Service;
import frostfs.object.Types;
import info.frostfs.sdk.dto.object.ObjectReader;
import info.frostfs.sdk.exceptions.ProcessFrostFSException;
import info.frostfs.sdk.tools.Verifier;
import java.util.Iterator;
import static info.frostfs.sdk.constants.ErrorConst.UNEXPECTED_MESSAGE_TYPE_TEMPLATE;
import static info.frostfs.sdk.constants.ErrorConst.UNEXPECTED_STREAM;
/**
 * Reads an object from a server-streamed GetResponse sequence: first the
 * header (INIT part), then payload chunks (CHUNK parts). Each response is
 * verified before use.
 */
public class ObjectReaderImpl implements ObjectReader {
    public Iterator<Service.GetResponse> call;

    public ObjectReaderImpl(Iterator<Service.GetResponse> call) {
        this.call = call;
    }

    /**
     * Reads the first stream message and builds the object header.
     *
     * @return a protobuf Object carrying the object id and header from the INIT part
     * @throws ProcessFrostFSException if the stream is empty or the first
     *                                 message is not an INIT part
     */
    public Types.Object readHeader() {
        if (!call.hasNext()) {
            throw new ProcessFrostFSException(UNEXPECTED_STREAM);
        }
        var response = call.next();
        Verifier.checkResponse(response);
        requirePartCase(response, Service.GetResponse.Body.INIT_FIELD_NUMBER);
        return Types.Object.newBuilder()
                .setObjectId(response.getBody().getInit().getObjectId())
                .setHeader(response.getBody().getInit().getHeader())
                .build();
    }

    /**
     * Reads the next payload chunk from the stream.
     *
     * @return the chunk bytes, or {@code null} when the stream is exhausted
     * @throws ProcessFrostFSException if the next message is not a CHUNK part
     */
    public byte[] readChunk() {
        if (!call.hasNext()) {
            return null;
        }
        var response = call.next();
        Verifier.checkResponse(response);
        requirePartCase(response, Service.GetResponse.Body.CHUNK_FIELD_NUMBER);
        return response.getBody().getChunk().toByteArray();
    }

    // Shared guard: both readers require a specific oneof part in the response body.
    private static void requirePartCase(Service.GetResponse response, int expectedFieldNumber) {
        int actualFieldNumber = response.getBody().getObjectPartCase().getNumber();
        if (actualFieldNumber != expectedFieldNumber) {
            throw new ProcessFrostFSException(
                    String.format(
                            UNEXPECTED_MESSAGE_TYPE_TEMPLATE,
                            expectedFieldNumber,
                            actualFieldNumber
                    )
            );
        }
    }
}

View file

@ -1,62 +0,0 @@
package info.frostfs.sdk.services.impl.rwhelper;
import frostfs.object.ObjectServiceGrpc;
import frostfs.object.Service;
import info.frostfs.sdk.exceptions.ProcessFrostFSException;
import info.frostfs.sdk.utils.WaitUtil;
import io.grpc.stub.StreamObserver;
import lombok.Getter;
import static info.frostfs.sdk.constants.AppConst.DEFAULT_POLL_INTERVAL;
import static info.frostfs.sdk.constants.ErrorConst.PROTO_MESSAGE_IS_EMPTY_TEMPLATE;
import static java.util.Objects.isNull;
/**
 * Client-streaming helper for object PUT: forwards chunk requests to the
 * gRPC stub and blocks in {@link #complete()} until the single PutResponse
 * arrives.
 *
 * <p>Bug fix: previously the response observer threw from
 * {@code onError}, which runs on a gRPC callback thread where the exception
 * is swallowed — so on any stream error {@code complete()} polled forever.
 * The error is now recorded and rethrown from {@code complete()} on the
 * caller's thread. The callback fields are also {@code volatile}, since they
 * are written by the gRPC thread and read by the polling caller thread.
 */
public class ObjectStreamer {
    private final StreamObserver<Service.PutRequest> requestObserver;
    private final PutResponseCallback responseObserver;

    public ObjectStreamer(ObjectServiceGrpc.ObjectServiceStub objectServiceStub) {
        PutResponseCallback responseObserver = new PutResponseCallback();
        this.responseObserver = responseObserver;
        this.requestObserver = objectServiceStub.put(responseObserver);
    }

    /**
     * Sends one request on the stream.
     *
     * @throws ProcessFrostFSException if {@code request} is null
     */
    public void write(Service.PutRequest request) {
        if (isNull(request)) {
            throw new ProcessFrostFSException(
                    String.format(PROTO_MESSAGE_IS_EMPTY_TEMPLATE, Service.PutRequest.class.getName())
            );
        }
        requestObserver.onNext(request);
    }

    /**
     * Half-closes the stream and waits for the server's response.
     *
     * @return the PutResponse delivered by the server
     * @throws ProcessFrostFSException if the stream terminated with an error
     */
    public Service.PutResponse complete() {
        requestObserver.onCompleted();
        // Poll until either the response or a terminal error arrives.
        while (isNull(responseObserver.getResponse()) && isNull(responseObserver.getError())) {
            WaitUtil.sleep(DEFAULT_POLL_INTERVAL);
        }
        if (!isNull(responseObserver.getError())) {
            throw new ProcessFrostFSException(responseObserver.getError());
        }
        return responseObserver.getResponse();
    }

    @Getter
    private static class PutResponseCallback implements StreamObserver<Service.PutResponse> {
        // volatile: written by the gRPC callback thread, read by the caller thread.
        private volatile Service.PutResponse response;
        private volatile Throwable error;

        @Override
        public void onNext(Service.PutResponse putResponse) {
            this.response = putResponse;
        }

        @Override
        public void onError(Throwable throwable) {
            // Record instead of throwing: exceptions thrown here are swallowed
            // by the gRPC runtime and would leave complete() spinning forever.
            this.error = throwable;
        }

        @Override
        public void onCompleted() {
        }
    }
}

View file

@ -1,43 +0,0 @@
package info.frostfs.sdk.services.impl.rwhelper;
import com.google.protobuf.ByteString;
import frostfs.object.Service;
import info.frostfs.sdk.dto.object.ObjectId;
import info.frostfs.sdk.jdo.ClientEnvironment;
import info.frostfs.sdk.jdo.parameters.object.PrmObjectPutBase;
import info.frostfs.sdk.tools.RequestConstructor;
import info.frostfs.sdk.tools.Verifier;
import lombok.AllArgsConstructor;
import lombok.Getter;
import static info.frostfs.sdk.tools.RequestSigner.sign;
//todo specify a deadline for each stream request, not for the entire stream
@Getter
@AllArgsConstructor
public class ObjectWriter {
    private final ClientEnvironment environment;
    private final PrmObjectPutBase args;
    private final ObjectStreamer streamer;

    /**
     * Signs and streams one payload chunk as a PutRequest.
     */
    public void write(byte[] buffer) {
        var chunkBody = Service.PutRequest.Body.newBuilder()
                .setChunk(ByteString.copyFrom(buffer))
                .build();
        var request = Service.PutRequest.newBuilder()
                .setBody(chunkBody)
                .clearVerifyHeader();
        // Order matters: meta header first, then signature over the request.
        RequestConstructor.addMetaHeader(request, args.getXHeaders());
        sign(request, environment.getKey());
        streamer.write(request.build());
    }

    /**
     * Completes the stream, verifies the server response, and returns the
     * stored object's id.
     */
    public ObjectId complete() {
        var putResponse = streamer.complete();
        Verifier.checkResponse(putResponse);
        return new ObjectId(putResponse.getBody().getObjectId().getValue().toByteArray());
    }
}

View file

@ -1,62 +0,0 @@
package info.frostfs.sdk.services.impl.rwhelper;
import frostfs.object.ObjectServiceGrpc;
import frostfs.object.Service;
import info.frostfs.sdk.exceptions.ProcessFrostFSException;
import info.frostfs.sdk.utils.WaitUtil;
import io.grpc.stub.StreamObserver;
import lombok.Getter;
import static info.frostfs.sdk.constants.AppConst.DEFAULT_POLL_INTERVAL;
import static info.frostfs.sdk.constants.ErrorConst.PROTO_MESSAGE_IS_EMPTY_TEMPLATE;
import static java.util.Objects.isNull;
/**
 * Client-streaming helper for object PATCH: forwards patch requests to the
 * gRPC stub and blocks in {@link #complete()} until the single PatchResponse
 * arrives.
 *
 * <p>Bug fixes: (1) the null-request error message previously named
 * {@code Service.PutRequest} instead of {@code Service.PatchRequest} —
 * copy-paste from ObjectStreamer; (2) the response observer threw from
 * {@code onError}, which runs on a gRPC callback thread where the exception
 * is swallowed — so on any stream error {@code complete()} polled forever.
 * The error is now recorded and rethrown from {@code complete()} on the
 * caller's thread, and the callback fields are {@code volatile} for
 * cross-thread visibility.
 */
public class PatchStreamer {
    private final StreamObserver<Service.PatchRequest> requestObserver;
    private final PatchResponseCallback responseObserver;

    public PatchStreamer(ObjectServiceGrpc.ObjectServiceStub objectServiceStub) {
        PatchResponseCallback responseObserver = new PatchResponseCallback();
        this.responseObserver = responseObserver;
        this.requestObserver = objectServiceStub.patch(responseObserver);
    }

    /**
     * Sends one request on the stream.
     *
     * @throws ProcessFrostFSException if {@code request} is null
     */
    public void write(Service.PatchRequest request) {
        if (isNull(request)) {
            throw new ProcessFrostFSException(
                    String.format(PROTO_MESSAGE_IS_EMPTY_TEMPLATE, Service.PatchRequest.class.getName())
            );
        }
        requestObserver.onNext(request);
    }

    /**
     * Half-closes the stream and waits for the server's response.
     *
     * @return the PatchResponse delivered by the server
     * @throws ProcessFrostFSException if the stream terminated with an error
     */
    public Service.PatchResponse complete() {
        requestObserver.onCompleted();
        // Poll until either the response or a terminal error arrives.
        while (isNull(responseObserver.getResponse()) && isNull(responseObserver.getError())) {
            WaitUtil.sleep(DEFAULT_POLL_INTERVAL);
        }
        if (!isNull(responseObserver.getError())) {
            throw new ProcessFrostFSException(responseObserver.getError());
        }
        return responseObserver.getResponse();
    }

    @Getter
    private static class PatchResponseCallback implements StreamObserver<Service.PatchResponse> {
        // volatile: written by the gRPC callback thread, read by the caller thread.
        private volatile Service.PatchResponse response;
        private volatile Throwable error;

        @Override
        public void onNext(Service.PatchResponse patchResponse) {
            this.response = patchResponse;
        }

        @Override
        public void onError(Throwable throwable) {
            // Record instead of throwing: exceptions thrown here are swallowed
            // by the gRPC runtime and would leave complete() spinning forever.
            this.error = throwable;
        }

        @Override
        public void onCompleted() {
        }
    }
}

Some files were not shown because too many files have changed in this diff Show more