[#52] Add placement vector #53

Merged
orikik merged 1 commit from orikik/frostfs-sdk-java:vectors into master 2025-04-23 07:57:07 +00:00
49 changed files with 2630 additions and 24 deletions

View file

@ -76,7 +76,7 @@
<property name="ignoreHashCodeMethod" value="true"/> <property name="ignoreHashCodeMethod" value="true"/>
<property name="ignoreAnnotation" value="true"/> <property name="ignoreAnnotation" value="true"/>
<property name="ignoreFieldDeclaration" value="true"/> <property name="ignoreFieldDeclaration" value="true"/>
<property name="ignoreNumbers" value="-1, 0, 1, 2, 4"/> <property name="ignoreNumbers" value="-1, 0, 1, 2, 3, 4"/>
</module> </module>
<module name="RequireThis"/> <module name="RequireThis"/>
<module name="DeclarationOrder"/> <module name="DeclarationOrder"/>

View file

@ -5,6 +5,10 @@ public class CryptoConst {
public static final int RFC6979_SIGNATURE_SIZE = 64; public static final int RFC6979_SIGNATURE_SIZE = 64;
public static final int HASH_SIGNATURE_SIZE = 65; public static final int HASH_SIGNATURE_SIZE = 65;
public static final int MURMUR_MULTIPLIER = 33;
public static final long LANDAU_PRIME_DIVISOR_64BIT = 0xc4ceb9fe1a85ec53L;
public static final long LANDAU_PRIME_DIVISOR_65BIT = 0xff51afd7ed558ccdL;
private CryptoConst() { private CryptoConst() {
} }
} }

View file

@ -0,0 +1,369 @@
package info.frostfs.sdk.placement;
import info.frostfs.sdk.dto.netmap.*;
import info.frostfs.sdk.enums.netmap.FilterOperation;
import info.frostfs.sdk.enums.netmap.SelectorClause;
import info.frostfs.sdk.exceptions.FrostFSException;
import lombok.Getter;
import lombok.Setter;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;
import static info.frostfs.sdk.constants.AttributeConst.ATTRIBUTE_CAPACITY;
import static info.frostfs.sdk.constants.AttributeConst.ATTRIBUTE_PRICE;
import static info.frostfs.sdk.constants.ErrorConst.*;
/**
 * Mutable evaluation state for applying a {@link PlacementPolicy} to a
 * {@link NetmapSnapshot}.
 *
 * <p>Usage order matters: {@link #processFilters(PlacementPolicy)} must run
 * before {@link #processSelectors(PlacementPolicy)}, because selectors resolve
 * filters by name through {@code processedFilters}.
 */
@Getter
@Setter
public final class Context {
    // "*" selects every node and may not be redefined by a user filter.
    public static final String MAIN_FILTER_NAME = "*";
    public static final String LIKE_WILDCARD = "*";

    // network map to operate on
    private final NetmapSnapshot netMap;

    // cache of processed filters
    private final Map<String, Filter> processedFilters = new HashMap<>();

    // cache of processed selectors
    private final Map<String, Selector> processedSelectors = new HashMap<>();

    // stores results of selector processing
    private final Map<String, List<List<NodeInfo>>> selections = new HashMap<>();

    // cache of parsed numeric values
    private final Map<String, Long> numCache = new HashMap<>();

    // node hash -> used flag; nodes recorded here are excluded from later selections
    private final Map<Long, Boolean> usedNodes = new HashMap<>();

    // weight function derived from the netmap's capacity/price statistics
    private final Function<NodeInfo, Double> weightFunc;

    // HRW (rendezvous hashing) pivot; when empty, selection falls back to deterministic sorting
    private byte[] hrwSeed;
    private long hrwSeedHash;

    // container backup factor: multiplies the node count taken per bucket
    private int cbf;

    // when true, selections that cannot satisfy the requested counts throw
    private boolean strict;

    /**
     * Creates a context bound to the given netmap snapshot; the weight
     * function is computed once from the snapshot's node collection.
     */
    public Context(NetmapSnapshot netMap) {
        this.netMap = netMap;
        this.weightFunc = Tools.defaultWeightFunc(netMap.getNodeInfoCollection());
    }

    /**
     * Returns (bucketCount, nodesPerBucket): SAME clause means 1 bucket of
     * {@code count} nodes; otherwise {@code count} buckets of 1 node each.
     */
    private static Pair<Integer, Integer> calcNodesCount(Selector selector) {
        return selector.getClause() == SelectorClause.SAME
                ? new ImmutablePair<>(1, selector.getCount())
                : new ImmutablePair<>(selector.getCount(), 1);
    }

    /** Folds every node's weight into the aggregator and returns its trimmed mean. */
    private static double calcBucketWeight(List<NodeInfo> ns, MeanIQRAgg a, Function<NodeInfo, Double> wf) {
        for (NodeInfo node : ns) {
            a.add(wf.apply(node));
        }
        return a.compute();
    }

    /** Validates and caches all top-level filters of the policy. */
    public void processFilters(PlacementPolicy policy) {
        for (Filter filter : policy.getFilters()) {
            processFilter(filter, true);
        }
    }

    /**
     * Validates one filter (recursively for AND/OR/NOT) and, when it is a
     * top-level filter, registers it in {@code processedFilters}.
     *
     * @param filter filter to validate
     * @param top    true for policy-level filters, false for nested sub-filters
     * @throws FrostFSException on reserved/duplicate/unknown names or invalid operations
     */
    private void processFilter(Filter filter, boolean top) {
        String filterName = filter.getName();
        // "*" is reserved for the implicit main filter.
        if (MAIN_FILTER_NAME.equals(filterName)) {
            throw new FrostFSException(String.format(INVALID_FILTER_NAME_TEMPLATE, MAIN_FILTER_NAME));
        }
        if (top && (filterName == null || filterName.isEmpty())) {
            throw new FrostFSException(UNNAMED_TOP_FILTER);
        }
        // A named nested filter must be a reference to an already-processed filter.
        if (!top && filterName != null && !filterName.isEmpty() && !processedFilters.containsKey(filterName)) {
            throw new FrostFSException(FILTER_NOT_FOUND);
        }
        if (filter.getOperation() == FilterOperation.AND ||
                filter.getOperation() == FilterOperation.OR ||
                filter.getOperation() == FilterOperation.NOT) {
            for (Filter f : filter.getFilters()) {
                processFilter(f, false);
            }
        } else {
            // Leaf operations must not carry sub-filters.
            if (filter.getFilters().length != 0) {
                throw new FrostFSException(NON_EMPTY_FILTERS);
            } else if (!top && filterName != null && !filterName.isEmpty()) {
                // named reference
                return;
            }
            switch (filter.getOperation()) {
                case EQ:
                case NE:
                case LIKE:
                    break;
                case GT:
                case GE:
                case LT:
                case LE:
                    // NOTE(review): a non-numeric value escapes as NumberFormatException,
                    // not FrostFSException — confirm this is the intended contract.
                    long n = Long.parseLong(filter.getValue());
                    numCache.put(filter.getValue(), n);
                    break;
                default:
                    throw new FrostFSException(String.format(INVALID_FILTER_OPERATION_TEMPLATE, filter.getOperation()));
            }
        }
        if (top) {
            processedFilters.put(filterName, filter);
        }
    }

    /**
     * Resolves each selector's filter, runs the selection, and caches both
     * the selector and its result by selector name.
     */
    public void processSelectors(PlacementPolicy policy) {
        for (Selector selector : policy.getSelectors()) {
            String filterName = selector.getFilter();
            if (!MAIN_FILTER_NAME.equals(filterName)) {
                if (selector.getFilter() == null || !processedFilters.containsKey(selector.getFilter())) {
                    throw new FrostFSException(String.format(FILTER_NOT_FOUND_TEMPLATE, filterName));
                }
            }
            processedSelectors.put(selector.getName(), selector);
            List<List<NodeInfo>> selection = getSelection(selector);
            selections.put(selector.getName(), selection);
        }
    }

    /**
     * Builds the raw selection base for a selector: nodes matching its filter
     * (skipping already-used nodes), grouped by the selector attribute when
     * one is set, and HRW-sorted when an hrwSeed is present.
     */
    private NodeAttributePair[] getSelectionBase(Selector selector) {
        String fName = selector.getFilter();
        if (fName == null) {
            throw new FrostFSException(FILTER_NAME_IS_EMPTY);
        }
        Filter f = processedFilters.get(fName);
        boolean isMain = MAIN_FILTER_NAME.equals(fName);
        List<NodeAttributePair> result = new ArrayList<>();
        Map<String, List<NodeInfo>> nodeMap = new HashMap<>();
        String attr = selector.getAttribute();
        for (NodeInfo node : netMap.getNodeInfoCollection()) {
            // Nodes consumed by earlier unique selections are excluded.
            if (usedNodes.containsKey(node.getHash())) {
                continue;
            }
            if (isMain || match(f, node)) {
                if (attr == null) {
                    // No grouping attribute: every node forms its own bucket.
                    result.add(new NodeAttributePair("", new NodeInfo[]{node}));
                } else {
                    String v = node.getAttributes().get(attr);
                    List<NodeInfo> nodes = nodeMap.computeIfAbsent(v, k -> new ArrayList<>());
                    nodes.add(node);
                }
            }
        }
        if (attr != null && !attr.isEmpty()) {
            for (Map.Entry<String, List<NodeInfo>> entry : nodeMap.entrySet()) {
                result.add(new NodeAttributePair(entry.getKey(), entry.getValue().toArray(NodeInfo[]::new)));
            }
        }
        if (hrwSeed != null && hrwSeed.length != 0) {
            // Rendezvous-sort the nodes inside each bucket by weighted distance to the seed.
            NodeAttributePair[] sortedNodes = new NodeAttributePair[result.size()];
            for (int i = 0; i < result.size(); i++) {
                double[] ws = new double[result.get(i).getNodes().length];
                NodeAttributePair res = result.get(i);
                Tools.appendWeightsTo(res.getNodes(), weightFunc, ws);
                sortedNodes[i] = new NodeAttributePair(
                        res.getAttr(),
                        Tools.sortHasherSliceByWeightValue(Arrays.asList(res.getNodes()), ws, hrwSeedHash)
                                .toArray(NodeInfo[]::new)
                );
            }
            return sortedNodes;
        }
        return result.toArray(new NodeAttributePair[0]);
    }

    /**
     * Runs a selector against the netmap and returns its buckets of nodes.
     * Buckets with at least nodesInBucket * cbf nodes are taken directly;
     * smaller (but sufficient) buckets serve as fallback when too few full
     * buckets exist. In strict mode a shortage throws.
     */
    public List<List<NodeInfo>> getSelection(Selector s) {
        Pair<Integer, Integer> counts = calcNodesCount(s);
        int bucketCount = counts.getKey();
        int nodesInBucket = counts.getValue();
        NodeAttributePair[] buckets = getSelectionBase(s);
        if (strict && buckets.length < bucketCount) {
            throw new FrostFSException(String.format(NOT_ENOUGH_NODES_TEMPLATE, s.getName()));
        }
        // Without an HRW seed, order deterministically: by first node's hash,
        // or by attribute value when the selector groups by attribute.
        if (hrwSeed == null || hrwSeed.length == 0) {
            if (s.getAttribute() == null || s.getAttribute().isEmpty()) {
                Arrays.sort(buckets, Comparator.comparing(b -> b.getNodes()[0].getHash()));
            } else {
                Arrays.sort(buckets, Comparator.comparing(NodeAttributePair::getAttr));
            }
        }
        int maxNodesInBucket = nodesInBucket * cbf;
        List<List<NodeInfo>> res = new ArrayList<>(buckets.length);
        List<List<NodeInfo>> fallback = new ArrayList<>(buckets.length);
        for (NodeAttributePair bucket : buckets) {
            List<NodeInfo> ns = Arrays.asList(bucket.getNodes());
            if (ns.size() >= maxNodesInBucket) {
                res.add(new ArrayList<>(ns.subList(0, maxNodesInBucket)));
            } else if (ns.size() >= nodesInBucket) {
                fallback.add(new ArrayList<>(ns));
            }
        }
        if (res.size() < bucketCount) {
            res.addAll(fallback);
            if (strict && res.size() < bucketCount) {
                throw new FrostFSException(String.format(NOT_ENOUGH_NODES_TEMPLATE, s.getName()));
            }
        }
        if (hrwSeed != null && hrwSeed.length != 0) {
            // HRW-sort whole buckets by their aggregate (IQR-trimmed mean) weight.
            double[] weights = new double[res.size()];
            var a = new MeanIQRAgg();
            for (int i = 0; i < res.size(); i++) {
                a.clear();
                weights[i] = calcBucketWeight(res.get(i), a, weightFunc);
            }
            List<HasherList> hashers = res.stream()
                    .map(HasherList::new)
                    .collect(Collectors.toList());
            hashers = Tools.sortHasherSliceByWeightValue(hashers, weights, hrwSeedHash);
            for (int i = 0; i < res.size(); i++) {
                res.set(i, hashers.get(i).getNodes());
            }
        }
        if (res.size() < bucketCount) {
            if (strict && res.isEmpty()) {
                throw new FrostFSException(NOT_ENOUGH_NODES);
            }
            // Non-strict mode degrades gracefully to however many buckets exist.
            bucketCount = res.size();
        }
        if (s.getAttribute() == null || s.getAttribute().isEmpty()) {
            // The subList view stays valid because res is immediately replaced
            // by a copy of its prefix; surplus buckets are then redistributed
            // round-robin into the kept buckets up to maxNodesInBucket.
            fallback = res.subList(bucketCount, res.size());
            res = new ArrayList<>(res.subList(0, bucketCount));
            for (int i = 0; i < fallback.size(); i++) {
                int index = i % bucketCount;
                if (res.get(index).size() >= maxNodesInBucket) {
                    break;
                }
                res.get(index).addAll(fallback.get(i));
            }
        }
        return res.subList(0, bucketCount);
    }

    /**
     * Evaluates a leaf (non-logical) filter against one node.
     *
     * <p>NOTE(review): EQ guards with containsKey, but NE/LIKE dereference
     * the attribute value directly and will throw NullPointerException when
     * the node lacks the attribute — confirm against the reference SDKs.
     */
    private boolean matchKeyValue(Filter f, NodeInfo nodeInfo) {
        switch (f.getOperation()) {
            case EQ:
                return nodeInfo.getAttributes().containsKey(f.getKey()) &&
                        nodeInfo.getAttributes().get(f.getKey()).equals(f.getValue());
            case LIKE:
                // "*" at either end of the pattern acts as a prefix/suffix wildcard.
                boolean hasPrefix = f.getValue().startsWith(LIKE_WILDCARD);
                boolean hasSuffix = f.getValue().endsWith(LIKE_WILDCARD);
                int start = hasPrefix ? LIKE_WILDCARD.length() : 0;
                int end = hasSuffix ? f.getValue().length() - LIKE_WILDCARD.length() : f.getValue().length();
                String str = f.getValue().substring(start, end);
                if (hasPrefix && hasSuffix) {
                    return nodeInfo.getAttributes().get(f.getKey()).contains(str);
                }
                if (hasPrefix) {
                    return nodeInfo.getAttributes().get(f.getKey()).endsWith(str);
                }
                if (hasSuffix) {
                    return nodeInfo.getAttributes().get(f.getKey()).startsWith(str);
                }
                return nodeInfo.getAttributes().get(f.getKey()).equals(f.getValue());
            case NE:
                return !nodeInfo.getAttributes().get(f.getKey()).equals(f.getValue());
            default:
                // Numeric comparisons: Price and Capacity come from dedicated
                // node fields; anything else is parsed from the attribute map.
                long attr;
                switch (f.getKey()) {
                    case ATTRIBUTE_PRICE:
                        attr = nodeInfo.getPrice().longValue();
                        break;
                    case ATTRIBUTE_CAPACITY:
                        attr = nodeInfo.getCapacity().longValue();
                        break;
                    default:
                        try {
                            attr = Long.parseLong(nodeInfo.getAttributes().get(f.getKey()));
                        } catch (NumberFormatException e) {
                            // Unparseable attribute never matches a numeric filter.
                            return false;
                        }
                        break;
                }
                switch (f.getOperation()) {
                    case GT:
                        return attr > numCache.get(f.getValue());
                    case GE:
                        return attr >= numCache.get(f.getValue());
                    case LT:
                        return attr < numCache.get(f.getValue());
                    case LE:
                        return attr <= numCache.get(f.getValue());
                    default:
                        break;
                }
                break;
        }
        return false;
    }

    /**
     * Evaluates a filter tree against one node; named sub-filters are
     * resolved through the processedFilters cache. A null filter never matches.
     */
    boolean match(Filter f, NodeInfo nodeInfo) {
        if (f == null) {
            return false;
        }
        switch (f.getOperation()) {
            case NOT:
                Filter[] inner = f.getFilters();
                Filter fSub = inner[0];
                if (inner[0].getName() != null && !inner[0].getName().isEmpty()) {
                    fSub = processedFilters.get(inner[0].getName());
                }
                return !match(fSub, nodeInfo);
            case AND:
            case OR:
                for (int i = 0; i < f.getFilters().length; i++) {
                    Filter currentFilter = f.getFilters()[i];
                    if (currentFilter.getName() != null && !currentFilter.getName().isEmpty()) {
                        currentFilter = processedFilters.get(currentFilter.getName());
                    }
                    boolean ok = match(currentFilter, nodeInfo);
                    // Short-circuit: OR stops on the first true, AND on the first false.
                    if (ok == (f.getOperation() == FilterOperation.OR)) {
                        return ok;
                    }
                }
                return f.getOperation() == FilterOperation.AND;
            default:
                return matchKeyValue(f, nodeInfo);
        }
    }
}

View file

@ -0,0 +1,20 @@
package info.frostfs.sdk.placement;
import info.frostfs.sdk.dto.netmap.Hasher;
import info.frostfs.sdk.dto.netmap.NodeInfo;
import lombok.AllArgsConstructor;
import lombok.Getter;
import org.apache.commons.collections4.CollectionUtils;
import java.util.List;
/**
 * Adapts a bucket (list) of nodes to the {@link Hasher} interface: the
 * bucket's hash is the hash of its first node, or 0 for an empty bucket.
 * Used when whole buckets must be HRW-sorted as single units.
 */
public final class HasherList implements Hasher {
    private final List<NodeInfo> nodes;

    public HasherList(List<NodeInfo> nodes) {
        this.nodes = nodes;
    }

    public List<NodeInfo> getNodes() {
        return nodes;
    }

    @Override
    public long getHash() {
        // Null or empty bucket hashes to zero; otherwise delegate to the head node.
        if (nodes == null || nodes.isEmpty()) {
            return 0L;
        }
        return nodes.get(0).getHash();
    }
}

View file

@ -0,0 +1,18 @@
package info.frostfs.sdk.placement;
import java.math.BigInteger;
/**
 * Incremental arithmetic mean over a stream of BigInteger samples.
 * Stores only the running mean and the sample count, never the samples.
 */
public class MeanAgg {
    // Running mean of all samples seen so far (0 before any sample).
    private double mean;
    // Number of samples folded in.
    private int count;

    /** Folds one sample into the running mean. */
    public void add(BigInteger n) {
        int updated = count + 1;
        // Rescale the old mean to the new count, then blend in the sample.
        mean = mean * count / updated + n.doubleValue() / updated;
        count = updated;
    }

    /** Mean of all samples added so far; 0 when none were added. */
    public double compute() {
        return mean;
    }
}

View file

@ -0,0 +1,57 @@
package info.frostfs.sdk.placement;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
public final class MeanIQRAgg {
private static final int MIN_LN = 4;
private final List<Double> arr = new ArrayList<>();
public MeanIQRAgg() {
}
public void add(double d) {
arr.add(d);
}
public double compute() {
int length = arr.size();
if (length == 0) {
return 0;
}
List<Double> sorted = new ArrayList<>(arr);
Collections.sort(sorted);
double minV, maxV;
if (length < MIN_LN) {
minV = sorted.get(0);
maxV = sorted.get(length - 1);
} else {
int start = length / MIN_LN;
int end = length * 3 / MIN_LN - 1;
minV = sorted.get(start);
maxV = sorted.get(end);
}
int count = 0;
double sum = 0;
for (var e : sorted) {
if (e >= minV && e <= maxV) {
sum += e;
count++;
}
}
return count == 0 ? 0 : sum / count;
}
public void clear() {
arr.clear();
}
}

View file

@ -0,0 +1,24 @@
package info.frostfs.sdk.placement;
import java.math.BigInteger;
/**
 * Tracks the minimum of a stream of BigInteger samples as a double.
 */
public class MinAgg {
    // Smallest sample seen so far; meaningful only once minFound is true.
    private double min;
    // Whether any sample has been recorded yet.
    private boolean minFound;

    /** Records one sample, keeping the running minimum. */
    public void add(BigInteger n) {
        double value = n.doubleValue();
        // First sample seeds the minimum; later ones replace it only if smaller.
        if (!minFound || value < min) {
            min = value;
            minFound = true;
        }
    }

    /** Smallest sample seen; 0 when none were added. */
    public double compute() {
        return min;
    }
}

View file

@ -0,0 +1,15 @@
package info.frostfs.sdk.placement;
import info.frostfs.sdk.dto.netmap.NodeInfo;
import lombok.Getter;
/**
 * Immutable pairing of an attribute value with the array of nodes that
 * share it; an empty attr is used for ungrouped selections.
 */
public class NodeAttributePair {
    private final String attr;
    private final NodeInfo[] nodes;

    // Package-private on purpose: only the placement machinery builds these.
    NodeAttributePair(String attr, NodeInfo[] nodes) {
        this.attr = attr;
        this.nodes = nodes;
    }

    public String getAttr() {
        return attr;
    }

    public NodeInfo[] getNodes() {
        return nodes;
    }
}

View file

@ -0,0 +1,5 @@
package info.frostfs.sdk.placement;
/**
 * Maps a raw node weight onto a normalized scale.
 * Implementations are expected to be deterministic and side-effect free.
 */
public interface Normalizer {
    /**
     * Normalizes the given raw weight.
     *
     * @param w raw weight value
     * @return normalized weight
     */
    double normalize(double w);
}

View file

@ -0,0 +1,197 @@
package info.frostfs.sdk.placement;
import info.frostfs.sdk.dto.netmap.*;
import info.frostfs.sdk.exceptions.FrostFSException;
import lombok.AllArgsConstructor;
import org.apache.commons.codec.digest.MurmurHash3;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.Function;
import static info.frostfs.sdk.constants.ErrorConst.SELECTOR_NOT_FOUND_TEMPLATE;
import static info.frostfs.sdk.constants.ErrorConst.VECTORS_IS_NULL;
/**
 * Entry point for netmap placement computations: container node selection,
 * HRW placement-vector sorting, and ad-hoc select/filter evaluation, all
 * against a fixed {@link NetmapSnapshot}.
 */
@AllArgsConstructor
public final class PlacementVector {
    private final NetmapSnapshot netmapSnapshot;

    /** Concatenates a two-level node list into a single flat array, preserving order. */
    private static NodeInfo[] flattenNodes(List<List<NodeInfo>> nodes) {
        int size = nodes.stream().mapToInt(List::size).sum();
        NodeInfo[] result = new NodeInfo[size];
        int i = 0;
        for (List<NodeInfo> ns : nodes) {
            for (NodeInfo n : ns) {
                result[i++] = n;
            }
        }
        return result;
    }

    /*
     * PlacementVectors sorts container nodes returned by ContainerNodes method
     * and returns placement vectors for the entity identified by the given pivot.
     * For example, in order to build node list to store the object,
     * binary-encoded object identifier can be used as pivot.
     * Result is deterministic for the fixed NetMap and parameters.
     * */
    public NodeInfo[][] placementVectors(NodeInfo[][] vectors, byte[] pivot) {
        if (vectors == null) {
            throw new FrostFSException(VECTORS_IS_NULL);
        }
        // First 64 bits of the 128-bit murmur3 hash of the pivot seed the HRW sort.
        long hash = MurmurHash3.hash128x64(pivot, 0, pivot.length, 0)[0];
        Function<NodeInfo, Double> wf = Tools.defaultWeightFunc(netmapSnapshot.getNodeInfoCollection());
        NodeInfo[][] result = new NodeInfo[vectors.length][];
        int maxSize = Arrays.stream(vectors)
                .mapToInt(v -> v.length)
                .max()
                .orElse(0);
        // One shared scratch array, sized for the longest vector; appendWeightsTo
        // overwrites only the first result[i].length entries each iteration.
        double[] spanWeights = new double[maxSize];
        for (int i = 0; i < vectors.length; i++) {
            result[i] = Arrays.copyOf(vectors[i], vectors[i].length);
            Tools.appendWeightsTo(result[i], wf, spanWeights);
            List<NodeInfo> sorted = Tools.sortHasherSliceByWeightValue(
                    Arrays.asList(result[i]),
                    spanWeights,
                    hash
            );
            result[i] = sorted.toArray(new NodeInfo[0]);
        }
        return result;
    }

    /*
     * SelectFilterNodes returns a two-dimensional list of nodes as a result of applying the given
     * SelectFilterExpr to the NetMap. If the SelectFilterExpr contains only filters, the result contains
     * a single row with the result of the last filter application. If the SelectFilterExpr contains only selectors,
     * the result contains the selection rows of the last select application.
     * */
    public List<List<NodeInfo>> selectFilterNodes(SelectFilterExpr expr) {
        // Wrap the expression into a throwaway single-selector policy so the
        // regular Context pipeline can validate and evaluate it.
        PlacementPolicy policy = new PlacementPolicy(
                null,
                false,
                expr.getCbf(),
                expr.getFilters().toArray(Filter[]::new),
                new Selector[]{expr.getSelector()}
        );
        Context ctx = new Context(netmapSnapshot);
        ctx.setCbf(expr.getCbf());
        ctx.processFilters(policy);
        ctx.processSelectors(policy);
        List<List<NodeInfo>> ret = new ArrayList<>();
        if (expr.getSelector() == null) {
            // Filters only: evaluate the last filter against every node.
            Filter lastFilter = expr.getFilters().get(expr.getFilters().size() - 1);
            List<NodeInfo> subCollection = new ArrayList<>();
            ret.add(subCollection);
            for (NodeInfo nodeInfo : netmapSnapshot.getNodeInfoCollection()) {
                if (ctx.match(ctx.getProcessedFilters().get(lastFilter.getName()), nodeInfo)) {
                    subCollection.add(nodeInfo);
                }
            }
        } else if (expr.getSelector().getName() != null) {
            // Selector present: return the cached selection rows for it.
            List<List<NodeInfo>> sel = ctx.getSelection(
                    ctx.getProcessedSelectors().get(expr.getSelector().getName())
            );
            for (List<NodeInfo> ns : sel) {
                List<NodeInfo> subCollection = new ArrayList<>(ns);
                ret.add(subCollection);
            }
        }
        return ret;
    }

    /*
     * ContainerNodes returns two-dimensional list of nodes as a result of applying given PlacementPolicy to the NetMap.
     * Each line of the list corresponds to a replica descriptor.
     * Line order corresponds to order of ReplicaDescriptor list in the policy.
     * Nodes are pre-filtered according to the Filter list from the policy, and then selected by Selector list.
     * Result is deterministic for the fixed NetMap and parameters.
     *
     * Result can be used in PlacementVectors.
     * */
    public NodeInfo[][] containerNodes(PlacementPolicy p, byte[] pivot) {
        Context c = new Context(netmapSnapshot);
        // 3 is the default container backup factor when the policy leaves it unset (0).
        c.setCbf(p.getBackupFactory() == 0 ? 3 : p.getBackupFactory());
        if (pivot != null && pivot.length > 0) {
            c.setHrwSeed(pivot);
            var hash = MurmurHash3.hash128x64(pivot, 0, pivot.length, 0)[0];
            c.setHrwSeedHash(hash);
        }
        c.processFilters(p);
        c.processSelectors(p);
        boolean unique = p.isUnique();
        List<List<NodeInfo>> result = new ArrayList<>(p.getReplicas().length);
        for (int i = 0; i < p.getReplicas().length; i++) {
            result.add(new ArrayList<>());
        }
        for (int i = 0; i < p.getReplicas().length; i++) {
            String sName = p.getReplicas()[i].getSelector();
            // Replica without a selector name: synthesize an implicit
            // main-filter selector — unless the policy has exactly one replica
            // and one selector, in which case control falls through below.
            // NOTE(review): in that fall-through case with unique == false,
            // getSelections().get(sName) is looked up with the empty/null
            // name — confirm the cached selection is keyed accordingly.
            if ((sName == null || sName.isEmpty()) &&
                    !(p.getReplicas().length == 1 && p.getSelectors().length == 1)) {
                Selector s = new Selector(
                        "", p.getReplicas()[i].getCountNodes(), null, null,
                        Context.MAIN_FILTER_NAME
                );
                List<List<NodeInfo>> nodes = c.getSelection(s);
                result.get(i).addAll(Arrays.asList(flattenNodes(nodes)));
                if (unique) {
                    for (NodeInfo n : result.get(i)) {
                        c.getUsedNodes().put(n.getHash(), true);
                    }
                }
                continue;
            }
            if (unique) {
                // Unique placement: re-run the selection so already-used nodes
                // are excluded, then mark the chosen nodes as used.
                Selector s = c.getProcessedSelectors().get(sName);
                if (s == null) {
                    throw new FrostFSException(String.format(SELECTOR_NOT_FOUND_TEMPLATE, sName));
                }
                List<List<NodeInfo>> nodes = c.getSelection(s);
                result.get(i).addAll(Arrays.asList(flattenNodes(nodes)));
                for (NodeInfo n : result.get(i)) {
                    c.getUsedNodes().put(n.getHash(), true);
                }
            } else {
                // Non-unique placement reuses the selection cached by processSelectors.
                List<List<NodeInfo>> nodes = c.getSelections().get(sName);
                result.get(i).addAll(Arrays.asList(flattenNodes(nodes)));
            }
        }
        NodeInfo[][] collection = new NodeInfo[result.size()][];
        for (int i = 0; i < result.size(); i++) {
            collection[i] = result.get(i).toArray(new NodeInfo[0]);
        }
        return collection;
    }
}

View file

@ -0,0 +1,14 @@
package info.frostfs.sdk.placement;
/**
 * Normalizer that rewards weights close to the observed minimum:
 * normalize(w) = (min + 1) / (w + 1), so w == min maps to 1 and larger
 * weights map below 1. The +1 offsets avoid division by zero.
 */
public class ReverseMinNorm implements Normalizer {
    private final double min;

    public ReverseMinNorm(double min) {
        this.min = min;
    }

    @Override
    public double normalize(double w) {
        double numerator = min + 1;
        double denominator = w + 1;
        return numerator / denominator;
    }
}

View file

@ -0,0 +1,16 @@
package info.frostfs.sdk.placement;
import info.frostfs.sdk.dto.netmap.Filter;
import info.frostfs.sdk.dto.netmap.Selector;
import lombok.AllArgsConstructor;
import lombok.Getter;
import java.util.List;
/**
 * Immutable argument bundle for PlacementVector.selectFilterNodes:
 * a container backup factor, an optional selector, and the list of
 * filters the selector may reference.
 */
public class SelectFilterExpr {
    private final int cbf;
    private final Selector selector;
    private final List<Filter> filters;

    public SelectFilterExpr(int cbf, Selector selector, List<Filter> filters) {
        this.cbf = cbf;
        this.selector = selector;
        this.filters = filters;
    }

    public int getCbf() {
        return cbf;
    }

    public Selector getSelector() {
        return selector;
    }

    public List<Filter> getFilters() {
        return filters;
    }
}

View file

@ -0,0 +1,19 @@
package info.frostfs.sdk.placement;
/**
 * Normalizer with a saturating curve: computes x / (1 + x) where
 * x = w / scale (algebraically w / (w + scale)). Output rises from 0
 * toward 1 as w grows; a zero scale maps everything to 0.
 */
public class SigmoidNorm implements Normalizer {
    private final double scale;

    public SigmoidNorm(double scale) {
        this.scale = scale;
    }

    @Override
    public double normalize(double w) {
        // Degenerate scale: avoid dividing by zero, normalize everything to 0.
        if (scale == 0) {
            return 0;
        }
        double ratio = w / scale;
        return ratio / (1 + ratio);
    }
}

View file

@ -0,0 +1,123 @@
package info.frostfs.sdk.placement;
import info.frostfs.sdk.dto.netmap.Hasher;
import info.frostfs.sdk.dto.netmap.NodeInfo;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.function.Function;
import static info.frostfs.sdk.constants.AppConst.UNSIGNED_LONG_MASK;
import static info.frostfs.sdk.constants.CryptoConst.*;
/**
 * Static helpers for HRW (rendezvous) placement: the distance hash,
 * weight computation, and weighted sorting of hashable items.
 */
public final class Tools {
    private Tools() {
    }

    /**
     * 64-bit mixing distance between two hashes: xors them, then applies a
     * finalization mix (xor-shift by MURMUR_MULTIPLIER bits and two odd-prime
     * multiplications — the constants and shift match MurmurHash3's fmix64).
     */
    public static long distance(long x, long y) {
        long acc = x ^ y;
        acc ^= acc >>> MURMUR_MULTIPLIER;
        acc *= LANDAU_PRIME_DIVISOR_65BIT;
        acc ^= acc >>> MURMUR_MULTIPLIER;
        acc *= LANDAU_PRIME_DIVISOR_64BIT;
        acc ^= acc >>> MURMUR_MULTIPLIER;
        return acc;
    }

    /**
     * Fills weights[0..nodes.length) with each node's weight.
     *
     * <p>NOTE(review): the reassignment below only rebinds the local
     * parameter — the caller's array is NOT grown, and when this branch is
     * taken the computed weights are written into a discarded array. Callers
     * must pass weights.length &gt;= nodes.length (all current callers do).
     */
    public static void appendWeightsTo(NodeInfo[] nodes, Function<NodeInfo, Double> wf, double[] weights) {
        if (weights.length < nodes.length) {
            weights = new double[nodes.length];
        }
        for (int i = 0; i < nodes.length; i++) {
            weights[i] = wf.apply(nodes[i]);
        }
    }

    /**
     * HRW-sorts the items: when all weights are equal, ascending by raw
     * distance to {@code hash}; otherwise descending by
     * (2^64-1 - distance) * weight, so closer and heavier items come first.
     * Returns the input list unchanged when empty.
     */
    public static <T extends Hasher> List<T> sortHasherSliceByWeightValue(List<T> nodes, double[] weights, long hash) {
        if (nodes.isEmpty()) {
            return nodes;
        }
        boolean allEquals = true;
        if (weights.length > 1) {
            for (int i = 1; i < weights.length; i++) {
                if (weights[i] != weights[0]) {
                    allEquals = false;
                    break;
                }
            }
        }
        Double[] dist = new Double[nodes.size()];
        if (allEquals) {
            // Uniform weights: plain unsigned distance, ascending.
            for (int i = 0; i < dist.length; i++) {
                long x = nodes.get(i).getHash();
                dist[i] = toUnsignedBigInteger(distance(x, hash)).doubleValue();
            }
            return sortHasherByDistance(nodes, dist, true);
        }
        for (int i = 0; i < dist.length; i++) {
            // Invert the unsigned distance so larger values mean "closer",
            // then scale by the node weight; sort descending.
            var reverse = UNSIGNED_LONG_MASK.subtract(toUnsignedBigInteger(distance(nodes.get(i).getHash(), hash)));
            dist[i] = reverse.doubleValue() * weights[i];
        }
        return sortHasherByDistance(nodes, dist, false);
    }

    /**
     * Returns the items reordered by their parallel dist values,
     * ascending when {@code asc} is true, otherwise descending.
     */
    public static <T extends Hasher, N extends Comparable<N>> List<T> sortHasherByDistance(
            List<T> nodes, N[] dist, boolean asc
    ) {
        IndexedValue<T, N>[] indexes = new IndexedValue[nodes.size()];
        for (int i = 0; i < dist.length; i++) {
            indexes[i] = new IndexedValue<>(nodes.get(i), dist[i]);
        }
        if (asc) {
            Arrays.sort(indexes, Comparator.comparing(iv -> iv.dist));
        } else {
            Arrays.sort(indexes, (iv1, iv2) -> iv2.dist.compareTo(iv1.dist));
        }
        List<T> result = new ArrayList<>();
        for (IndexedValue<T, N> iv : indexes) {
            result.add(iv.nodeInfo);
        }
        return result;
    }

    /**
     * Builds the default weight function from netmap statistics:
     * sigmoid-normalized capacity (scaled by the mean capacity) times
     * reverse-min-normalized price.
     */
    public static Function<NodeInfo, Double> defaultWeightFunc(List<NodeInfo> nodes) {
        MeanAgg mean = new MeanAgg();
        MinAgg minV = new MinAgg();
        for (NodeInfo node : nodes) {
            mean.add(node.getCapacity());
            minV.add(node.getPrice());
        }
        return newWeightFunc(new SigmoidNorm(mean.compute()), new ReverseMinNorm(minV.compute()));
    }

    /** Reinterprets a signed long as its unsigned 64-bit value. */
    private static BigInteger toUnsignedBigInteger(long i) {
        return i >= 0 ? BigInteger.valueOf(i) : BigInteger.valueOf(i).and(UNSIGNED_LONG_MASK);
    }

    /** Combines capacity and price normalizers into a single multiplicative weight. */
    private static Function<NodeInfo, Double> newWeightFunc(Normalizer capNorm, Normalizer priceNorm) {
        return nodeInfo -> capNorm.normalize(nodeInfo.getCapacity().doubleValue())
                * priceNorm.normalize(nodeInfo.getPrice().doubleValue());
    }

    // Pairs an item with its computed distance for sorting.
    private static class IndexedValue<T, N> {
        final T nodeInfo;
        final N dist;

        IndexedValue(T nodeInfo, N dist) {
            this.nodeInfo = nodeInfo;
            this.dist = dist;
        }
    }
}

View file

@ -0,0 +1,238 @@
package info.frostfs.sdk.placement;
import info.frostfs.sdk.dto.netmap.*;
import info.frostfs.sdk.enums.netmap.FilterOperation;
import info.frostfs.sdk.enums.netmap.NodeState;
import info.frostfs.sdk.enums.netmap.SelectorClause;
import lombok.Getter;
import lombok.SneakyThrows;
import org.junit.jupiter.api.Test;
import org.yaml.snakeyaml.Yaml;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.*;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.junit.jupiter.api.Assertions.*;
/**
 * YAML-driven tests for PlacementVector: each fixture under the
 * "placement" resource directory describes a netmap, a set of policies,
 * and the expected node indices per replica (and optionally per
 * placement-vector sort).
 */
public class PlacementVectorTest {
    private static final Yaml YAML = new Yaml();

    /** Asserts that a node's attribute map equals the expected map, order-insensitively. */
    private static void compareNodes(Map<String, String> attrs, NodeInfo nodeInfo) {
        assertEquals(attrs.size(), nodeInfo.getAttributes().size());
        assertEquals(
                attrs.entrySet().stream().sorted(Map.Entry.comparingByKey()).collect(Collectors.toList()),
                nodeInfo.getAttributes().entrySet().stream().sorted(Map.Entry.comparingByKey()).collect(Collectors.toList())
        );
    }

    @SneakyThrows
    @Test
    public void placementTest() {
        // Collect every fixture file under the placement resource directory.
        Path resourceDirYaml = Paths.get(Objects.requireNonNull(getClass().getClassLoader()
                .getResource("placement")).toURI());
        List<Path> yamlFiles;
        try (Stream<Path> paths = Files.walk(resourceDirYaml)) {
            yamlFiles = paths.filter(Files::isRegularFile).collect(Collectors.toList());
        }
        // Fixed version/addresses: fixtures only exercise attributes and keys.
        Version v = new Version(2, 13);
        String[] addresses = {"localhost", "server1"};
        for (Path file : yamlFiles) {
            TestCase testCase = YAML.loadAs(Files.newInputStream(file), TestCase.class);
            assertNotNull(testCase);
            assertNotNull(testCase.nodes);
            assertTrue(testCase.nodes.length > 0);
            // Build NodeInfo instances from the fixture's node descriptors.
            List<NodeInfo> nodes = Arrays.stream(testCase.nodes)
                    .map(n -> new NodeInfo(
                            n.state,
                            v,
                            List.of(addresses),
                            n.attributes != null ?
                                    Arrays.stream(n.attributes)
                                            .collect(Collectors.toMap(KeyValuePair::getKey, KeyValuePair::getValue)) :
                                    Collections.emptyMap(),
                            n.getPublicKeyBytes()
                    ))
                    .collect(Collectors.toList());
            NetmapSnapshot netmap = new NetmapSnapshot(100L, nodes);
            assertNotNull(testCase.tests);
            for (var entry : testCase.tests.entrySet()) {
                var test = entry.getValue();
                // Translate the fixture DTOs into a real PlacementPolicy.
                PlacementPolicy policy = new PlacementPolicy(
                        test.policy.replicas != null ?
                                Arrays.stream(test.policy.replicas)
                                        .map(r -> new Replica(r.count, r.selector))
                                        .toArray(Replica[]::new) :
                                new Replica[0],
                        test.policy.unique,
                        test.policy.containerBackupFactor,
                        test.policy.filters != null
                                ? Arrays.stream(test.policy.filters)
                                .map(FilterDto::getFilter)
                                .toArray(Filter[]::new)
                                : new Filter[]{},
                        test.policy.selectors != null
                                ? Arrays.stream(test.policy.selectors)
                                .map(SelectorDto::getSelector)
                                .toArray(Selector[]::new)
                                : new Selector[]{}
                );
                try {
                    var vector = new PlacementVector(netmap);
                    NodeInfo[][] result = vector.containerNodes(policy, test.getPivotBytes());
                    if (test.result == null) {
                        // No expected indices: either an error was expected
                        // (and should have thrown) or all replicas come back empty.
                        if (test.error != null && !test.error.isEmpty()) {
                            fail("Error is expected but has not been thrown");
                        } else {
                            assertNotNull(test.policy.replicas);
                            assertEquals(result.length, test.policy.replicas.length);
                            for (NodeInfo[] nodesArr : result) {
                                assertEquals(0, nodesArr.length);
                            }
                        }
                    } else {
                        // Expected indices reference positions in the fixture's node list;
                        // nodes are compared by attribute map.
                        assertEquals(test.result.length, result.length);
                        for (int i = 0; i < test.result.length; i++) {
                            assertEquals(test.result[i].length, result[i].length);
                            for (int j = 0; j < test.result[i].length; j++) {
                                compareNodes(nodes.get(test.result[i][j]).getAttributes(), result[i][j]);
                            }
                        }
                        // Optional second stage: verify placementVectors over the
                        // container-nodes result with a separate pivot.
                        if (test.placement != null
                                && test.placement.result != null
                                && test.placement.getPivotBytes() != null) {
                            NodeInfo[][] placementResult = vector.placementVectors(
                                    result, test.placement.getPivotBytes()
                            );
                            assertEquals(test.placement.result.length, placementResult.length);
                            for (int i = 0; i < placementResult.length; i++) {
                                assertEquals(test.placement.result[i].length, placementResult[i].length);
                                for (int j = 0; j < placementResult[i].length; j++) {
                                    compareNodes(
                                            nodes.get(test.placement.result[i][j]).getAttributes(),
                                            placementResult[i][j]
                                    );
                                }
                            }
                        }
                    }
                } catch (Exception ex) {
                    // An expected-error fixture passes when the message matches;
                    // anything else is a genuine failure and is rethrown.
                    if (test.error != null && !test.error.isEmpty()) {
                        assertTrue(ex.getMessage().contains(test.error));
                    } else {
                        throw ex;
                    }
                }
            }
        }
    }

    // --- YAML fixture DTOs (populated reflectively by SnakeYAML) ---

    /** Top-level fixture: a node list plus named test scenarios. */
    public static class TestCase {
        public String name;
        public String comment;
        public Node[] nodes;
        public Map<String, TestData> tests;
    }

    /** One netmap node descriptor from a fixture. */
    public static class Node {
        public KeyValuePair[] attributes;
        public String publicKey;
        public String[] addresses;
        public NodeState state = NodeState.ONLINE;

        /** Decodes the base64 public key; empty array when absent. */
        public byte[] getPublicKeyBytes() {
            return publicKey == null || publicKey.isEmpty() ? new byte[0] : Base64.getDecoder().decode(publicKey);
        }
    }

    /** A single node attribute entry. */
    @Getter
    public static class KeyValuePair {
        public String key;
        public String value;
    }

    /** One named scenario: policy, optional pivot, expected outcome. */
    public static class TestData {
        public PolicyDto policy;
        public String pivot;
        public int[][] result;
        public String error;
        public ResultData placement;

        /** Decodes the base64 pivot; null when the fixture omits it. */
        public byte[] getPivotBytes() {
            return pivot == null ? null : Base64.getDecoder().decode(pivot);
        }
    }

    /** Fixture mirror of PlacementPolicy. */
    public static class PolicyDto {
        public boolean unique;
        public int containerBackupFactor;
        public FilterDto[] filters;
        public ReplicaDto[] replicas;
        public SelectorDto[] selectors;
    }

    /** Fixture mirror of Selector; null strings become empty. */
    public static class SelectorDto {
        public int count;
        public String name;
        public SelectorClause clause;
        public String attribute;
        public String filter;

        public Selector getSelector() {
            return new Selector(name != null ? name : "", count, clause, attribute, filter);
        }
    }

    /** Fixture mirror of Filter; converts recursively, null strings become empty. */
    public static class FilterDto {
        public String name;
        public String key;
        public FilterOperation op;
        public String value;
        public FilterDto[] filters;

        public Filter getFilter() {
            return new Filter(
                    name != null ? name : "",
                    key != null ? key : "",
                    op,
                    value != null ? value : "",
                    filters != null
                            ? Arrays.stream(filters).map(FilterDto::getFilter).toArray(Filter[]::new)
                            : new Filter[0]
            );
        }
    }

    /** Fixture mirror of Replica. */
    public static class ReplicaDto {
        public int count;
        public String selector;
    }

    /** Expected placement-vector stage: its own pivot plus expected indices. */
    public static class ResultData {
        public String pivot;
        public int[][] result;

        /** Decodes the base64 pivot; null when absent. */
        public byte[] getPivotBytes() {
            return pivot == null ? null : Base64.getDecoder().decode(pivot);
        }
    }
}

View file

@ -0,0 +1,48 @@
name: default CBF is 3
nodes:
- attributes:
- key: Location
value: Europe
- key: Country
value: RU
- key: City
value: St.Petersburg
- attributes:
- key: Location
value: Europe
- key: Country
value: RU
- key: City
value: Moscow
- attributes:
- key: Location
value: Europe
- key: Country
value: DE
- key: City
value: Berlin
- attributes:
- key: Location
value: Europe
- key: Country
value: FR
- key: City
value: Paris
tests:
set default CBF:
policy:
replicas:
- count: 1
selector: EU
containerBackupFactor: 0
selectors:
- name: EU
count: 1
clause: SAME
attribute: Location
filter: '*'
filters: []
result:
- - 0
- 1
- 2

View file

@ -0,0 +1,52 @@
name: Real node count multiplier is in range [1, specified CBF]
nodes:
- attributes:
- key: ID
value: '1'
- key: Country
value: DE
- attributes:
- key: ID
value: '2'
- key: Country
value: DE
- attributes:
- key: ID
value: '3'
- key: Country
value: DE
tests:
select 2, CBF is 2:
policy:
replicas:
- count: 1
selector: X
containerBackupFactor: 2
selectors:
- name: X
count: 2
clause: SAME
attribute: Country
filter: '*'
filters: []
result:
- - 0
- 1
- 2
select 3, CBF is 2:
policy:
replicas:
- count: 1
selector: X
containerBackupFactor: 2
selectors:
- name: X
count: 3
clause: SAME
attribute: Country
filter: '*'
filters: []
result:
- - 0
- 1
- 2

View file

@ -0,0 +1,82 @@
name: CBF requirements
nodes:
- attributes:
- key: ID
value: '1'
- key: Attr
value: Same
- attributes:
- key: ID
value: '2'
- key: Attr
value: Same
- attributes:
- key: ID
value: '3'
- key: Attr
value: Same
- attributes:
- key: ID
value: '4'
- key: Attr
value: Same
tests:
default CBF, no selector:
policy:
replicas:
- count: 2
containerBackupFactor: 0
selectors: []
filters: []
result:
- - 0
- 2
- 1
- 3
explicit CBF, no selector:
policy:
replicas:
- count: 2
containerBackupFactor: 3
selectors: []
filters: []
result:
- - 0
- 2
- 1
- 3
select distinct, weak CBF:
policy:
replicas:
- count: 2
selector: X
containerBackupFactor: 3
selectors:
- name: X
count: 2
clause: DISTINCT
filter: '*'
filters: []
result:
- - 0
- 2
- 1
- 3
select same, weak CBF:
policy:
replicas:
- count: 2
selector: X
containerBackupFactor: 3
selectors:
- name: X
count: 2
clause: SAME
attribute: Attr
filter: '*'
filters: []
result:
- - 0
- 1
- 2
- 3

View file

@ -0,0 +1,207 @@
name: compound filter
nodes:
- attributes:
- key: Storage
value: SSD
- key: Rating
value: '10'
- key: IntField
value: '100'
- key: Param
value: Value1
tests:
good:
policy:
replicas:
- count: 1
selector: S
containerBackupFactor: 1
selectors:
- name: S
count: 1
clause: DISTINCT
filter: Main
filters:
- name: StorageSSD
key: Storage
op: EQ
value: SSD
filters: []
- name: GoodRating
key: Rating
op: GE
value: '4'
filters: []
- name: Main
op: AND
filters:
- name: StorageSSD
op: OPERATION_UNSPECIFIED
filters: []
- name: ''
key: IntField
op: LT
value: '123'
filters: []
- name: GoodRating
op: OPERATION_UNSPECIFIED
filters: []
- op: OR
filters:
- key: Param
op: EQ
value: Value1
filters: []
- key: Param
op: EQ
value: Value2
filters: []
result:
- - 0
bad storage type:
policy:
replicas:
- count: 1
selector: S
containerBackupFactor: 1
selectors:
- name: S
count: 1
clause: DISTINCT
filter: Main
filters:
- name: StorageSSD
key: Storage
op: EQ
value: HDD
filters: []
- name: GoodRating
key: Rating
op: GE
value: '4'
filters: []
- name: Main
op: AND
filters:
- name: StorageSSD
op: OPERATION_UNSPECIFIED
filters: []
- name: ''
key: IntField
op: LT
value: '123'
filters: []
- name: GoodRating
op: OPERATION_UNSPECIFIED
filters: []
- name: ''
op: OR
filters:
- name: ''
key: Param
op: EQ
value: Value1
filters: []
- name: ''
key: Param
op: EQ
value: Value2
filters: []
bad rating:
policy:
replicas:
- count: 1
selector: S
containerBackupFactor: 1
selectors:
- name: S
count: 1
clause: DISTINCT
filter: Main
filters:
- name: StorageSSD
key: Storage
op: EQ
value: SSD
filters: []
- name: GoodRating
key: Rating
op: GE
value: '15'
filters: []
- name: Main
op: AND
filters:
- name: StorageSSD
op: OPERATION_UNSPECIFIED
filters: []
- name: ''
key: IntField
op: LT
value: '123'
filters: []
- name: GoodRating
op: OPERATION_UNSPECIFIED
filters: []
- name: ''
op: OR
filters:
- name: ''
key: Param
op: EQ
value: Value1
filters: []
- name: ''
key: Param
op: EQ
value: Value2
filters: []
bad param:
policy:
replicas:
- count: 1
selector: S
containerBackupFactor: 1
selectors:
- name: S
count: 1
clause: DISTINCT
filter: Main
filters:
- name: StorageSSD
key: Storage
op: EQ
value: SSD
filters: []
- name: GoodRating
key: Rating
op: GE
value: '4'
filters: []
- name: Main
op: AND
filters:
- name: StorageSSD
op: OPERATION_UNSPECIFIED
filters: []
- name: ''
key: IntField
op: LT
value: '123'
filters: []
- name: GoodRating
op: OPERATION_UNSPECIFIED
filters: []
- name: ''
op: OR
filters:
- name: ''
key: Param
op: EQ
value: Value0
filters: []
- name: ''
key: Param
op: EQ
value: Value2
filters: []

View file

@ -0,0 +1,43 @@
name: invalid integer field
nodes:
- attributes:
- key: IntegerField
value: 'true'
- attributes:
- key: IntegerField
value: str
tests:
empty string is not casted to 0:
policy:
replicas:
- count: 1
selector: S
containerBackupFactor: 1
selectors:
- name: S
count: 1
clause: DISTINCT
filter: Main
filters:
- name: Main
key: IntegerField
op: LE
value: '8'
filters: []
non-empty string is not casted to a number:
policy:
replicas:
- count: 1
selector: S
containerBackupFactor: 1
selectors:
- name: S
count: 1
clause: DISTINCT
filter: Main
filters:
- name: Main
key: IntegerField
op: GE
value: '0'
filters: []

View file

@ -0,0 +1,224 @@
name: single-op filters
nodes:
- attributes:
- key: Rating
value: '4'
- key: Country
value: Germany
tests:
GE true:
policy:
replicas:
- count: 1
selector: S
containerBackupFactor: 1
selectors:
- name: S
count: 1
clause: DISTINCT
filter: Main
filters:
- name: Main
key: Rating
op: GE
value: '4'
filters: []
result:
- - 0
GE false:
policy:
replicas:
- count: 1
selector: S
containerBackupFactor: 1
selectors:
- name: S
count: 1
clause: DISTINCT
filter: Main
filters:
- name: Main
key: Rating
op: GE
value: '5'
filters: []
GT true:
policy:
replicas:
- count: 1
selector: S
containerBackupFactor: 1
selectors:
- name: S
count: 1
clause: DISTINCT
filter: Main
filters:
- name: Main
key: Rating
op: GT
value: '3'
filters: []
result:
- - 0
GT false:
policy:
replicas:
- count: 1
selector: S
containerBackupFactor: 1
selectors:
- name: S
count: 1
clause: DISTINCT
filter: Main
filters:
- name: Main
key: Rating
op: GT
value: '4'
filters: []
LE true:
policy:
replicas:
- count: 1
selector: S
containerBackupFactor: 1
selectors:
- name: S
count: 1
clause: DISTINCT
filter: Main
filters:
- name: Main
key: Rating
op: LE
value: '4'
filters: []
result:
- - 0
LE false:
policy:
replicas:
- count: 1
selector: S
containerBackupFactor: 1
selectors:
- name: S
count: 1
clause: DISTINCT
filter: Main
filters:
- name: Main
key: Rating
op: LE
value: '3'
filters: []
LT true:
policy:
replicas:
- count: 1
selector: S
containerBackupFactor: 1
selectors:
- name: S
count: 1
clause: DISTINCT
filter: Main
filters:
- name: Main
key: Rating
op: LT
value: '5'
filters: []
result:
- - 0
LT false:
policy:
replicas:
- count: 1
selector: S
containerBackupFactor: 1
selectors:
- name: S
count: 1
clause: DISTINCT
filter: Main
filters:
- name: Main
key: Rating
op: LT
value: '4'
filters: []
EQ true:
policy:
replicas:
- count: 1
selector: S
containerBackupFactor: 1
selectors:
- name: S
count: 1
clause: DISTINCT
filter: Main
filters:
- name: Main
key: Country
op: EQ
value: Germany
filters: []
result:
- - 0
EQ false:
policy:
replicas:
- count: 1
selector: S
containerBackupFactor: 1
selectors:
- name: S
count: 1
clause: DISTINCT
filter: Main
filters:
- name: Main
key: Country
op: EQ
value: China
filters: []
NE true:
policy:
replicas:
- count: 1
selector: S
containerBackupFactor: 1
selectors:
- name: S
count: 1
clause: DISTINCT
filter: Main
filters:
- name: Main
key: Country
op: NE
value: France
filters: []
result:
- - 0
NE false:
policy:
replicas:
- count: 1
selector: S
containerBackupFactor: 1
selectors:
- name: S
count: 1
clause: DISTINCT
filter: Main
filters:
- name: Main
key: Country
op: NE
value: Germany
filters: []

View file

@ -0,0 +1,118 @@
name: HRW ordering
nodes:
- attributes:
- key: Country
value: Germany
- key: Price
value: '2'
- key: Capacity
value: '10000'
- attributes:
- key: Country
value: Germany
- key: Price
value: '4'
- key: Capacity
value: '1'
- attributes:
- key: Country
value: France
- key: Price
value: '3'
- key: Capacity
value: '10'
- attributes:
- key: Country
value: Russia
- key: Price
value: '2'
- key: Capacity
value: '10000'
- attributes:
- key: Country
value: Russia
- key: Price
value: '1'
- key: Capacity
value: '10000'
- attributes:
- key: Country
value: Russia
- key: Capacity
value: '10000'
- attributes:
- key: Country
value: France
- key: Price
value: '100'
- key: Capacity
value: '1'
- attributes:
- key: Country
value: France
- key: Price
value: '7'
- key: Capacity
value: '10000'
- attributes:
- key: Country
value: Russia
- key: Price
value: '2'
- key: Capacity
value: '1'
tests:
select 3 nodes in 3 distinct countries, same placement:
policy:
replicas:
- count: 1
selector: Main
containerBackupFactor: 1
selectors:
- name: Main
count: 3
clause: DISTINCT
attribute: Country
filter: '*'
filters: []
pivot: Y29udGFpbmVySUQ=
result:
- - 5
- 0
- 7
placement:
pivot: b2JqZWN0SUQ=
result:
- - 5
- 0
- 7
select 6 nodes in 3 distinct countries, different placement:
policy:
replicas:
- count: 1
selector: Main
containerBackupFactor: 2
selectors:
- name: Main
count: 3
clause: DISTINCT
attribute: Country
filter: '*'
filters: []
pivot: Y29udGFpbmVySUQ=
result:
- - 5
- 4
- 0
- 1
- 7
- 2
placement:
pivot: b2JqZWN0SUQ=
result:
- - 5
- 4
- 0
- 7
- 2
- 1

View file

@ -0,0 +1,52 @@
name: unnamed selector (nspcc-dev/neofs-api-go#213)
nodes:
- attributes:
- key: Location
value: Europe
- key: Country
value: Russia
- key: City
value: Moscow
- attributes:
- key: Location
value: Europe
- key: Country
value: Russia
- key: City
value: Saint-Petersburg
- attributes:
- key: Location
value: Europe
- key: Country
value: Sweden
- key: City
value: Stockholm
- attributes:
- key: Location
value: Europe
- key: Country
        value: Finland
- key: City
value: Helsinki
tests:
test:
policy:
replicas:
- count: 4
containerBackupFactor: 1
selectors:
- name: ''
count: 4
clause: DISTINCT
filter: LOC_EU
filters:
- name: LOC_EU
key: Location
op: EQ
value: Europe
filters: []
result:
- - 0
- 1
- 2
- 3

View file

@ -0,0 +1,141 @@
name: single-op filters
nodes:
- attributes:
- key: Country
value: Russia
- key: Rating
value: '1'
- key: City
value: SPB
- attributes:
- key: Country
value: Germany
- key: Rating
value: '5'
- key: City
value: Berlin
- attributes:
- key: Country
value: Russia
- key: Rating
value: '6'
- key: City
value: Moscow
- attributes:
- key: Country
value: France
- key: Rating
value: '4'
- key: City
value: Paris
- attributes:
- key: Country
value: France
- key: Rating
value: '1'
- key: City
value: Lyon
- attributes:
- key: Country
value: Russia
- key: Rating
value: '5'
- key: City
value: SPB
- attributes:
- key: Country
value: Russia
- key: Rating
value: '7'
- key: City
value: Moscow
- attributes:
- key: Country
value: Germany
- key: Rating
value: '3'
- key: City
value: Darmstadt
- attributes:
- key: Country
value: Germany
- key: Rating
value: '7'
- key: City
value: Frankfurt
- attributes:
- key: Country
value: Russia
- key: Rating
value: '9'
- key: City
value: SPB
- attributes:
- key: Country
value: Russia
- key: Rating
value: '9'
- key: City
value: SPB
tests:
Select:
policy:
replicas:
- count: 1
selector: SameRU
- count: 1
selector: DistinctRU
- count: 1
selector: Good
- count: 1
selector: Main
containerBackupFactor: 2
selectors:
- name: SameRU
count: 2
clause: SAME
attribute: City
filter: FromRU
- name: DistinctRU
count: 2
clause: DISTINCT
attribute: City
filter: FromRU
- name: Good
count: 2
clause: DISTINCT
attribute: Country
filter: Good
- name: Main
count: 3
clause: DISTINCT
attribute: Country
filter: '*'
filters:
- name: FromRU
key: Country
op: EQ
value: Russia
- name: Good
key: Rating
op: GE
value: '4'
result:
- - 0
- 5
- 9
- 10
- - 2
- 6
- 0
- 5
- - 1
- 8
- 2
- 5
- - 3
- 4
- 1
- 7
- 0
- 2

View file

@ -0,0 +1,46 @@
name: multiple replicas (#215)
nodes:
- attributes:
- key: City
value: Saint-Petersburg
- attributes:
- key: City
value: Moscow
- attributes:
- key: City
value: Berlin
- attributes:
- key: City
value: Paris
tests:
test:
policy:
replicas:
- count: 1
selector: LOC_SPB_PLACE
- count: 1
selector: LOC_MSK_PLACE
containerBackupFactor: 1
selectors:
- name: LOC_SPB_PLACE
count: 1
clause: CLAUSE_UNSPECIFIED
filter: LOC_SPB
- name: LOC_MSK_PLACE
count: 1
clause: CLAUSE_UNSPECIFIED
filter: LOC_MSK
filters:
- name: LOC_SPB
key: City
op: EQ
value: Saint-Petersburg
filters: []
- name: LOC_MSK
key: City
op: EQ
value: Moscow
filters: []
result:
- - 0
- - 1

View file

@ -0,0 +1,162 @@
name: multiple REP, asymmetric
nodes:
- attributes:
- key: ID
value: '1'
- key: Country
value: RU
- key: City
value: St.Petersburg
- key: SSD
value: '0'
- attributes:
- key: ID
value: '2'
- key: Country
value: RU
- key: City
value: St.Petersburg
- key: SSD
value: '1'
- attributes:
- key: ID
value: '3'
- key: Country
value: RU
- key: City
value: Moscow
- key: SSD
value: '1'
- attributes:
- key: ID
value: '4'
- key: Country
value: RU
- key: City
value: Moscow
- key: SSD
value: '1'
- attributes:
- key: ID
value: '5'
- key: Country
value: RU
- key: City
value: St.Petersburg
- key: SSD
value: '1'
- attributes:
- key: ID
value: '6'
- key: Continent
value: NA
- key: City
value: NewYork
- attributes:
- key: ID
value: '7'
- key: Continent
value: AF
- key: City
value: Cairo
- attributes:
- key: ID
value: '8'
- key: Continent
value: AF
- key: City
value: Cairo
- attributes:
- key: ID
value: '9'
- key: Continent
value: SA
- key: City
value: Lima
- attributes:
- key: ID
value: '10'
- key: Continent
value: AF
- key: City
value: Cairo
- attributes:
- key: ID
value: '11'
- key: Continent
value: NA
- key: City
value: NewYork
- attributes:
- key: ID
value: '12'
- key: Continent
value: NA
- key: City
value: LosAngeles
- attributes:
- key: ID
value: '13'
- key: Continent
value: SA
- key: City
value: Lima
tests:
test:
policy:
replicas:
- count: 1
selector: SPB
- count: 2
selector: Americas
containerBackupFactor: 2
selectors:
- name: SPB
count: 1
clause: SAME
attribute: City
filter: SPBSSD
- name: Americas
count: 2
clause: DISTINCT
attribute: City
filter: Americas
filters:
- name: SPBSSD
op: AND
filters:
- name: ''
key: Country
op: EQ
value: RU
filters: []
- name: ''
key: City
op: EQ
value: St.Petersburg
filters: []
- name: ''
key: SSD
op: EQ
value: '1'
filters: []
- name: Americas
op: OR
filters:
- name: ''
key: Continent
op: EQ
value: NA
filters: []
- name: ''
key: Continent
op: EQ
value: SA
filters: []
result:
- - 1
- 4
- - 8
- 12
- 5
- 10

View file

@ -0,0 +1,52 @@
name: non-strict selections
comment: These tests specify loose selection behaviour, to allow fetching already-PUT
  objects even when there are not enough nodes to select from.
nodes:
- attributes:
- key: Country
value: Russia
- attributes:
- key: Country
value: Germany
- attributes: []
tests:
not enough nodes (backup factor):
policy:
replicas:
- count: 1
selector: MyStore
containerBackupFactor: 2
selectors:
- name: MyStore
count: 2
clause: DISTINCT
attribute: Country
filter: FromRU
filters:
- name: FromRU
key: Country
op: EQ
value: Russia
filters: []
result:
- - 0
not enough nodes (buckets):
policy:
replicas:
- count: 1
selector: MyStore
containerBackupFactor: 1
selectors:
- name: MyStore
count: 2
clause: DISTINCT
attribute: Country
filter: FromRU
filters:
- name: FromRU
key: Country
op: EQ
value: Russia
filters: []
result:
- - 0

View file

@ -0,0 +1,62 @@
name: REP X
nodes:
- publicKey: ''
addresses: []
attributes:
- key: City
value: Saint-Petersburg
state: UNSPECIFIED
- publicKey: ''
addresses: []
attributes:
- key: City
value: Moscow
state: UNSPECIFIED
- publicKey: ''
addresses: []
attributes:
- key: City
value: Berlin
state: UNSPECIFIED
- publicKey: ''
addresses: []
attributes:
- key: City
value: Paris
state: UNSPECIFIED
tests:
REP 1:
policy:
replicas:
- count: 1
containerBackupFactor: 0
selectors: []
filters: []
result:
- - 0
- 1
- 2
REP 3:
policy:
replicas:
- count: 3
containerBackupFactor: 0
selectors: []
filters: []
result:
- - 0
- 3
- 1
- 2
REP 5:
policy:
replicas:
- count: 5
containerBackupFactor: 0
selectors: []
filters: []
result:
- - 0
- 1
- 2
- 3

View file

@ -0,0 +1,56 @@
name: select with unspecified attribute
nodes:
- attributes:
- key: ID
value: '1'
- key: Country
value: RU
- key: City
value: St.Petersburg
- key: SSD
value: '0'
- attributes:
- key: ID
value: '2'
- key: Country
value: RU
- key: City
value: St.Petersburg
- key: SSD
value: '1'
- attributes:
- key: ID
value: '3'
- key: Country
value: RU
- key: City
value: Moscow
- key: SSD
value: '1'
- attributes:
- key: ID
value: '4'
- key: Country
value: RU
- key: City
value: Moscow
- key: SSD
value: '1'
tests:
test:
policy:
replicas:
- count: 1
selector: X
containerBackupFactor: 1
selectors:
- name: X
count: 4
clause: DISTINCT
filter: '*'
filters: []
result:
- - 0
- 1
- 2
- 3

View file

@ -0,0 +1,47 @@
name: invalid selections
nodes:
- attributes:
- key: Country
value: Russia
- attributes:
- key: Country
value: Germany
- attributes: []
tests:
missing filter:
policy:
replicas:
- count: 1
selector: MyStore
containerBackupFactor: 1
selectors:
- name: MyStore
count: 1
clause: DISTINCT
attribute: Country
filter: FromNL
filters:
- name: FromRU
key: Country
op: EQ
value: Russia
filters: []
error: filter not found
not enough nodes (filter results in empty set):
policy:
replicas:
- count: 1
selector: MyStore
containerBackupFactor: 2
selectors:
- name: MyStore
count: 2
clause: DISTINCT
attribute: Country
filter: FromMoon
filters:
- name: FromMoon
key: Country
op: EQ
value: Moon
filters: []

View file

@ -59,6 +59,18 @@ public class ErrorConst {
public static final String STRING_IS_TOO_BIG_TEMPLATE = "string size is too big=%s"; public static final String STRING_IS_TOO_BIG_TEMPLATE = "string size is too big=%s";
public static final String STRING_SIZE_IS_INVALID_TEMPLATE = "invalid string size=%s"; public static final String STRING_SIZE_IS_INVALID_TEMPLATE = "invalid string size=%s";
public static final String FILTER_NAME_IS_EMPTY = "Filter name for selector is empty";
public static final String INVALID_FILTER_NAME_TEMPLATE = "filter name is invalid: '%s' is reserved";
public static final String INVALID_FILTER_OPERATION_TEMPLATE = "invalid filter operation: %s";
public static final String FILTER_NOT_FOUND = "filter not found";
public static final String FILTER_NOT_FOUND_TEMPLATE = "filter not found: SELECT FROM '%s'";
public static final String NON_EMPTY_FILTERS = "simple filter contains sub-filters";
public static final String NOT_ENOUGH_NODES = "not enough nodes";
public static final String NOT_ENOUGH_NODES_TEMPLATE = "not enough nodes to SELECT from '%s'";
public static final String UNNAMED_TOP_FILTER = "unnamed top-level filter";
public static final String VECTORS_IS_NULL = "vectors cannot be null";
public static final String SELECTOR_NOT_FOUND_TEMPLATE = "selector not found: %s";
private ErrorConst() { private ErrorConst() {
} }
} }

View file

@ -1,5 +1,7 @@
package info.frostfs.sdk.constants; package info.frostfs.sdk.constants;
import java.math.BigInteger;
public class AppConst { public class AppConst {
public static final String RESERVED_PREFIX = "__SYSTEM__"; public static final String RESERVED_PREFIX = "__SYSTEM__";
@ -15,6 +17,8 @@ public class AppConst {
public static final int DEFAULT_GRPC_TIMEOUT = 5; public static final int DEFAULT_GRPC_TIMEOUT = 5;
public static final long DEFAULT_POLL_INTERVAL = 10; public static final long DEFAULT_POLL_INTERVAL = 10;
public static final BigInteger UNSIGNED_LONG_MASK = BigInteger.ONE.shiftLeft(Long.SIZE).subtract(BigInteger.ONE);
private AppConst() { private AppConst() {
} }
} }

View file

@ -5,6 +5,17 @@ import static info.frostfs.sdk.constants.AppConst.RESERVED_PREFIX;
public class AttributeConst { public class AttributeConst {
public static final String DISABLE_HOMOMORPHIC_HASHING_ATTRIBUTE = RESERVED_PREFIX + "DISABLE_HOMOMORPHIC_HASHING"; public static final String DISABLE_HOMOMORPHIC_HASHING_ATTRIBUTE = RESERVED_PREFIX + "DISABLE_HOMOMORPHIC_HASHING";
/*
* ATTRIBUTE_PRICE is a key to the node attribute that indicates
* the price in GAS tokens for storing one GB of data during one Epoch.
* */
public static final String ATTRIBUTE_PRICE = "Price";
/*
* ATTRIBUTE_CAPACITY is a key to the node attribute that indicates the total available disk space in Gigabytes.
* */
public static final String ATTRIBUTE_CAPACITY = "Capacity";
private AttributeConst() { private AttributeConst() {
} }
} }

View file

@ -1,6 +1,6 @@
package info.frostfs.sdk.dto.netmap; package info.frostfs.sdk.dto.netmap;
import info.frostfs.sdk.enums.FilterOperation; import info.frostfs.sdk.enums.netmap.FilterOperation;
import lombok.AllArgsConstructor; import lombok.AllArgsConstructor;
import lombok.Getter; import lombok.Getter;

View file

@ -0,0 +1,5 @@
package info.frostfs.sdk.dto.netmap;
public interface Hasher {
long getHash();
}

View file

@ -1,20 +1,31 @@
package info.frostfs.sdk.dto.netmap; package info.frostfs.sdk.dto.netmap;
import info.frostfs.sdk.enums.NodeState; import info.frostfs.sdk.enums.netmap.NodeState;
import lombok.Getter; import lombok.Getter;
import org.apache.commons.codec.digest.MurmurHash3;
import java.math.BigInteger;
import java.util.Collections; import java.util.Collections;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import static info.frostfs.sdk.constants.AppConst.UNSIGNED_LONG_MASK;
import static info.frostfs.sdk.constants.AttributeConst.ATTRIBUTE_CAPACITY;
import static info.frostfs.sdk.constants.AttributeConst.ATTRIBUTE_PRICE;
import static java.util.Objects.isNull;
@Getter @Getter
public class NodeInfo { public class NodeInfo implements Hasher {
private final NodeState state; private final NodeState state;
private final Version version; private final Version version;
private final List<String> addresses; private final List<String> addresses;
private final Map<String, String> attributes; private final Map<String, String> attributes;
private final byte[] publicKey; private final byte[] publicKey;
private long hash;
private BigInteger price = UNSIGNED_LONG_MASK;
public NodeInfo(NodeState state, Version version, List<String> addresses, public NodeInfo(NodeState state, Version version, List<String> addresses,
Map<String, String> attributes, byte[] publicKey) { Map<String, String> attributes, byte[] publicKey) {
this.state = state; this.state = state;
@ -23,4 +34,26 @@ public class NodeInfo {
this.attributes = Collections.unmodifiableMap(attributes); this.attributes = Collections.unmodifiableMap(attributes);
this.publicKey = publicKey; this.publicKey = publicKey;
} }
public long getHash() {
if (hash == 0) {
hash = MurmurHash3.hash128x64(publicKey, 0, publicKey.length, 0)[0];
}
return hash;
}
public BigInteger getCapacity() {
var capacity = attributes.get(ATTRIBUTE_CAPACITY);
return isNull(capacity) ? BigInteger.valueOf(0) : new BigInteger(capacity);
}
public BigInteger getPrice() {
if (price.equals(UNSIGNED_LONG_MASK)) {
var priceString = attributes.get(ATTRIBUTE_PRICE);
price = isNull(priceString) ? BigInteger.valueOf(0) : new BigInteger(priceString);
}
return price;
}
} }

View file

@ -13,6 +13,14 @@ import static info.frostfs.sdk.constants.FieldConst.EMPTY_STRING;
public class Replica { public class Replica {
private final int count; private final int count;
private final String selector; private final String selector;
private long ecDataCount;
private long ecParityCount;
public Replica(int count, String selector, int ecDataCount, int ecParityCount) {
this(count, selector);
this.ecDataCount = Integer.toUnsignedLong(ecDataCount);
this.ecParityCount = Integer.toUnsignedLong(ecParityCount);
}
public Replica(int count, String selector) { public Replica(int count, String selector) {
if (count <= 0) { if (count <= 0) {
@ -32,8 +40,11 @@ public class Replica {
); );
} }
this.count = count; this.count = count;
this.selector = EMPTY_STRING; this.selector = EMPTY_STRING;
} }
public int getCountNodes() {
return count != 0 ? count : (int) (ecDataCount + ecParityCount);
}
} }

View file

@ -1,15 +1,17 @@
package info.frostfs.sdk.dto.netmap; package info.frostfs.sdk.dto.netmap;
import info.frostfs.sdk.enums.SelectorClause; import info.frostfs.sdk.enums.netmap.SelectorClause;
import lombok.AllArgsConstructor; import lombok.AllArgsConstructor;
import lombok.Getter; import lombok.Getter;
import lombok.Setter;
@Getter @Getter
@Setter
@AllArgsConstructor @AllArgsConstructor
public class Selector { public class Selector {
private final String name; private final String name;
private final int count; private int count;
private final SelectorClause clause; private SelectorClause clause;
private final String attribute; private String attribute;
private final String filter; private String filter;
} }

View file

@ -1,4 +1,4 @@
package info.frostfs.sdk.enums; package info.frostfs.sdk.enums.netmap;
import java.util.Collections; import java.util.Collections;
import java.util.HashMap; import java.util.HashMap;

View file

@ -1,4 +1,4 @@
package info.frostfs.sdk.enums; package info.frostfs.sdk.enums.netmap;
import java.util.Collections; import java.util.Collections;
import java.util.HashMap; import java.util.HashMap;

View file

@ -1,4 +1,4 @@
package info.frostfs.sdk.enums; package info.frostfs.sdk.enums.netmap;
import java.util.Collections; import java.util.Collections;
import java.util.HashMap; import java.util.HashMap;

View file

@ -2,7 +2,7 @@ package info.frostfs.sdk.mappers.netmap;
import frostfs.netmap.Types; import frostfs.netmap.Types;
import info.frostfs.sdk.dto.netmap.Filter; import info.frostfs.sdk.dto.netmap.Filter;
import info.frostfs.sdk.enums.FilterOperation; import info.frostfs.sdk.enums.netmap.FilterOperation;
import info.frostfs.sdk.exceptions.ProcessFrostFSException; import info.frostfs.sdk.exceptions.ProcessFrostFSException;
import info.frostfs.sdk.exceptions.ValidationFrostFSException; import info.frostfs.sdk.exceptions.ValidationFrostFSException;
import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.collections4.CollectionUtils;

View file

@ -4,7 +4,7 @@ import frostfs.netmap.Service;
import frostfs.netmap.Types; import frostfs.netmap.Types;
import frostfs.netmap.Types.NodeInfo.Attribute; import frostfs.netmap.Types.NodeInfo.Attribute;
import info.frostfs.sdk.dto.netmap.NodeInfo; import info.frostfs.sdk.dto.netmap.NodeInfo;
import info.frostfs.sdk.enums.NodeState; import info.frostfs.sdk.enums.netmap.NodeState;
import info.frostfs.sdk.exceptions.ProcessFrostFSException; import info.frostfs.sdk.exceptions.ProcessFrostFSException;
import java.util.stream.Collectors; import java.util.stream.Collectors;

View file

@ -36,15 +36,17 @@ public class ReplicaMapper {
return Types.Replica.newBuilder() return Types.Replica.newBuilder()
.setCount(replica.getCount()) .setCount(replica.getCount())
.setSelector(replica.getSelector()) .setSelector(replica.getSelector())
.setEcDataCount((int) replica.getEcDataCount())
.setEcParityCount((int) replica.getEcParityCount())
.build(); .build();
} }
public static Replica[] toModels(List<Types.Replica> filters) { public static Replica[] toModels(List<Types.Replica> replicas) {
if (CollectionUtils.isEmpty(filters)) { if (CollectionUtils.isEmpty(replicas)) {
return null; return null;
} }
return filters.stream().map(ReplicaMapper::toModel).toArray(Replica[]::new); return replicas.stream().map(ReplicaMapper::toModel).toArray(Replica[]::new);
} }
public static Replica toModel(Types.Replica replica) { public static Replica toModel(Types.Replica replica) {
@ -52,6 +54,11 @@ public class ReplicaMapper {
return null; return null;
} }
return new Replica(replica.getCount(), replica.getSelector()); return new Replica(
replica.getCount(),
replica.getSelector(),
replica.getEcDataCount(),
replica.getEcParityCount()
);
} }
} }

View file

@ -2,7 +2,7 @@ package info.frostfs.sdk.mappers.netmap;
import frostfs.netmap.Types; import frostfs.netmap.Types;
import info.frostfs.sdk.dto.netmap.Selector; import info.frostfs.sdk.dto.netmap.Selector;
import info.frostfs.sdk.enums.SelectorClause; import info.frostfs.sdk.enums.netmap.SelectorClause;
import info.frostfs.sdk.exceptions.ProcessFrostFSException; import info.frostfs.sdk.exceptions.ProcessFrostFSException;
import info.frostfs.sdk.exceptions.ValidationFrostFSException; import info.frostfs.sdk.exceptions.ValidationFrostFSException;
import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.collections4.CollectionUtils;

View file

@ -2,7 +2,7 @@ package info.frostfs.sdk.mappers.netmap;
import frostfs.netmap.Types; import frostfs.netmap.Types;
import info.frostfs.sdk.dto.netmap.Filter; import info.frostfs.sdk.dto.netmap.Filter;
import info.frostfs.sdk.enums.FilterOperation; import info.frostfs.sdk.enums.netmap.FilterOperation;
import info.frostfs.sdk.exceptions.ProcessFrostFSException; import info.frostfs.sdk.exceptions.ProcessFrostFSException;
import info.frostfs.sdk.exceptions.ValidationFrostFSException; import info.frostfs.sdk.exceptions.ValidationFrostFSException;
import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Test;

View file

@ -3,7 +3,7 @@ package info.frostfs.sdk.mappers.netmap;
import com.google.protobuf.ByteString; import com.google.protobuf.ByteString;
import frostfs.netmap.Service; import frostfs.netmap.Service;
import frostfs.netmap.Types; import frostfs.netmap.Types;
import info.frostfs.sdk.enums.NodeState; import info.frostfs.sdk.enums.netmap.NodeState;
import info.frostfs.sdk.exceptions.ProcessFrostFSException; import info.frostfs.sdk.exceptions.ProcessFrostFSException;
import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.ParameterizedTest;

View file

@ -2,7 +2,7 @@ package info.frostfs.sdk.mappers.netmap;
import frostfs.netmap.Types; import frostfs.netmap.Types;
import info.frostfs.sdk.dto.netmap.Selector; import info.frostfs.sdk.dto.netmap.Selector;
import info.frostfs.sdk.enums.SelectorClause; import info.frostfs.sdk.enums.netmap.SelectorClause;
import info.frostfs.sdk.exceptions.ProcessFrostFSException; import info.frostfs.sdk.exceptions.ProcessFrostFSException;
import info.frostfs.sdk.exceptions.ValidationFrostFSException; import info.frostfs.sdk.exceptions.ValidationFrostFSException;
import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Test;

12
pom.xml
View file

@ -17,7 +17,7 @@
</modules> </modules>
<properties> <properties>
<revision>0.10.0</revision> <revision>0.11.0</revision>
<maven.compiler.source>11</maven.compiler.source> <maven.compiler.source>11</maven.compiler.source>
<maven.compiler.target>11</maven.compiler.target> <maven.compiler.target>11</maven.compiler.target>
@ -42,6 +42,11 @@
<artifactId>commons-lang3</artifactId> <artifactId>commons-lang3</artifactId>
<version>3.14.0</version> <version>3.14.0</version>
</dependency> </dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
<version>1.18.0</version>
</dependency>
<dependency> <dependency>
<groupId>org.projectlombok</groupId> <groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId> <artifactId>lombok</artifactId>
@ -78,6 +83,11 @@
<version>${mockito.version}</version> <version>${mockito.version}</version>
<scope>test</scope> <scope>test</scope>
</dependency> </dependency>
<dependency>
<groupId>org.yaml</groupId>
<artifactId>snakeyaml</artifactId>
<version>2.4</version>
</dependency>
</dependencies> </dependencies>
<build> <build>