From f5fe2289915c744d2f86ed930f298bfc92c418c4 Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Mon, 15 Apr 2024 16:06:52 -0700 Subject: [PATCH 01/19] Adds Writeable, ToXContent functionality to ImmutableCacheStatsHolder Signed-off-by: Peter Alfonsi --- .../cache/store/disk/EhcacheDiskCache.java | 2 +- .../common/cache/stats/CacheStatsHolder.java | 10 +- .../cache/stats/ImmutableCacheStats.java | 27 +- .../stats/ImmutableCacheStatsHolder.java | 269 +++++++++++++++++- .../cache/store/OpenSearchOnHeapCache.java | 2 +- .../cache/stats/CacheStatsHolderTests.java | 58 ++-- .../stats/ImmutableCacheStatsHolderTests.java | 204 ++++++++++++- 7 files changed, 538 insertions(+), 34 deletions(-) diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java index 185d51732a116..ba0505b1c871d 100644 --- a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java @@ -162,7 +162,7 @@ private EhcacheDiskCache(Builder builder) { this.ehCacheEventListener = new EhCacheEventListener(builder.getRemovalListener(), builder.getWeigher()); this.cache = buildCache(Duration.ofMillis(expireAfterAccess.getMillis()), builder); List dimensionNames = Objects.requireNonNull(builder.dimensionNames, "Dimension names can't be null"); - this.cacheStatsHolder = new CacheStatsHolder(dimensionNames); + this.cacheStatsHolder = new CacheStatsHolder(dimensionNames, EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME); } @SuppressWarnings({ "rawtypes" }) diff --git a/server/src/main/java/org/opensearch/common/cache/stats/CacheStatsHolder.java b/server/src/main/java/org/opensearch/common/cache/stats/CacheStatsHolder.java index a8b7c27ef9e79..296015983fc7f 100644 --- a/server/src/main/java/org/opensearch/common/cache/stats/CacheStatsHolder.java +++ b/server/src/main/java/org/opensearch/common/cache/stats/CacheStatsHolder.java @@ -8,6 +8,8 @@ package org.opensearch.common.cache.stats; +import org.opensearch.common.annotation.ExperimentalApi; + import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -28,6 +30,7 @@ * * @opensearch.experimental */ +@ExperimentalApi public class CacheStatsHolder { // The list of permitted dimensions. Should be ordered from "outermost" to "innermost", as you would like to @@ -41,9 +44,12 @@ public class CacheStatsHolder { // To avoid sync problems, obtain a lock before creating or removing nodes in the stats tree. // No lock is needed to edit stats on existing nodes. private final Lock lock = new ReentrantLock(); + // The name of the cache type using these stats + private final String storeName; - public CacheStatsHolder(List dimensionNames) { + public CacheStatsHolder(List dimensionNames, String storeName) { this.dimensionNames = Collections.unmodifiableList(dimensionNames); + this.storeName = storeName; this.statsRoot = new Node("", true); // The root node has the empty string as its dimension value } @@ -157,7 +163,7 @@ private boolean internalIncrementHelper( * Produce an immutable version of these stats. 
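     * The snapshot carries the store name along with the stats tree, so API responses can identify which cache produced it.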
*/ public ImmutableCacheStatsHolder getImmutableCacheStatsHolder() { - return new ImmutableCacheStatsHolder(statsRoot.snapshot(), dimensionNames); + return new ImmutableCacheStatsHolder(statsRoot.snapshot(), dimensionNames, storeName); } public void removeDimensions(List dimensionValues) { diff --git a/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStats.java b/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStats.java index 7549490fd6b74..985f6e86481f3 100644 --- a/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStats.java +++ b/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStats.java @@ -12,6 +12,9 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; import java.util.Objects; @@ -22,7 +25,7 @@ * @opensearch.experimental */ @ExperimentalApi -public class ImmutableCacheStats implements Writeable { // TODO: Make this extend ToXContent (in API PR) +public class ImmutableCacheStats implements Writeable, ToXContent { private final long hits; private final long misses; private final long evictions; @@ -100,4 +103,26 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(hits, misses, evictions, sizeInBytes, entries); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + // We don't write the header in CacheStatsResponse's toXContent, because it doesn't know the name of aggregation it's part of + builder.humanReadableField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, new ByteSizeValue(sizeInBytes)); + builder.field(Fields.EVICTIONS, evictions); + builder.field(Fields.HIT_COUNT, hits); + builder.field(Fields.MISS_COUNT, misses); + builder.field(Fields.ENTRIES, entries); + return builder; + } + + public static final class Fields { + public static final String MEMORY_SIZE = "size"; + public static final String MEMORY_SIZE_IN_BYTES = "size_in_bytes"; + // TODO: This might not be memory as it could be partially on disk, so I've changed it, but should it be consistent with the earlier + // field? 
+ public static final String EVICTIONS = "evictions"; + public static final String HIT_COUNT = "hit_count"; + public static final String MISS_COUNT = "miss_count"; + public static final String ENTRIES = "entries"; + } } diff --git a/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java b/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java index 12e325046d83b..d4b98cc1dac5e 100644 --- a/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java +++ b/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java @@ -9,11 +9,19 @@ package org.opensearch.common.cache.stats; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.TreeMap; /** @@ -23,15 +31,94 @@ */ @ExperimentalApi -public class ImmutableCacheStatsHolder { // TODO: extends Writeable, ToXContent +public class ImmutableCacheStatsHolder implements Writeable, ToXContent { // An immutable snapshot of a stats within a CacheStatsHolder, containing all the stats maintained by the cache. // Pkg-private for testing. final Node statsRoot; final List dimensionNames; + // The name of the cache type producing these stats. Returned in API response. + final String storeName; + public static String STORE_NAME_FIELD = "store_name"; - public ImmutableCacheStatsHolder(Node statsRoot, List dimensionNames) { + public ImmutableCacheStatsHolder(Node statsRoot, List dimensionNames, String storeName) { this.statsRoot = statsRoot; this.dimensionNames = dimensionNames; + this.storeName = storeName; + } + + /** + * Should not be used with StreamOutputs produced using writeToWithClassName. + */ + public ImmutableCacheStatsHolder(StreamInput in) throws IOException { + // Because we write in preorder order, the parent of the next node we read will always be one of the ancestors + // of the last node we read. This allows us to avoid ambiguity if nodes have the same dimension value, without + // having to serialize the whole path to each node. + this.dimensionNames = List.of(in.readStringArray()); + this.statsRoot = new Node("", false, new ImmutableCacheStats(0, 0, 0, 0, 0)); + List ancestorsOfLastRead = List.of(statsRoot); + while (ancestorsOfLastRead != null) { + ancestorsOfLastRead = readAndAttachDimensionNode(in, ancestorsOfLastRead); + } + // Finally, update sum-of-children stats for the root node + CacheStats totalStats = new CacheStats(); + for (Node child : statsRoot.children.values()) { + totalStats.add(child.getStats()); + } + statsRoot.incrementStats(totalStats.immutableSnapshot()); + this.storeName = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + // Write each node in preorder order, along with its depth. + // Then, when rebuilding the tree from the stream, we can always find the correct parent to attach each node to. 
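+        // Resulting layout (inferred from the read/write methods below): the dimension names array; then each
+        // non-root node in preorder as [hasNext=true][depth][dimensionValue][stats][isLeaf]; a single false
+        // terminating the node list; and finally the store name.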
+ out.writeStringArray(dimensionNames.toArray(new String[0])); + for (Node child : statsRoot.children.values()) { + writeDimensionNodeRecursive(out, child, 1); + } + out.writeBoolean(false); // Write false to signal there are no more nodes + out.writeString(storeName); + } + + private void writeDimensionNodeRecursive(StreamOutput out, Node node, int depth) throws IOException { + out.writeBoolean(true); // Signals there is a following node to deserialize + out.writeVInt(depth); + out.writeString(node.getDimensionValue()); + node.getStats().writeTo(out); + + if (!node.children.isEmpty()) { + // Not a leaf node + out.writeBoolean(false); // Write true to indicate this is not a leaf node + for (Node child : node.children.values()) { + writeDimensionNodeRecursive(out, child, depth + 1); + } + } else { + out.writeBoolean(true); // Write true to indicate this is a leaf node + } + } + + /** + * Reads a serialized dimension node, attaches it to its appropriate place in the tree, and returns the list of + * ancestors of the newly attached node. + */ + private List readAndAttachDimensionNode(StreamInput in, List ancestorsOfLastRead) throws IOException { + boolean hasNextNode = in.readBoolean(); + if (hasNextNode) { + int depth = in.readVInt(); + String nodeDimensionValue = in.readString(); + ImmutableCacheStats stats = new ImmutableCacheStats(in); + boolean isLeafNode = in.readBoolean(); + + Node result = new Node(nodeDimensionValue, isLeafNode, stats); + Node parent = ancestorsOfLastRead.get(depth - 1); + parent.getChildren().put(nodeDimensionValue, result); + List ancestors = new ArrayList<>(ancestorsOfLastRead.subList(0, depth)); + ancestors.add(result); + return ancestors; + } else { + // No more nodes + return null; + } } public ImmutableCacheStats getTotalStats() { @@ -69,6 +156,160 @@ public ImmutableCacheStats getStatsForDimensionValues(List dimensionValu return current.stats; } + /** + * Returns a new tree containing the stats aggregated by the levels passed in. The root node is a dummy node, + * whose name and value are null. The new tree only has dimensions matching the levels passed in. + */ + Node aggregateByLevels(List levels) { + List filteredLevels = filterLevels(levels); + Node newRoot = new Node("", false, statsRoot.getStats()); + for (Node child : statsRoot.children.values()) { + aggregateByLevelsHelper(newRoot, child, filteredLevels, 0); + } + return newRoot; + } + + /** + * Because we may have to combine nodes that have the same dimension name, I don't think there's a clean way to aggregate + * fully recursively while also passing in a completed map of children nodes before constructing the parent node. + * For this reason, in this function we have to build the new tree top down rather than bottom up. + * We use private methods allowing us to add children to/increment the stats for an existing node. + * This should be ok because the resulting trees are short-lived objects that are not exposed anywhere outside this class. + */ + private void aggregateByLevelsHelper(Node parentInNewTree, Node currentInOriginalTree, List levels, int depth) { + if (levels.contains(dimensionNames.get(depth))) { + // If this node is in a level we want to aggregate, create a new dimension node with the same value and stats, and connect it to + // the last parent node in the new tree. If it already exists, increment it instead. 
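+            // For example, if only the innermost dimension is kept as a level, every original node sharing that
+            // dimension value collapses into one node under the new root, with stats summed across all original paths.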
+ String dimensionValue = currentInOriginalTree.getDimensionValue(); + Node nodeInNewTree = parentInNewTree.children.get(dimensionValue); + if (nodeInNewTree == null) { + // Create new node with stats matching the node from the original tree + int indexOfLastLevel = dimensionNames.indexOf(levels.get(levels.size() - 1)); + boolean isLeafNode = depth == indexOfLastLevel; // If this is the last level we aggregate, the new node should be a leaf + // node + nodeInNewTree = new Node(dimensionValue, isLeafNode, currentInOriginalTree.getStats()); + parentInNewTree.addChild(dimensionValue, nodeInNewTree); + } else { + // Otherwise increment existing stats + nodeInNewTree.incrementStats(currentInOriginalTree.getStats()); + } + // Finally set the parent node to be this node for the next callers of this function + parentInNewTree = nodeInNewTree; + } + + if (!currentInOriginalTree.children.isEmpty()) { + // Not a leaf node + for (Map.Entry childEntry : currentInOriginalTree.children.entrySet()) { + Node child = childEntry.getValue(); + aggregateByLevelsHelper(parentInNewTree, child, levels, depth + 1); + } + } + } + + /** + * Filters out levels that aren't in dimensionNames. Unrecognized levels are ignored. + */ + private List filterLevels(List levels) { + List filtered = new ArrayList<>(); + for (String level : levels) { + if (dimensionNames.contains(level)) { + filtered.add(level); + } + } + if (filtered.isEmpty()) { + throw new IllegalArgumentException("Levels cannot have size 0"); + } + return filtered; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + // Always show total stats, regardless of levels + getTotalStats().toXContent(builder, params); + + List levels = getLevels(params); + if (levels != null) { + List filteredLevels = filterLevels(levels); + toXContentForLevels(builder, params, filteredLevels); + } + + // Also add the store name for the cache that produced the stats + builder.field(STORE_NAME_FIELD, storeName); + return builder; + } + + XContentBuilder toXContentForLevels(XContentBuilder builder, Params params, List levels) throws IOException { + Node aggregated = aggregateByLevels(levels); + // Depth -1 corresponds to the dummy root node + toXContentForLevelsHelper(-1, aggregated, levels, builder, params); + return builder; + } + + private void toXContentForLevelsHelper(int depth, Node current, List levels, XContentBuilder builder, Params params) + throws IOException { + if (depth >= 0) { + builder.startObject(current.dimensionValue); + } + + if (depth == levels.size() - 1) { + // This is a leaf node + current.getStats().toXContent(builder, params); + } else { + builder.startObject(levels.get(depth + 1)); + for (Node nextNode : current.children.values()) { + toXContentForLevelsHelper(depth + 1, nextNode, levels, builder, params); + } + builder.endObject(); + } + + if (depth >= 0) { + builder.endObject(); + } + } + + private List getLevels(Params params) { + String levels = params.param("level"); + if (levels == null) { + return null; + } + return List.of(levels.split(",")); + } + + @Override + public boolean equals(Object o) { + if (o == null || o.getClass() != ImmutableCacheStatsHolder.class) { + return false; + } + ImmutableCacheStatsHolder other = (ImmutableCacheStatsHolder) o; + if (!dimensionNames.equals(other.dimensionNames) || !storeName.equals(other.storeName)) { + return false; + } + return equalsHelper(statsRoot, other.getStatsRoot()); + } + + private boolean equalsHelper(Node thisNode, Node otherNode) { + if 
(otherNode == null) { + return false; + } + if (!thisNode.getStats().equals(otherNode.getStats())) { + return false; + } + boolean allChildrenMatch = true; + for (String childValue : thisNode.getChildren().keySet()) { + allChildrenMatch = equalsHelper(thisNode.children.get(childValue), otherNode.children.get(childValue)); + if (!allChildrenMatch) { + return false; + } + } + return allChildrenMatch; + } + + @Override + public int hashCode() { + // Should be sufficient to hash based on the total stats value (found in the root node) + return Objects.hash(statsRoot.stats, dimensionNames); + } + // A similar class to CacheStatsHolder.Node, which uses an ordered TreeMap and holds immutable CacheStatsSnapshot as its stats. static class Node { private final String dimensionValue; @@ -76,8 +317,8 @@ static class Node { // The stats for this node. If a leaf node, corresponds to the stats for this combination of dimensions; if not, // contains the sum of its children's stats. - private final ImmutableCacheStats stats; - private static final Map EMPTY_CHILDREN_MAP = new HashMap<>(); + private ImmutableCacheStats stats; + private static final Map EMPTY_CHILDREN_MAP = Collections.unmodifiableMap(new HashMap<>()); Node(String dimensionValue, TreeMap snapshotChildren, ImmutableCacheStats stats) { this.dimensionValue = dimensionValue; @@ -89,6 +330,16 @@ static class Node { } } + private Node(String dimensionValue, boolean isLeafNode, ImmutableCacheStats stats) { + this.dimensionValue = dimensionValue; + this.stats = stats; + if (isLeafNode) { + this.children = EMPTY_CHILDREN_MAP; + } else { + this.children = new TreeMap<>(); + } + } + Map getChildren() { return children; } @@ -100,12 +351,18 @@ public ImmutableCacheStats getStats() { public String getDimensionValue() { return dimensionValue; } + + private void addChild(String dimensionValue, Node child) { + this.children.putIfAbsent(dimensionValue, child); + } + + private void incrementStats(ImmutableCacheStats toIncrement) { + stats = ImmutableCacheStats.addSnapshots(stats, toIncrement); + } } // pkg-private for testing Node getStatsRoot() { return statsRoot; } - - // TODO (in API PR): Produce XContent based on aggregateByLevels() } diff --git a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java index 29e5667c9f27d..2c6b70ea3a693 100644 --- a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java +++ b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java @@ -62,7 +62,7 @@ public OpenSearchOnHeapCache(Builder builder) { } cache = cacheBuilder.build(); this.dimensionNames = Objects.requireNonNull(builder.dimensionNames, "Dimension names can't be null"); - this.cacheStatsHolder = new CacheStatsHolder(dimensionNames); + this.cacheStatsHolder = new CacheStatsHolder(dimensionNames, OpenSearchOnHeapCacheFactory.NAME); this.removalListener = builder.getRemovalListener(); this.weigher = builder.getWeigher(); } diff --git a/server/src/test/java/org/opensearch/common/cache/stats/CacheStatsHolderTests.java b/server/src/test/java/org/opensearch/common/cache/stats/CacheStatsHolderTests.java index 390cd4d601a4b..88a4eb840dec2 100644 --- a/server/src/test/java/org/opensearch/common/cache/stats/CacheStatsHolderTests.java +++ b/server/src/test/java/org/opensearch/common/cache/stats/CacheStatsHolderTests.java @@ -22,9 +22,11 @@ import java.util.concurrent.CountDownLatch; public class CacheStatsHolderTests 
extends OpenSearchTestCase { + private final String storeName = "dummy_store"; + public void testAddAndGet() throws Exception { List dimensionNames = List.of("dim1", "dim2", "dim3", "dim4"); - CacheStatsHolder cacheStatsHolder = new CacheStatsHolder(dimensionNames); + CacheStatsHolder cacheStatsHolder = new CacheStatsHolder(dimensionNames, storeName); Map> usedDimensionValues = CacheStatsHolderTests.getUsedDimensionValues(cacheStatsHolder, 10); Map, CacheStats> expected = CacheStatsHolderTests.populateStats(cacheStatsHolder, usedDimensionValues, 1000, 10); @@ -53,7 +55,7 @@ public void testAddAndGet() throws Exception { public void testReset() throws Exception { List dimensionNames = List.of("dim1", "dim2"); - CacheStatsHolder cacheStatsHolder = new CacheStatsHolder(dimensionNames); + CacheStatsHolder cacheStatsHolder = new CacheStatsHolder(dimensionNames, storeName); Map> usedDimensionValues = getUsedDimensionValues(cacheStatsHolder, 10); Map, CacheStats> expected = populateStats(cacheStatsHolder, usedDimensionValues, 100, 10); @@ -72,7 +74,7 @@ public void testReset() throws Exception { public void testDropStatsForDimensions() throws Exception { List dimensionNames = List.of("dim1", "dim2"); - CacheStatsHolder cacheStatsHolder = new CacheStatsHolder(dimensionNames); + CacheStatsHolder cacheStatsHolder = new CacheStatsHolder(dimensionNames, storeName); // Create stats for the following dimension sets List> populatedStats = List.of(List.of("A1", "B1"), List.of("A2", "B2"), List.of("A2", "B3")); @@ -108,7 +110,7 @@ public void testDropStatsForDimensions() throws Exception { public void testCount() throws Exception { List dimensionNames = List.of("dim1", "dim2"); - CacheStatsHolder cacheStatsHolder = new CacheStatsHolder(dimensionNames); + CacheStatsHolder cacheStatsHolder = new CacheStatsHolder(dimensionNames, storeName); Map> usedDimensionValues = getUsedDimensionValues(cacheStatsHolder, 10); Map, CacheStats> expected = populateStats(cacheStatsHolder, usedDimensionValues, 100, 10); @@ -121,7 +123,7 @@ public void testCount() throws Exception { public void testConcurrentRemoval() throws Exception { List dimensionNames = List.of("dim1", "dim2"); - CacheStatsHolder cacheStatsHolder = new CacheStatsHolder(dimensionNames); + CacheStatsHolder cacheStatsHolder = new CacheStatsHolder(dimensionNames, storeName); // Create stats for the following dimension sets List> populatedStats = List.of(List.of("A1", "B1"), List.of("A2", "B2"), List.of("A2", "B3")); @@ -186,32 +188,46 @@ static Map, CacheStats> populateStats( int numDistinctValuePairs, int numRepetitionsPerValue ) throws InterruptedException { + return populateStats(List.of(cacheStatsHolder), usedDimensionValues, numDistinctValuePairs, numRepetitionsPerValue); + } + + static Map, CacheStats> populateStats( + List cacheStatsHolders, + Map> usedDimensionValues, + int numDistinctValuePairs, + int numRepetitionsPerValue + ) throws InterruptedException { + for (CacheStatsHolder statsHolder : cacheStatsHolders) { + assertEquals(cacheStatsHolders.get(0).getDimensionNames(), statsHolder.getDimensionNames()); + } Map, CacheStats> expected = new ConcurrentHashMap<>(); Thread[] threads = new Thread[numDistinctValuePairs]; CountDownLatch countDownLatch = new CountDownLatch(numDistinctValuePairs); Random rand = Randomness.get(); List> dimensionsForThreads = new ArrayList<>(); for (int i = 0; i < numDistinctValuePairs; i++) { - dimensionsForThreads.add(getRandomDimList(cacheStatsHolder.getDimensionNames(), usedDimensionValues, true, rand)); + 
dimensionsForThreads.add(getRandomDimList(cacheStatsHolders.get(0).getDimensionNames(), usedDimensionValues, true, rand)); int finalI = i; threads[i] = new Thread(() -> { Random threadRand = Randomness.get(); List dimensions = dimensionsForThreads.get(finalI); expected.computeIfAbsent(dimensions, (key) -> new CacheStats()); - for (int j = 0; j < numRepetitionsPerValue; j++) { - CacheStats statsToInc = new CacheStats( - threadRand.nextInt(10), - threadRand.nextInt(10), - threadRand.nextInt(10), - threadRand.nextInt(5000), - threadRand.nextInt(10) - ); - expected.get(dimensions).hits.inc(statsToInc.getHits()); - expected.get(dimensions).misses.inc(statsToInc.getMisses()); - expected.get(dimensions).evictions.inc(statsToInc.getEvictions()); - expected.get(dimensions).sizeInBytes.inc(statsToInc.getSizeInBytes()); - expected.get(dimensions).entries.inc(statsToInc.getEntries()); - CacheStatsHolderTests.populateStatsHolderFromStatsValueMap(cacheStatsHolder, Map.of(dimensions, statsToInc)); + for (CacheStatsHolder cacheStatsHolder : cacheStatsHolders) { + for (int j = 0; j < numRepetitionsPerValue; j++) { + CacheStats statsToInc = new CacheStats( + threadRand.nextInt(10), + threadRand.nextInt(10), + threadRand.nextInt(10), + threadRand.nextInt(5000), + threadRand.nextInt(10) + ); + expected.get(dimensions).hits.inc(statsToInc.getHits()); + expected.get(dimensions).misses.inc(statsToInc.getMisses()); + expected.get(dimensions).evictions.inc(statsToInc.getEvictions()); + expected.get(dimensions).sizeInBytes.inc(statsToInc.getSizeInBytes()); + expected.get(dimensions).entries.inc(statsToInc.getEntries()); + CacheStatsHolderTests.populateStatsHolderFromStatsValueMap(cacheStatsHolder, Map.of(dimensions, statsToInc)); + } } countDownLatch.countDown(); }); @@ -265,7 +281,7 @@ private void assertSumOfChildrenStats(CacheStatsHolder.Node current) { } } - static void populateStatsHolderFromStatsValueMap(CacheStatsHolder cacheStatsHolder, Map, CacheStats> statsMap) { + public static void populateStatsHolderFromStatsValueMap(CacheStatsHolder cacheStatsHolder, Map, CacheStats> statsMap) { for (Map.Entry, CacheStats> entry : statsMap.entrySet()) { CacheStats stats = entry.getValue(); List dims = entry.getKey(); diff --git a/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java b/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java index 933b8abd6e392..bf8056232f365 100644 --- a/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java +++ b/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java @@ -8,16 +8,64 @@ package org.opensearch.common.cache.stats; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.BytesStreamInput; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.test.OpenSearchTestCase; +import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.function.BiConsumer; public class ImmutableCacheStatsHolderTests extends OpenSearchTestCase { + private final String storeName = "dummy_store"; + + public void testSerialization() throws Exception { + List 
dimensionNames = List.of("dim1", "dim2", "dim3"); + CacheStatsHolder statsHolder = new CacheStatsHolder(dimensionNames, storeName); + Map> usedDimensionValues = CacheStatsHolderTests.getUsedDimensionValues(statsHolder, 10); + CacheStatsHolderTests.populateStats(statsHolder, usedDimensionValues, 100, 10); + ImmutableCacheStatsHolder stats = statsHolder.getImmutableCacheStatsHolder(); + + BytesStreamOutput os = new BytesStreamOutput(); + stats.writeTo(os); + BytesStreamInput is = new BytesStreamInput(BytesReference.toBytes(os.bytes())); + ImmutableCacheStatsHolder deserialized = new ImmutableCacheStatsHolder(is); + + assertEquals(stats.dimensionNames, deserialized.dimensionNames); + assertEquals(stats.storeName, deserialized.storeName); + + assertEquals(stats, deserialized); + } + + public void testEquals() throws Exception { + List dimensionNames = List.of("dim1", "dim2", "dim3"); + CacheStatsHolder statsHolder = new CacheStatsHolder(dimensionNames, storeName); + CacheStatsHolder differentStoreNameStatsHolder = new CacheStatsHolder(dimensionNames, "nonMatchingStoreName"); + CacheStatsHolder nonMatchingStatsHolder = new CacheStatsHolder(dimensionNames, storeName); + Map> usedDimensionValues = CacheStatsHolderTests.getUsedDimensionValues(statsHolder, 10); + CacheStatsHolderTests.populateStats(List.of(statsHolder, differentStoreNameStatsHolder), usedDimensionValues, 100, 10); + CacheStatsHolderTests.populateStats(nonMatchingStatsHolder, usedDimensionValues, 100, 10); + ImmutableCacheStatsHolder stats = statsHolder.getImmutableCacheStatsHolder(); + + ImmutableCacheStatsHolder secondStats = statsHolder.getImmutableCacheStatsHolder(); + assertEquals(stats, secondStats); + ImmutableCacheStatsHolder nonMatchingStats = nonMatchingStatsHolder.getImmutableCacheStatsHolder(); + assertNotEquals(stats, nonMatchingStats); + ImmutableCacheStatsHolder differentStoreNameStats = differentStoreNameStatsHolder.getImmutableCacheStatsHolder(); + assertNotEquals(stats, differentStoreNameStats); + } public void testGet() throws Exception { List dimensionNames = List.of("dim1", "dim2", "dim3", "dim4"); - CacheStatsHolder cacheStatsHolder = new CacheStatsHolder(dimensionNames); + CacheStatsHolder cacheStatsHolder = new CacheStatsHolder(dimensionNames, storeName); Map> usedDimensionValues = CacheStatsHolderTests.getUsedDimensionValues(cacheStatsHolder, 10); Map, CacheStats> expected = CacheStatsHolderTests.populateStats(cacheStatsHolder, usedDimensionValues, 1000, 10); ImmutableCacheStatsHolder stats = cacheStatsHolder.getImmutableCacheStatsHolder(); @@ -52,7 +100,7 @@ public void testGet() throws Exception { public void testEmptyDimsList() throws Exception { // If the dimension list is empty, the tree should have only the root node containing the total stats. 
- CacheStatsHolder cacheStatsHolder = new CacheStatsHolder(List.of()); + CacheStatsHolder cacheStatsHolder = new CacheStatsHolder(List.of(), storeName); Map> usedDimensionValues = CacheStatsHolderTests.getUsedDimensionValues(cacheStatsHolder, 100); CacheStatsHolderTests.populateStats(cacheStatsHolder, usedDimensionValues, 10, 100); ImmutableCacheStatsHolder stats = cacheStatsHolder.getImmutableCacheStatsHolder(); @@ -62,6 +110,158 @@ public void testEmptyDimsList() throws Exception { assertEquals(stats.getTotalStats(), statsRoot.getStats()); } + public void testAggregateByAllDimensions() throws Exception { + // Aggregating with all dimensions as levels should just give us the same values that were in the original map + List dimensionNames = List.of("dim1", "dim2", "dim3", "dim4"); + CacheStatsHolder statsHolder = new CacheStatsHolder(dimensionNames, storeName); + Map> usedDimensionValues = CacheStatsHolderTests.getUsedDimensionValues(statsHolder, 10); + Map, CacheStats> expected = CacheStatsHolderTests.populateStats(statsHolder, usedDimensionValues, 1000, 10); + ImmutableCacheStatsHolder stats = statsHolder.getImmutableCacheStatsHolder(); + + ImmutableCacheStatsHolder.Node aggregated = stats.aggregateByLevels(dimensionNames); + for (Map.Entry, CacheStats> expectedEntry : expected.entrySet()) { + List dimensionValues = new ArrayList<>(); + for (String dimValue : expectedEntry.getKey()) { + dimensionValues.add(dimValue); + } + assertEquals(expectedEntry.getValue().immutableSnapshot(), getNode(dimensionValues, aggregated).getStats()); + } + assertSumOfChildrenStats(aggregated); + } + + public void testAggregateBySomeDimensions() throws Exception { + List dimensionNames = List.of("dim1", "dim2", "dim3", "dim4"); + CacheStatsHolder statsHolder = new CacheStatsHolder(dimensionNames, storeName); + Map> usedDimensionValues = CacheStatsHolderTests.getUsedDimensionValues(statsHolder, 10); + Map, CacheStats> expected = CacheStatsHolderTests.populateStats(statsHolder, usedDimensionValues, 1000, 10); + ImmutableCacheStatsHolder stats = statsHolder.getImmutableCacheStatsHolder(); + + for (int i = 0; i < (1 << dimensionNames.size()); i++) { + // Test each combination of possible levels + List levels = new ArrayList<>(); + for (int nameIndex = 0; nameIndex < dimensionNames.size(); nameIndex++) { + if ((i & (1 << nameIndex)) != 0) { + levels.add(dimensionNames.get(nameIndex)); + } + } + if (levels.size() == 0) { + assertThrows(IllegalArgumentException.class, () -> stats.aggregateByLevels(levels)); + } else { + ImmutableCacheStatsHolder.Node aggregated = stats.aggregateByLevels(levels); + Map, ImmutableCacheStatsHolder.Node> aggregatedLeafNodes = getAllLeafNodes(aggregated); + + for (Map.Entry, ImmutableCacheStatsHolder.Node> aggEntry : aggregatedLeafNodes.entrySet()) { + CacheStats expectedCounter = new CacheStats(); + for (List expectedDims : expected.keySet()) { + if (expectedDims.containsAll(aggEntry.getKey())) { + expectedCounter.add(expected.get(expectedDims)); + } + } + assertEquals(expectedCounter.immutableSnapshot(), aggEntry.getValue().getStats()); + } + assertSumOfChildrenStats(aggregated); + } + } + } + + public void testXContentForLevels() throws Exception { + List dimensionNames = List.of("A", "B", "C"); + + CacheStatsHolder statsHolder = new CacheStatsHolder(dimensionNames, storeName); + CacheStatsHolderTests.populateStatsHolderFromStatsValueMap( + statsHolder, + Map.of( + List.of("A1", "B1", "C1"), + new CacheStats(1, 1, 1, 1, 1), + List.of("A1", "B1", "C2"), + new CacheStats(2, 2, 2, 2, 2), 
+ List.of("A1", "B2", "C1"), + new CacheStats(3, 3, 3, 3, 3), + List.of("A2", "B1", "C3"), + new CacheStats(4, 4, 4, 4, 4) + ) + ); + ImmutableCacheStatsHolder stats = statsHolder.getImmutableCacheStatsHolder(); + + XContentBuilder builder = XContentFactory.jsonBuilder(); + ToXContent.Params params = ToXContent.EMPTY_PARAMS; + + builder.startObject(); + stats.toXContentForLevels(builder, params, List.of("A", "B", "C")); + builder.endObject(); + String resultString = builder.toString(); + Map result = XContentHelper.convertToMap(MediaTypeRegistry.JSON.xContent(), resultString, true); + + Map> fieldNamesMap = Map.of( + ImmutableCacheStats.Fields.MEMORY_SIZE_IN_BYTES, + (counter, value) -> counter.sizeInBytes.inc(value), + ImmutableCacheStats.Fields.EVICTIONS, + (counter, value) -> counter.evictions.inc(value), + ImmutableCacheStats.Fields.HIT_COUNT, + (counter, value) -> counter.hits.inc(value), + ImmutableCacheStats.Fields.MISS_COUNT, + (counter, value) -> counter.misses.inc(value), + ImmutableCacheStats.Fields.ENTRIES, + (counter, value) -> counter.entries.inc(value) + ); + + Map, ImmutableCacheStatsHolder.Node> leafNodes = getAllLeafNodes(stats.getStatsRoot()); + for (Map.Entry, ImmutableCacheStatsHolder.Node> entry : leafNodes.entrySet()) { + List xContentKeys = new ArrayList<>(); + for (int i = 0; i < dimensionNames.size(); i++) { + xContentKeys.add(dimensionNames.get(i)); + xContentKeys.add(entry.getKey().get(i)); + } + CacheStats counterFromXContent = new CacheStats(); + + for (Map.Entry> fieldNamesEntry : fieldNamesMap.entrySet()) { + List fullXContentKeys = new ArrayList<>(xContentKeys); + fullXContentKeys.add(fieldNamesEntry.getKey()); + int valueInXContent = (int) getValueFromNestedXContentMap(result, fullXContentKeys); + BiConsumer incrementer = fieldNamesEntry.getValue(); + incrementer.accept(counterFromXContent, valueInXContent); + } + + ImmutableCacheStats expected = entry.getValue().getStats(); + assertEquals(counterFromXContent.immutableSnapshot(), expected); + } + } + + public static Object getValueFromNestedXContentMap(Map xContentMap, List keys) { + Map current = xContentMap; + for (int i = 0; i < keys.size() - 1; i++) { + Object next = current.get(keys.get(i)); + if (next == null) { + return null; + } + current = (Map) next; + } + return current.get(keys.get(keys.size() - 1)); + } + + // Get a map from the list of dimension values to the corresponding leaf node. 
+ private Map, ImmutableCacheStatsHolder.Node> getAllLeafNodes(ImmutableCacheStatsHolder.Node root) { + Map, ImmutableCacheStatsHolder.Node> result = new HashMap<>(); + getAllLeafNodesHelper(result, root, new ArrayList<>()); + return result; + } + + private void getAllLeafNodesHelper( + Map, ImmutableCacheStatsHolder.Node> result, + ImmutableCacheStatsHolder.Node current, + List pathToCurrent + ) { + if (current.children.isEmpty()) { + result.put(pathToCurrent, current); + } else { + for (Map.Entry entry : current.children.entrySet()) { + List newPath = new ArrayList<>(pathToCurrent); + newPath.add(entry.getKey()); + getAllLeafNodesHelper(result, entry.getValue(), newPath); + } + } + } + private ImmutableCacheStatsHolder.Node getNode(List dimensionValues, ImmutableCacheStatsHolder.Node root) { ImmutableCacheStatsHolder.Node current = root; for (String dimensionValue : dimensionValues) { From 7270850f8b53fb6b723d9850e2e556969c717c54 Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Mon, 15 Apr 2024 16:07:59 -0700 Subject: [PATCH 02/19] Adds NodeCacheStats class which will provide cache stats API response Signed-off-by: Peter Alfonsi --- .../opensearch/common/cache/CacheType.java | 30 ++++++- .../common/cache/service/CacheService.java | 10 +++ .../common/cache/service/NodeCacheStats.java | 81 +++++++++++++++++++ 3 files changed, 119 insertions(+), 2 deletions(-) create mode 100644 server/src/main/java/org/opensearch/common/cache/service/NodeCacheStats.java diff --git a/server/src/main/java/org/opensearch/common/cache/CacheType.java b/server/src/main/java/org/opensearch/common/cache/CacheType.java index c5aeb7cd1fa40..61442db148067 100644 --- a/server/src/main/java/org/opensearch/common/cache/CacheType.java +++ b/server/src/main/java/org/opensearch/common/cache/CacheType.java @@ -10,20 +10,46 @@ import org.opensearch.common.annotation.ExperimentalApi; +import java.util.HashSet; +import java.util.Set; + /** * Cache types available within OpenSearch. 
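 * Each type pairs a settings prefix with the representation used to name it in cache stats API requests and responses.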
*/ @ExperimentalApi public enum CacheType { - INDICES_REQUEST_CACHE("indices.requests.cache"); + INDICES_REQUEST_CACHE("indices.requests.cache", "request_cache"); private final String settingPrefix; + private final String apiRepresentation; - CacheType(String settingPrefix) { + CacheType(String settingPrefix, String representation) { this.settingPrefix = settingPrefix; + this.apiRepresentation = representation; } public String getSettingPrefix() { return settingPrefix; } + + public String getApiRepresentation() { + return apiRepresentation; + } + + public static CacheType getByRepresentation(String representation) { + for (CacheType cacheType : values()) { + if (cacheType.apiRepresentation.equals(representation)) { + return cacheType; + } + } + throw new IllegalArgumentException("No CacheType with representation = " + representation); + } + + public static Set allRepresentations() { + Set reprs = new HashSet<>(); + for (CacheType cacheType : values()) { + reprs.add(cacheType.apiRepresentation); + } + return reprs; + } } diff --git a/server/src/main/java/org/opensearch/common/cache/service/CacheService.java b/server/src/main/java/org/opensearch/common/cache/service/CacheService.java index b6710e5e4b424..2d560fd648571 100644 --- a/server/src/main/java/org/opensearch/common/cache/service/CacheService.java +++ b/server/src/main/java/org/opensearch/common/cache/service/CacheService.java @@ -8,10 +8,12 @@ package org.opensearch.common.cache.service; +import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.cache.CacheType; import org.opensearch.common.cache.ICache; import org.opensearch.common.cache.settings.CacheSettings; +import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder; import org.opensearch.common.cache.store.OpenSearchOnHeapCache; import org.opensearch.common.cache.store.config.CacheConfig; import org.opensearch.common.settings.Setting; @@ -62,4 +64,12 @@ public ICache createCache(CacheConfig config, CacheType cache cacheTypeMap.put(cacheType, iCache); return iCache; } + + public NodeCacheStats stats(CommonStatsFlags flags) { + Map statsMap = new HashMap<>(); + for (CacheType type : cacheTypeMap.keySet()) { + statsMap.put(type, cacheTypeMap.get(type).stats()); + } + return new NodeCacheStats(statsMap, flags); + } } diff --git a/server/src/main/java/org/opensearch/common/cache/service/NodeCacheStats.java b/server/src/main/java/org/opensearch/common/cache/service/NodeCacheStats.java new file mode 100644 index 0000000000000..76f00570e8edf --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/service/NodeCacheStats.java @@ -0,0 +1,81 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.cache.service; + +import org.opensearch.action.admin.indices.stats.CommonStatsFlags; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; +import java.util.TreeMap; + +/** + * A class creating XContent responses to cache stats API requests. + * + * @opensearch.experimental + */ +@ExperimentalApi +public class NodeCacheStats implements ToXContentFragment, Writeable { + private final TreeMap statsByCache; + private final CommonStatsFlags flags; + + public NodeCacheStats(Map statsByCache, CommonStatsFlags flags) { + this.statsByCache = new TreeMap<>(statsByCache); // Use TreeMap to force consistent ordering of caches in API responses + this.flags = flags; + } + + public NodeCacheStats(StreamInput in) throws IOException { + this.flags = new CommonStatsFlags(in); + Map readMap = in.readMap(i -> i.readEnum(CacheType.class), ImmutableCacheStatsHolder::new); + this.statsByCache = new TreeMap<>(readMap); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + flags.writeTo(out); + out.writeMap(statsByCache, StreamOutput::writeEnum, (o, immutableCacheStatsHolder) -> immutableCacheStatsHolder.writeTo(o)); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + for (CacheType type : statsByCache.keySet()) { + if (flags.getIncludeCaches().contains(type)) { + builder.startObject(type.getApiRepresentation()); + statsByCache.get(type).toXContent(builder, params); + builder.endObject(); + } + } + return builder; + } + + @Override + public boolean equals(Object o) { + if (o == null) { + return false; + } + if (o.getClass() != NodeCacheStats.class) { + return false; + } + NodeCacheStats other = (NodeCacheStats) o; + return statsByCache.equals(other.statsByCache) && flags.getIncludeCaches().equals(other.flags.getIncludeCaches()); + } + + @Override + public int hashCode() { + return Objects.hash(statsByCache, flags); + } +} From b47ee09f56a959ac0270976a5719cbee4bf4f0cc Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Mon, 15 Apr 2024 16:11:02 -0700 Subject: [PATCH 03/19] Connects up API Signed-off-by: Peter Alfonsi --- .../admin/cluster/node/stats/NodeStats.java | 24 +++++++++- .../cluster/node/stats/NodesStatsRequest.java | 3 +- .../node/stats/TransportNodesStatsAction.java | 3 +- .../stats/TransportClusterStatsAction.java | 1 + .../admin/indices/stats/CommonStatsFlags.java | 25 ++++++++++ .../main/java/org/opensearch/node/Node.java | 3 +- .../java/org/opensearch/node/NodeService.java | 12 +++-- .../admin/cluster/RestNodesStatsAction.java | 20 ++++++++ .../cluster/node/stats/NodeStatsTests.java | 46 ++++++++++++++++++- .../opensearch/cluster/DiskUsageTests.java | 6 +++ .../MockInternalClusterInfoService.java | 3 +- .../opensearch/test/InternalTestCluster.java | 1 + 12 files changed, 138 insertions(+), 9 deletions(-) diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java index 
8562a7eb37709..ac2daf57f248b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java @@ -39,6 +39,7 @@ import org.opensearch.cluster.routing.WeightedRoutingStats; import org.opensearch.cluster.service.ClusterManagerThrottlingStats; import org.opensearch.common.Nullable; +import org.opensearch.common.cache.service.NodeCacheStats; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.indices.breaker.AllCircuitBreakerStats; @@ -158,6 +159,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment { @Nullable private AdmissionControlStats admissionControlStats; + @Nullable + private NodeCacheStats nodeCacheStats; + public NodeStats(StreamInput in) throws IOException { super(in); timestamp = in.readVLong(); @@ -234,6 +238,11 @@ public NodeStats(StreamInput in) throws IOException { } else { admissionControlStats = null; } + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + nodeCacheStats = in.readOptionalWriteable(NodeCacheStats::new); + } else { + nodeCacheStats = null; + } } public NodeStats( @@ -264,7 +273,8 @@ public NodeStats( @Nullable SearchPipelineStats searchPipelineStats, @Nullable SegmentReplicationRejectionStats segmentReplicationRejectionStats, @Nullable RepositoriesStats repositoriesStats, - @Nullable AdmissionControlStats admissionControlStats + @Nullable AdmissionControlStats admissionControlStats, + @Nullable NodeCacheStats nodeCacheStats ) { super(node); this.timestamp = timestamp; @@ -294,6 +304,7 @@ public NodeStats( this.segmentReplicationRejectionStats = segmentReplicationRejectionStats; this.repositoriesStats = repositoriesStats; this.admissionControlStats = admissionControlStats; + this.nodeCacheStats = nodeCacheStats; } public long getTimestamp() { @@ -451,6 +462,11 @@ public AdmissionControlStats getAdmissionControlStats() { return admissionControlStats; } + @Nullable + public NodeCacheStats getNodeCacheStats() { + return nodeCacheStats; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -506,6 +522,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeOptionalWriteable(admissionControlStats); } + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeOptionalWriteable(nodeCacheStats); + } } @Override @@ -609,6 +628,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (getAdmissionControlStats() != null) { getAdmissionControlStats().toXContent(builder, params); } + if (getNodeCacheStats() != null) { + getNodeCacheStats().toXContent(builder, params); + } return builder; } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java index 1af56f10b95ee..379836cf442e3 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java @@ -219,7 +219,8 @@ public enum Metric { RESOURCE_USAGE_STATS("resource_usage_stats"), SEGMENT_REPLICATION_BACKPRESSURE("segment_replication_backpressure"), REPOSITORIES("repositories"), - ADMISSION_CONTROL("admission_control"); + ADMISSION_CONTROL("admission_control"), + 
CACHE_STATS("caches"); private String metricName; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 1df73d3b4394d..2e93e5e7841cb 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -128,7 +128,8 @@ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) { NodesStatsRequest.Metric.RESOURCE_USAGE_STATS.containedIn(metrics), NodesStatsRequest.Metric.SEGMENT_REPLICATION_BACKPRESSURE.containedIn(metrics), NodesStatsRequest.Metric.REPOSITORIES.containedIn(metrics), - NodesStatsRequest.Metric.ADMISSION_CONTROL.containedIn(metrics) + NodesStatsRequest.Metric.ADMISSION_CONTROL.containedIn(metrics), + NodesStatsRequest.Metric.CACHE_STATS.containedIn(metrics) ); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 9c5dcc9e9de3f..e4f483f796f44 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -172,6 +172,7 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq false, false, false, + false, false ); List shardsStats = new ArrayList<>(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java index a7d9f95b80f7b..6950bc22677c6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java @@ -34,6 +34,7 @@ import org.opensearch.Version; import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.cache.CacheType; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -62,6 +63,8 @@ public class CommonStatsFlags implements Writeable, Cloneable { private boolean includeUnloadedSegments = false; private boolean includeAllShardIndexingPressureTrackers = false; private boolean includeOnlyTopIndexingPressureMetrics = false; + // Used for metric CACHE_STATS, to determine which caches to report stats for + private EnumSet includeCaches = EnumSet.noneOf(CacheType.class); /** * @param flags flags to set. If no flags are supplied, default flags will be set. 
@@ -91,6 +94,9 @@ public CommonStatsFlags(StreamInput in) throws IOException { includeUnloadedSegments = in.readBoolean(); includeAllShardIndexingPressureTrackers = in.readBoolean(); includeOnlyTopIndexingPressureMetrics = in.readBoolean(); + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + includeCaches = in.readEnumSet(CacheType.class); + } } @Override @@ -111,6 +117,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(includeUnloadedSegments); out.writeBoolean(includeAllShardIndexingPressureTrackers); out.writeBoolean(includeOnlyTopIndexingPressureMetrics); + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeEnumSet(includeCaches); + } } /** @@ -125,6 +134,7 @@ public CommonStatsFlags all() { includeUnloadedSegments = false; includeAllShardIndexingPressureTrackers = false; includeOnlyTopIndexingPressureMetrics = false; + includeCaches = EnumSet.noneOf(CacheType.class); return this; } @@ -140,6 +150,7 @@ public CommonStatsFlags clear() { includeUnloadedSegments = false; includeAllShardIndexingPressureTrackers = false; includeOnlyTopIndexingPressureMetrics = false; + includeCaches = EnumSet.noneOf(CacheType.class); return this; } @@ -151,6 +162,10 @@ public Flag[] getFlags() { return flags.toArray(new Flag[0]); } + public EnumSet getIncludeCaches() { + return includeCaches; + } + /** * Sets specific search group stats to retrieve the stats for. Mainly affects search * when enabled. @@ -206,6 +221,16 @@ public CommonStatsFlags includeOnlyTopIndexingPressureMetrics(boolean includeOnl return this; } + public CommonStatsFlags includeCacheType(CacheType cacheType) { + includeCaches.add(cacheType); + return this; + } + + public CommonStatsFlags includeAllCacheTypes() { + includeCaches = EnumSet.allOf(CacheType.class); + return this; + } + public boolean includeUnloadedSegments() { return this.includeUnloadedSegments; } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 7fa2b6c8ff497..628381beda3f9 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -1180,7 +1180,8 @@ protected Node( resourceUsageCollectorService, segmentReplicationStatsTracker, repositoryService, - admissionControlService + admissionControlService, + cacheService ); final SearchService searchService = newSearchService( diff --git a/server/src/main/java/org/opensearch/node/NodeService.java b/server/src/main/java/org/opensearch/node/NodeService.java index 15cc8f3d20bb3..1eb38ea63ad5a 100644 --- a/server/src/main/java/org/opensearch/node/NodeService.java +++ b/server/src/main/java/org/opensearch/node/NodeService.java @@ -41,6 +41,7 @@ import org.opensearch.cluster.routing.WeightedRoutingStats; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; +import org.opensearch.common.cache.service.CacheService; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsFilter; import org.opensearch.common.util.io.IOUtils; @@ -99,6 +100,7 @@ public class NodeService implements Closeable { private final RepositoriesService repositoriesService; private final AdmissionControlService admissionControlService; private final SegmentReplicationStatsTracker segmentReplicationStatsTracker; + private final CacheService cacheService; NodeService( Settings settings, @@ -125,7 +127,8 @@ public class NodeService implements Closeable { ResourceUsageCollectorService resourceUsageCollectorService, 
SegmentReplicationStatsTracker segmentReplicationStatsTracker, RepositoriesService repositoriesService, - AdmissionControlService admissionControlService + AdmissionControlService admissionControlService, + CacheService cacheService ) { this.settings = settings; this.threadPool = threadPool; @@ -154,6 +157,7 @@ public class NodeService implements Closeable { clusterService.addStateApplier(ingestService); clusterService.addStateApplier(searchPipelineService); this.segmentReplicationStatsTracker = segmentReplicationStatsTracker; + this.cacheService = cacheService; } public NodeInfo info( @@ -236,7 +240,8 @@ public NodeStats stats( boolean resourceUsageStats, boolean segmentReplicationTrackerStats, boolean repositoriesStats, - boolean admissionControl + boolean admissionControl, + boolean cacheService ) { // for indices stats we want to include previous allocated shards stats as well (it will // only be applied to the sensible ones to use, like refresh/merge/flush/indexing stats) @@ -268,7 +273,8 @@ public NodeStats stats( searchPipelineStats ? this.searchPipelineService.stats() : null, segmentReplicationTrackerStats ? this.segmentReplicationStatsTracker.getTotalRejectionStats() : null, repositoriesStats ? this.repositoriesService.getRepositoriesStats() : null, - admissionControl ? this.admissionControlService.stats() : null + admissionControl ? this.admissionControlService.stats() : null, + cacheService ? this.cacheService.stats(indices) : null ); } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java index 66b9afda06eb6..f62eaeb37f41f 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java @@ -36,6 +36,7 @@ import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.admin.indices.stats.CommonStatsFlags.Flag; import org.opensearch.client.node.NodeClient; +import org.opensearch.common.cache.CacheType; import org.opensearch.core.common.Strings; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; @@ -175,6 +176,25 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC nodesStatsRequest.indices(flags); } + } else if (metrics.contains("caches")) { + // Extract the list of caches we want to get stats for from the submetrics (which we get from index_metric) + Set cacheMetrics = Strings.tokenizeByCommaToSet(request.param("index_metric", "_all")); + CommonStatsFlags cacheFlags = new CommonStatsFlags(); + cacheFlags.clear(); + if (cacheMetrics.size() == 1 && cacheMetrics.contains("_all")) { + cacheFlags.includeAllCacheTypes(); + } else { + for (String cacheName : cacheMetrics) { + try { + cacheFlags.includeCacheType(CacheType.getByRepresentation(cacheName)); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException( + unrecognized(request, Set.of(cacheName), CacheType.allRepresentations(), "cache type") + ); + } + } + } + nodesStatsRequest.indices(cacheFlags); } else if (request.hasParam("index_metric")) { throw new IllegalArgumentException( String.format( diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java index 1b8b6243aa805..d268b11e1c61c 100644 --- 
a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -42,6 +42,12 @@ import org.opensearch.cluster.routing.WeightedRoutingStats; import org.opensearch.cluster.service.ClusterManagerThrottlingStats; import org.opensearch.cluster.service.ClusterStateStats; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.service.NodeCacheStats; +import org.opensearch.common.cache.stats.CacheStats; +import org.opensearch.common.cache.stats.CacheStatsHolder; +import org.opensearch.common.cache.stats.CacheStatsHolderTests; +import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.metrics.OperationStats; import org.opensearch.common.settings.ClusterSettings; @@ -577,6 +583,13 @@ public void testSerialization() throws IOException { deserializedAdmissionControllerStats.getRejectionCount().get(AdmissionControlActionType.INDEXING.getType()) ); } + NodeCacheStats nodeCacheStats = nodeStats.getNodeCacheStats(); + NodeCacheStats deserializedNodeCacheStats = deserializedNodeStats.getNodeCacheStats(); + if (nodeCacheStats == null) { + assertNull(deserializedNodeCacheStats); + } else { + assertEquals(nodeCacheStats, deserializedNodeCacheStats); + } } } } @@ -928,6 +941,36 @@ public void apply(String action, AdmissionControlActionType admissionControlActi NodeIndicesStats indicesStats = getNodeIndicesStats(remoteStoreStats); + NodeCacheStats nodeCacheStats = null; + if (frequently()) { + int numIndices = randomIntBetween(1, 10); + int numShardsPerIndex = randomIntBetween(1, 50); + + List dimensionNames = List.of("index", "shard", "tier"); + CacheStatsHolder statsHolder = new CacheStatsHolder(dimensionNames, "dummyStoreName"); + for (int indexNum = 0; indexNum < numIndices; indexNum++) { + String indexName = "index" + indexNum; + for (int shardNum = 0; shardNum < numShardsPerIndex; shardNum++) { + String shardName = "[" + indexName + "][" + shardNum + "]"; + for (String tierName : new String[] { "dummy_tier_1", "dummy_tier_2" }) { + List dimensionValues = List.of(indexName, shardName, tierName); + CacheStats toIncrement = new CacheStats(randomInt(20), randomInt(20), randomInt(20), randomInt(20), randomInt(20)); + CacheStatsHolderTests.populateStatsHolderFromStatsValueMap(statsHolder, Map.of(dimensionValues, toIncrement)); + } + } + } + CommonStatsFlags flags = new CommonStatsFlags(); + for (CacheType cacheType : CacheType.values()) { + if (frequently()) { + flags.includeCacheType(cacheType); + } + } + ImmutableCacheStatsHolder cacheStats = statsHolder.getImmutableCacheStatsHolder(); + Map cacheStatsMap = new HashMap<>(); + cacheStatsMap.put(CacheType.INDICES_REQUEST_CACHE, cacheStats); + nodeCacheStats = new NodeCacheStats(cacheStatsMap, flags); + } + // TODO: Only remote_store based aspects of NodeIndicesStats are being tested here. 
// It is possible to test other metrics in NodeIndicesStats as well since it extends Writeable now return new NodeStats( @@ -958,7 +1001,8 @@ public void apply(String action, AdmissionControlActionType admissionControlActi null, segmentReplicationRejectionStats, null, - admissionControlStats + admissionControlStats, + nodeCacheStats ); } diff --git a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java index ff47ec3015697..5539dd26dd52d 100644 --- a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java +++ b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java @@ -194,6 +194,7 @@ public void testFillDiskUsage() { null, null, null, + null, null ), new NodeStats( @@ -224,6 +225,7 @@ public void testFillDiskUsage() { null, null, null, + null, null ), new NodeStats( @@ -254,6 +256,7 @@ public void testFillDiskUsage() { null, null, null, + null, null ) ); @@ -315,6 +318,7 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, null ), new NodeStats( @@ -345,6 +349,7 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, null ), new NodeStats( @@ -375,6 +380,7 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, null ) ); diff --git a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java index 1ad6083074025..35ca5d80aeb4e 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java +++ b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java @@ -124,7 +124,8 @@ List adjustNodesStats(List nodesStats) { nodeStats.getSearchPipelineStats(), nodeStats.getSegmentReplicationRejectionStats(), nodeStats.getRepositoriesStats(), - nodeStats.getAdmissionControlStats() + nodeStats.getAdmissionControlStats(), + nodeStats.getNodeCacheStats() ); }).collect(Collectors.toList()); } diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index c2b964aa96212..ca80c65e58522 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -2736,6 +2736,7 @@ public void ensureEstimatedStats() { false, false, false, + false, false ); assertThat( From 4b792b6efb9af7fe790e12225e8a5fab9b1b806a Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Mon, 15 Apr 2024 16:11:17 -0700 Subject: [PATCH 04/19] Adds IT Signed-off-by: Peter Alfonsi --- .../CacheStatsAPIIndicesRequestCacheIT.java | 239 ++++++++++++++++++ 1 file changed, 239 insertions(+) create mode 100644 server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java new file mode 100644 index 0000000000000..2012ffd6a3bb5 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java @@ -0,0 +1,239 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source 
license. + */ + +package org.opensearch.indices; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.admin.indices.stats.CommonStatsFlags; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.client.Client; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.service.NodeCacheStats; +import org.opensearch.common.cache.stats.ImmutableCacheStats; +import org.opensearch.common.cache.stats.ImmutableCacheStatsHolderTests; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; +import org.opensearch.test.hamcrest.OpenSearchAssertions; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; + +// Use a single data node to simplify logic about cache stats across different shards. +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 1) +public class CacheStatsAPIIndicesRequestCacheIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public CacheStatsAPIIndicesRequestCacheIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE, "true").build() }, + new Object[] { Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE, "false").build() } + ); + } + + public void testCacheStatsAPIWIthOnHeapCache() throws Exception { + String index1Name = "index1"; + String index2Name = "index2"; + Client client = client(); + + startIndex(client, index1Name); + startIndex(client, index2Name); + + // Search twice for the same doc in index 1 + for (int i = 0; i < 2; i++) { + SearchResponse resp = client.prepareSearch(index1Name) + .setRequestCache(true) + .setQuery(QueryBuilders.termQuery("k", "hello")) + .get(); + assertSearchResponse(resp); + OpenSearchAssertions.assertAllSuccessful(resp); + assertEquals(1, resp.getHits().getTotalHits().value); + } + + // Search once for a doc in index 2 + SearchResponse resp = client.prepareSearch(index2Name).setRequestCache(true).setQuery(QueryBuilders.termQuery("k", "hello")).get(); + assertSearchResponse(resp); + OpenSearchAssertions.assertAllSuccessful(resp); + assertEquals(1, resp.getHits().getTotalHits().value); + + // First, aggregate by indices only + Map xContentMap = getNodeCacheStatsXContentMap(client, "", List.of(IndicesRequestCache.INDEX_DIMENSION_NAME)); + + List index1Keys = List.of( + CacheType.INDICES_REQUEST_CACHE.getApiRepresentation(), + IndicesRequestCache.INDEX_DIMENSION_NAME, + index1Name + ); + // Since we searched twice, we 
expect to see 1 hit, 1 miss and 1 entry for index 1 + ImmutableCacheStats expectedStats = new ImmutableCacheStats(1, 1, 0, 0, 1); + checkCacheStatsAPIResponse(xContentMap, index1Keys, expectedStats, false); + // Get the request size for one request, so we can reuse it for next index + int requestSize = (int) ((Map) ImmutableCacheStatsHolderTests.getValueFromNestedXContentMap( + xContentMap, + index1Keys + )).get(ImmutableCacheStats.Fields.MEMORY_SIZE_IN_BYTES); + assertTrue(requestSize > 0); + + List index2Keys = List.of( + CacheType.INDICES_REQUEST_CACHE.getApiRepresentation(), + IndicesRequestCache.INDEX_DIMENSION_NAME, + index2Name + ); + // We searched once in index 2, we expect 1 miss + 1 entry + expectedStats = new ImmutableCacheStats(0, 1, 0, requestSize, 1); + checkCacheStatsAPIResponse(xContentMap, index2Keys, expectedStats, true); + + // The total stats for the node should be 1 hit, 2 misses, and 2 entries + expectedStats = new ImmutableCacheStats(1, 2, 0, 2 * requestSize, 2); + List totalStatsKeys = List.of(CacheType.INDICES_REQUEST_CACHE.getApiRepresentation()); + checkCacheStatsAPIResponse(xContentMap, totalStatsKeys, expectedStats, true); + + // Aggregate by shards only + xContentMap = getNodeCacheStatsXContentMap(client, "", List.of(IndicesRequestCache.SHARD_ID_DIMENSION_NAME)); + + List index1Shard0Keys = List.of( + CacheType.INDICES_REQUEST_CACHE.getApiRepresentation(), + IndicesRequestCache.SHARD_ID_DIMENSION_NAME, + "[" + index1Name + "][0]" + ); + + expectedStats = new ImmutableCacheStats(1, 1, 0, requestSize, 1); + checkCacheStatsAPIResponse(xContentMap, index1Shard0Keys, expectedStats, true); + + List index2Shard0Keys = List.of( + CacheType.INDICES_REQUEST_CACHE.getApiRepresentation(), + IndicesRequestCache.SHARD_ID_DIMENSION_NAME, + "[" + index2Name + "][0]" + ); + expectedStats = new ImmutableCacheStats(0, 1, 0, requestSize, 1); + checkCacheStatsAPIResponse(xContentMap, index2Shard0Keys, expectedStats, true); + + // Aggregate by indices and shards + xContentMap = getNodeCacheStatsXContentMap( + client, + "", + List.of(IndicesRequestCache.INDEX_DIMENSION_NAME, IndicesRequestCache.SHARD_ID_DIMENSION_NAME) + ); + + index1Keys = List.of( + CacheType.INDICES_REQUEST_CACHE.getApiRepresentation(), + IndicesRequestCache.INDEX_DIMENSION_NAME, + index1Name, + IndicesRequestCache.SHARD_ID_DIMENSION_NAME, + "[" + index1Name + "][0]" + ); + + expectedStats = new ImmutableCacheStats(1, 1, 0, requestSize, 1); + checkCacheStatsAPIResponse(xContentMap, index1Keys, expectedStats, true); + + index2Keys = List.of( + CacheType.INDICES_REQUEST_CACHE.getApiRepresentation(), + IndicesRequestCache.INDEX_DIMENSION_NAME, + index2Name, + IndicesRequestCache.SHARD_ID_DIMENSION_NAME, + "[" + index2Name + "][0]" + ); + + expectedStats = new ImmutableCacheStats(0, 1, 0, requestSize, 1); + checkCacheStatsAPIResponse(xContentMap, index2Keys, expectedStats, true); + + } + + // TODO: Add testCacheStatsAPIWithTieredCache when TSC stats implementation PR is merged + + private void startIndex(Client client, String indexName) throws InterruptedException { + assertAcked( + client.admin() + .indices() + .prepareCreate(indexName) + .setMapping("k", "type=keyword") + .setSettings( + Settings.builder() + .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ) + .get() + ); + indexRandom(true, client.prepareIndex(indexName).setSource("k", "hello")); + ensureSearchable(indexName); + } + 
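The helpers that follow fetch the node's cache stats as a nested XContent map and then walk it by a list of keys (via ImmutableCacheStatsHolderTests.getValueFromNestedXContentMap). As a minimal standalone sketch of that key-path walk — with a hypothetical helper name and an illustrative response shape, not the actual test utility:

import java.util.List;
import java.util.Map;

public class NestedMapWalkSketch {

    // Follow a list of keys down a nested Map<String, Object>, as produced by
    // XContentHelper.convertToMap; returns null if any key along the path is missing.
    @SuppressWarnings("unchecked")
    static Object getNested(Map<String, Object> xContentMap, List<String> keys) {
        Object current = xContentMap;
        for (String key : keys) {
            if (!(current instanceof Map)) {
                return null;
            }
            current = ((Map<String, Object>) current).get(key);
        }
        return current;
    }

    public static void main(String[] args) {
        // Illustrative response shape only; real dimension names come from the cache's dimension list.
        Map<String, Object> response = Map.<String, Object>of(
            "request_cache",
            Map.of("indices", Map.of("index1", Map.of("hit_count", 1, "miss_count", 1)))
        );
        System.out.println(getNested(response, List.of("request_cache", "indices", "index1", "hit_count"))); // prints 1
    }
}
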
+ private static Map getNodeCacheStatsXContentMap(Client client, String nodeId, List aggregationLevels) + throws IOException { + + CommonStatsFlags statsFlags = new CommonStatsFlags(); + statsFlags.includeAllCacheTypes(); + + NodesStatsResponse nodeStatsResponse = client.admin() + .cluster() + .prepareNodesStats("data:true") + .addMetric(NodesStatsRequest.Metric.CACHE_STATS.metricName()) + .setIndices(statsFlags) + .get(); + // Can always get the first data node as there's only one in this test suite + assertEquals(1, nodeStatsResponse.getNodes().size()); + NodeCacheStats ncs = nodeStatsResponse.getNodes().get(0).getNodeCacheStats(); + + XContentBuilder builder = XContentFactory.jsonBuilder(); + Map paramMap = Map.of("level", String.join(",", aggregationLevels)); + ToXContent.Params params = new ToXContent.MapParams(paramMap); + + builder.startObject(); + ncs.toXContent(builder, params); + builder.endObject(); + + String resultString = builder.toString(); + return XContentHelper.convertToMap(MediaTypeRegistry.JSON.xContent(), resultString, true); + } + + private static void checkCacheStatsAPIResponse( + Map xContentMap, + List xContentMapKeys, + ImmutableCacheStats expectedStats, + boolean checkMemorySize + ) { + // Assumes the keys point to a level whose keys are the field values ("size_in_bytes", "evictions", etc) and whose values store + // those stats + Map aggregatedStatsResponse = (Map) ImmutableCacheStatsHolderTests.getValueFromNestedXContentMap( + xContentMap, + xContentMapKeys + ); + assertNotNull(aggregatedStatsResponse); + assertEquals(expectedStats.getHits(), (int) aggregatedStatsResponse.get(ImmutableCacheStats.Fields.HIT_COUNT)); + assertEquals(expectedStats.getMisses(), (int) aggregatedStatsResponse.get(ImmutableCacheStats.Fields.MISS_COUNT)); + assertEquals(expectedStats.getEvictions(), (int) aggregatedStatsResponse.get(ImmutableCacheStats.Fields.EVICTIONS)); + if (checkMemorySize) { + assertEquals( + expectedStats.getSizeInBytes(), + (int) aggregatedStatsResponse.get(ImmutableCacheStats.Fields.MEMORY_SIZE_IN_BYTES) + ); + } + assertEquals(expectedStats.getEntries(), (int) aggregatedStatsResponse.get(ImmutableCacheStats.Fields.ENTRIES)); + } +} From 62d1c0491f8c98ee71fcd17647f0fcdb259f9209 Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Mon, 15 Apr 2024 16:15:57 -0700 Subject: [PATCH 05/19] Comment reword Signed-off-by: Peter Alfonsi --- .../common/cache/stats/ImmutableCacheStatsHolder.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java b/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java index d4b98cc1dac5e..445a6c4bb77b6 100644 --- a/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java +++ b/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java @@ -174,7 +174,8 @@ Node aggregateByLevels(List levels) { * fully recursively while also passing in a completed map of children nodes before constructing the parent node. * For this reason, in this function we have to build the new tree top down rather than bottom up. * We use private methods allowing us to add children to/increment the stats for an existing node. - * This should be ok because the resulting trees are short-lived objects that are not exposed anywhere outside this class. 
+ * This should be ok because the resulting trees are short-lived objects that are not exposed anywhere outside this class, + * and the original tree is never changed. */ private void aggregateByLevelsHelper(Node parentInNewTree, Node currentInOriginalTree, List levels, int depth) { if (levels.contains(dimensionNames.get(depth))) { From 2cf1d2e96b1289d55670478cad6d4492cd3913fb Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Tue, 16 Apr 2024 09:43:41 -0700 Subject: [PATCH 06/19] Changelog Signed-off-by: Peter Alfonsi --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 88a8f57c0afdc..eb870ac0fde00 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Tiered Caching] Add dimension-based stats to ICache implementations. ([#12531](https://github.com/opensearch-project/OpenSearch/pull/12531)) - Add changes for overriding remote store and replication settings during snapshot restore. ([#11868](https://github.com/opensearch-project/OpenSearch/pull/11868)) - Add an individual setting of rate limiter for segment replication ([#12959](https://github.com/opensearch-project/OpenSearch/pull/12959)) +- [Tiered Caching] Expose new cache stats API ([#13237](https://github.com/opensearch-project/OpenSearch/pull/13237)) ### Dependencies - Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896)) From 2d67e8fe0e613a92a62b2708d692b98945f7cade Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Tue, 16 Apr 2024 11:53:46 -0700 Subject: [PATCH 07/19] javadoc Signed-off-by: Peter Alfonsi --- .../org/opensearch/common/cache/stats/ImmutableCacheStats.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStats.java b/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStats.java index 985f6e86481f3..80e03ff28cbb5 100644 --- a/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStats.java +++ b/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStats.java @@ -115,6 +115,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + /** + * Field names used to write the values in this object to XContent. 
+ */ public static final class Fields { public static final String MEMORY_SIZE = "size"; public static final String MEMORY_SIZE_IN_BYTES = "size_in_bytes"; From 948c737b1bbcee46862d58788d4e68592c4e2569 Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Tue, 16 Apr 2024 13:35:39 -0700 Subject: [PATCH 08/19] Changed old API to report correct sizes of ICacheKey instead of K Signed-off-by: Peter Alfonsi --- .../CacheStatsAPIIndicesRequestCacheIT.java | 99 ++++++++++++++----- .../cache/request/ShardRequestCache.java | 12 +-- .../AbstractIndexShardCacheEntity.java | 21 +++- .../indices/IndicesRequestCache.java | 15 +-- 4 files changed, 98 insertions(+), 49 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java index 2012ffd6a3bb5..796af6acffc16 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java @@ -16,6 +16,7 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.Randomness; import org.opensearch.common.cache.CacheType; import org.opensearch.common.cache.service.NodeCacheStats; import org.opensearch.common.cache.stats.ImmutableCacheStats; @@ -27,6 +28,7 @@ import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.cache.request.RequestCacheStats; import org.opensearch.index.query.QueryBuilders; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; @@ -35,6 +37,7 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collection; +import java.util.HashMap; import java.util.List; import java.util.Map; @@ -66,23 +69,14 @@ public void testCacheStatsAPIWIthOnHeapCache() throws Exception { // Search twice for the same doc in index 1 for (int i = 0; i < 2; i++) { - SearchResponse resp = client.prepareSearch(index1Name) - .setRequestCache(true) - .setQuery(QueryBuilders.termQuery("k", "hello")) - .get(); - assertSearchResponse(resp); - OpenSearchAssertions.assertAllSuccessful(resp); - assertEquals(1, resp.getHits().getTotalHits().value); + searchIndex(client, index1Name, ""); } // Search once for a doc in index 2 - SearchResponse resp = client.prepareSearch(index2Name).setRequestCache(true).setQuery(QueryBuilders.termQuery("k", "hello")).get(); - assertSearchResponse(resp); - OpenSearchAssertions.assertAllSuccessful(resp); - assertEquals(1, resp.getHits().getTotalHits().value); + searchIndex(client, index2Name, ""); // First, aggregate by indices only - Map xContentMap = getNodeCacheStatsXContentMap(client, "", List.of(IndicesRequestCache.INDEX_DIMENSION_NAME)); + Map xContentMap = getNodeCacheStatsXContentMap(client, List.of(IndicesRequestCache.INDEX_DIMENSION_NAME)); List index1Keys = List.of( CacheType.INDICES_REQUEST_CACHE.getApiRepresentation(), @@ -91,7 +85,7 @@ public void testCacheStatsAPIWIthOnHeapCache() throws Exception { ); // Since we searched twice, we expect to see 1 hit, 1 miss and 1 entry for index 1 ImmutableCacheStats expectedStats = new ImmutableCacheStats(1, 1, 0, 0, 1); - checkCacheStatsAPIResponse(xContentMap, 
index1Keys, expectedStats, false); + checkCacheStatsAPIResponse(xContentMap, index1Keys, expectedStats, false, true); // Get the request size for one request, so we can reuse it for next index int requestSize = (int) ((Map) ImmutableCacheStatsHolderTests.getValueFromNestedXContentMap( xContentMap, @@ -106,15 +100,15 @@ public void testCacheStatsAPIWIthOnHeapCache() throws Exception { ); // We searched once in index 2, we expect 1 miss + 1 entry expectedStats = new ImmutableCacheStats(0, 1, 0, requestSize, 1); - checkCacheStatsAPIResponse(xContentMap, index2Keys, expectedStats, true); + checkCacheStatsAPIResponse(xContentMap, index2Keys, expectedStats, true, true); // The total stats for the node should be 1 hit, 2 misses, and 2 entries expectedStats = new ImmutableCacheStats(1, 2, 0, 2 * requestSize, 2); List totalStatsKeys = List.of(CacheType.INDICES_REQUEST_CACHE.getApiRepresentation()); - checkCacheStatsAPIResponse(xContentMap, totalStatsKeys, expectedStats, true); + checkCacheStatsAPIResponse(xContentMap, totalStatsKeys, expectedStats, true, true); // Aggregate by shards only - xContentMap = getNodeCacheStatsXContentMap(client, "", List.of(IndicesRequestCache.SHARD_ID_DIMENSION_NAME)); + xContentMap = getNodeCacheStatsXContentMap(client, List.of(IndicesRequestCache.SHARD_ID_DIMENSION_NAME)); List index1Shard0Keys = List.of( CacheType.INDICES_REQUEST_CACHE.getApiRepresentation(), @@ -123,7 +117,7 @@ public void testCacheStatsAPIWIthOnHeapCache() throws Exception { ); expectedStats = new ImmutableCacheStats(1, 1, 0, requestSize, 1); - checkCacheStatsAPIResponse(xContentMap, index1Shard0Keys, expectedStats, true); + checkCacheStatsAPIResponse(xContentMap, index1Shard0Keys, expectedStats, true, true); List index2Shard0Keys = List.of( CacheType.INDICES_REQUEST_CACHE.getApiRepresentation(), @@ -131,12 +125,11 @@ public void testCacheStatsAPIWIthOnHeapCache() throws Exception { "[" + index2Name + "][0]" ); expectedStats = new ImmutableCacheStats(0, 1, 0, requestSize, 1); - checkCacheStatsAPIResponse(xContentMap, index2Shard0Keys, expectedStats, true); + checkCacheStatsAPIResponse(xContentMap, index2Shard0Keys, expectedStats, true, true); // Aggregate by indices and shards xContentMap = getNodeCacheStatsXContentMap( client, - "", List.of(IndicesRequestCache.INDEX_DIMENSION_NAME, IndicesRequestCache.SHARD_ID_DIMENSION_NAME) ); @@ -149,7 +142,7 @@ public void testCacheStatsAPIWIthOnHeapCache() throws Exception { ); expectedStats = new ImmutableCacheStats(1, 1, 0, requestSize, 1); - checkCacheStatsAPIResponse(xContentMap, index1Keys, expectedStats, true); + checkCacheStatsAPIResponse(xContentMap, index1Keys, expectedStats, true, true); index2Keys = List.of( CacheType.INDICES_REQUEST_CACHE.getApiRepresentation(), @@ -160,12 +153,50 @@ public void testCacheStatsAPIWIthOnHeapCache() throws Exception { ); expectedStats = new ImmutableCacheStats(0, 1, 0, requestSize, 1); - checkCacheStatsAPIResponse(xContentMap, index2Keys, expectedStats, true); + checkCacheStatsAPIResponse(xContentMap, index2Keys, expectedStats, true, true); } // TODO: Add testCacheStatsAPIWithTieredCache when TSC stats implementation PR is merged + public void testStatsMatchOldApi() throws Exception { + // The main purpose of this test is to check that the new and old APIs are both correctly estimating memory size, + // using the logic that includes the overhead memory in ICacheKey. 
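The accounting this test exercises can be sketched in isolation. The DimensionedKey type below is an illustrative stand-in for ICacheKey, not the real class: the reported size is the serialized value's length plus the key's footprint, where the footprint combines the wrapped inner key's bytes with the wrapper's own per-dimension overhead.

import java.nio.charset.StandardCharsets;

public class KeySizeAccountingSketch {

    // Illustrative stand-in for a dimension-aware cache key: it wraps an inner key and
    // contributes its own overhead (dimension values, object headers) on top of it.
    record DimensionedKey(String innerKey, long dimensionOverheadBytes) {

        long innerKeyRamBytesUsed() {
            // Crude proxy for the inner key's footprint; real code would use Accountable.ramBytesUsed().
            return innerKey.getBytes(StandardCharsets.UTF_8).length;
        }

        // Total footprint reported to the stats: inner key bytes plus the wrapper's own overhead.
        long ramBytesUsed(long innerKeyRamBytesUsed) {
            return innerKeyRamBytesUsed + dimensionOverheadBytes;
        }
    }

    public static void main(String[] args) {
        DimensionedKey key = new DimensionedKey("[index][0]|readerCacheKeyId|queryHash", 64);
        byte[] serializedValue = new byte[512]; // stands in for the cached BytesReference
        long sizeInBytes = key.ramBytesUsed(key.innerKeyRamBytesUsed()) + serializedValue.length;
        System.out.println("size_in_bytes contribution = " + sizeInBytes);
    }
}
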
+ String index = "index"; + Client client = client(); + startIndex(client, index); + + int numKeys = Randomness.get().nextInt(100); + for (int i = 0; i < numKeys; i++) { + searchIndex(client, index, String.valueOf(i)); + } + // Get some hits as well + for (int i = 0; i < numKeys / 2; i++) { + searchIndex(client, index, String.valueOf(i)); + } + + RequestCacheStats oldApiStats = client.admin() + .indices() + .prepareStats(index) + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache(); + assertNotEquals(0, oldApiStats.getMemorySizeInBytes()); + + List xContentMapKeys = List.of(CacheType.INDICES_REQUEST_CACHE.getApiRepresentation()); + Map xContentMap = getNodeCacheStatsXContentMap(client, List.of()); + ImmutableCacheStats expected = new ImmutableCacheStats( + oldApiStats.getHitCount(), + oldApiStats.getMissCount(), + oldApiStats.getEvictions(), + oldApiStats.getMemorySizeInBytes(), + 0 + ); + // Don't check entries, as the old API doesn't track this + checkCacheStatsAPIResponse(xContentMap, xContentMapKeys, expected, true, false); + } + private void startIndex(Client client, String indexName) throws InterruptedException { assertAcked( client.admin() @@ -184,8 +215,18 @@ private void startIndex(Client client, String indexName) throws InterruptedExcep ensureSearchable(indexName); } - private static Map getNodeCacheStatsXContentMap(Client client, String nodeId, List aggregationLevels) - throws IOException { + private SearchResponse searchIndex(Client client, String index, String searchSuffix) { + SearchResponse resp = client.prepareSearch(index) + .setRequestCache(true) + .setQuery(QueryBuilders.termQuery("k", "hello" + searchSuffix)) + .get(); + assertSearchResponse(resp); + OpenSearchAssertions.assertAllSuccessful(resp); + // assertEquals(1, resp.getHits().getTotalHits().value); + return resp; + } + + private static Map getNodeCacheStatsXContentMap(Client client, List aggregationLevels) throws IOException { CommonStatsFlags statsFlags = new CommonStatsFlags(); statsFlags.includeAllCacheTypes(); @@ -201,7 +242,10 @@ private static Map getNodeCacheStatsXContentMap(Client client, S NodeCacheStats ncs = nodeStatsResponse.getNodes().get(0).getNodeCacheStats(); XContentBuilder builder = XContentFactory.jsonBuilder(); - Map paramMap = Map.of("level", String.join(",", aggregationLevels)); + Map paramMap = new HashMap<>(); + if (!aggregationLevels.isEmpty()) { + paramMap.put("level", String.join(",", aggregationLevels)); + } ToXContent.Params params = new ToXContent.MapParams(paramMap); builder.startObject(); @@ -216,7 +260,8 @@ private static void checkCacheStatsAPIResponse( Map xContentMap, List xContentMapKeys, ImmutableCacheStats expectedStats, - boolean checkMemorySize + boolean checkMemorySize, + boolean checkEntries ) { // Assumes the keys point to a level whose keys are the field values ("size_in_bytes", "evictions", etc) and whose values store // those stats @@ -234,6 +279,8 @@ private static void checkCacheStatsAPIResponse( (int) aggregatedStatsResponse.get(ImmutableCacheStats.Fields.MEMORY_SIZE_IN_BYTES) ); } - assertEquals(expectedStats.getEntries(), (int) aggregatedStatsResponse.get(ImmutableCacheStats.Fields.ENTRIES)); + if (checkEntries) { + assertEquals(expectedStats.getEntries(), (int) aggregatedStatsResponse.get(ImmutableCacheStats.Fields.ENTRIES)); + } } } diff --git a/server/src/main/java/org/opensearch/index/cache/request/ShardRequestCache.java b/server/src/main/java/org/opensearch/index/cache/request/ShardRequestCache.java index bb35a09ccab46..c08ff73e3d6b2 100644 
--- a/server/src/main/java/org/opensearch/index/cache/request/ShardRequestCache.java +++ b/server/src/main/java/org/opensearch/index/cache/request/ShardRequestCache.java @@ -32,7 +32,6 @@ package org.opensearch.index.cache.request; -import org.apache.lucene.util.Accountable; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.metrics.CounterMetric; import org.opensearch.core.common.bytes.BytesReference; @@ -62,18 +61,15 @@ public void onMiss() { missCount.inc(); } - public void onCached(Accountable key, BytesReference value) { - totalMetric.inc(key.ramBytesUsed() + value.ramBytesUsed()); + public void onCached(long keyRamBytesUsed, BytesReference value) { + totalMetric.inc(keyRamBytesUsed + value.ramBytesUsed()); } - public void onRemoval(Accountable key, BytesReference value, boolean evicted) { + public void onRemoval(long keyRamBytesUsed, BytesReference value, boolean evicted) { if (evicted) { evictionsMetric.inc(); } - long dec = 0; - if (key != null) { - dec += key.ramBytesUsed(); - } + long dec = keyRamBytesUsed; if (value != null) { dec += value.ramBytesUsed(); } diff --git a/server/src/main/java/org/opensearch/indices/AbstractIndexShardCacheEntity.java b/server/src/main/java/org/opensearch/indices/AbstractIndexShardCacheEntity.java index bb1201cb910a9..6b4c53654d871 100644 --- a/server/src/main/java/org/opensearch/indices/AbstractIndexShardCacheEntity.java +++ b/server/src/main/java/org/opensearch/indices/AbstractIndexShardCacheEntity.java @@ -32,6 +32,7 @@ package org.opensearch.indices; +import org.opensearch.common.cache.ICacheKey; import org.opensearch.common.cache.RemovalNotification; import org.opensearch.common.cache.RemovalReason; import org.opensearch.core.common.bytes.BytesReference; @@ -51,8 +52,8 @@ abstract class AbstractIndexShardCacheEntity implements IndicesRequestCache.Cach protected abstract ShardRequestCache stats(); @Override - public final void onCached(IndicesRequestCache.Key key, BytesReference value) { - stats().onCached(key, value); + public final void onCached(ICacheKey key, BytesReference value) { + stats().onCached(getRamBytesUsedInKey(key), value); } @Override @@ -66,7 +67,19 @@ public final void onMiss() { } @Override - public final void onRemoval(RemovalNotification notification) { - stats().onRemoval(notification.getKey(), notification.getValue(), notification.getRemovalReason() == RemovalReason.EVICTED); + public final void onRemoval(RemovalNotification, BytesReference> notification) { + stats().onRemoval( + getRamBytesUsedInKey(notification.getKey()), + notification.getValue(), + notification.getRemovalReason() == RemovalReason.EVICTED + ); + } + + private long getRamBytesUsedInKey(ICacheKey key) { + long innerKeyRamBytesUsed = 0; + if (key.key != null) { + innerKeyRamBytesUsed = key.key.ramBytesUsed(); + } + return key.ramBytesUsed(innerKeyRamBytesUsed); } } diff --git a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java index eab772cda3213..7c57c760ed1ee 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java +++ b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java @@ -217,15 +217,8 @@ public void onRemoval(RemovalNotification, BytesReference> notifi // In case this event happens for an old shard, we can safely ignore this as we don't keep track for old // shards as part of request cache. 
- // Pass a new removal notification containing Key rather than ICacheKey to the CacheEntity for backwards compatibility. Key key = notification.getKey().key; - RemovalNotification newNotification = new RemovalNotification<>( - key, - notification.getValue(), - notification.getRemovalReason() - ); - - cacheEntityLookup.apply(key.shardId).ifPresent(entity -> entity.onRemoval(newNotification)); + cacheEntityLookup.apply(key.shardId).ifPresent(entity -> entity.onRemoval(notification)); cacheCleanupManager.updateCleanupKeyToCountMapOnCacheEviction( new CleanupKey(cacheEntityLookup.apply(key.shardId).orElse(null), key.readerCacheKeyId) ); @@ -318,7 +311,7 @@ public boolean isLoaded() { @Override public BytesReference load(ICacheKey key) throws Exception { BytesReference value = loader.get(); - entity.onCached(key.key, value); + entity.onCached(key, value); loaded = true; return value; } @@ -332,7 +325,7 @@ interface CacheEntity extends Accountable { /** * Called after the value was loaded. */ - void onCached(Key key, BytesReference value); + void onCached(ICacheKey key, BytesReference value); /** * Returns true iff the resource behind this entity is still open ie. @@ -359,7 +352,7 @@ interface CacheEntity extends Accountable { /** * Called when this entity instance is removed */ - void onRemoval(RemovalNotification notification); + void onRemoval(RemovalNotification, BytesReference> notification); } From 802a2050ad1b6877c412937fb48477376f2286b4 Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Wed, 17 Apr 2024 15:09:02 -0700 Subject: [PATCH 09/19] Tweaked XContent level filtering logic + added tests for this Signed-off-by: Peter Alfonsi --- .../stats/ImmutableCacheStatsHolder.java | 42 +++++++------- .../stats/ImmutableCacheStatsHolderTests.java | 58 +++++++++++++++++++ 2 files changed, 79 insertions(+), 21 deletions(-) diff --git a/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java b/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java index 445a6c4bb77b6..07ed853052014 100644 --- a/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java +++ b/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java @@ -32,7 +32,7 @@ @ExperimentalApi public class ImmutableCacheStatsHolder implements Writeable, ToXContent { - // An immutable snapshot of a stats within a CacheStatsHolder, containing all the stats maintained by the cache. + // Root node of immutable snapshot of stats within a CacheStatsHolder, containing all the stats maintained by the cache. // Pkg-private for testing. final Node statsRoot; final List dimensionNames; @@ -46,9 +46,6 @@ public ImmutableCacheStatsHolder(Node statsRoot, List dimensionNames, St this.storeName = storeName; } - /** - * Should not be used with StreamOutputs produced using writeToWithClassName. - */ public ImmutableCacheStatsHolder(StreamInput in) throws IOException { // Because we write in preorder order, the parent of the next node we read will always be one of the ancestors // of the last node we read. This allows us to avoid ambiguity if nodes have the same dimension value, without @@ -157,11 +154,14 @@ public ImmutableCacheStats getStatsForDimensionValues(List dimensionValu } /** - * Returns a new tree containing the stats aggregated by the levels passed in. The root node is a dummy node, - * whose name and value are null. The new tree only has dimensions matching the levels passed in. 
+ * Returns a new tree containing the stats aggregated by the levels passed in. + * The new tree only has dimensions matching the levels passed in. + * The levels passed in must be in the proper order, as they would be in the output of filterLevels(). */ - Node aggregateByLevels(List levels) { - List filteredLevels = filterLevels(levels); + Node aggregateByLevels(List filteredLevels) { + if (filteredLevels.isEmpty()) { + throw new IllegalArgumentException("Filtered levels passed to aggregateByLevels() cannot be empty"); + } Node newRoot = new Node("", false, statsRoot.getStats()); for (Node child : statsRoot.children.values()) { aggregateByLevelsHelper(newRoot, child, filteredLevels, 0); @@ -208,18 +208,19 @@ private void aggregateByLevelsHelper(Node parentInNewTree, Node currentInOrigina } /** - * Filters out levels that aren't in dimensionNames. Unrecognized levels are ignored. + * Filters out levels that aren't in dimensionNames, and orders the resulting list to match the order in dimensionNames. + * Unrecognized levels are ignored. */ private List filterLevels(List levels) { + if (levels == null) { + return new ArrayList<>(); + } List filtered = new ArrayList<>(); - for (String level : levels) { - if (dimensionNames.contains(level)) { - filtered.add(level); + for (String dimensionName : dimensionNames) { + if (levels.contains(dimensionName)) { + filtered.add(dimensionName); } } - if (filtered.isEmpty()) { - throw new IllegalArgumentException("Levels cannot have size 0"); - } return filtered; } @@ -228,9 +229,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws // Always show total stats, regardless of levels getTotalStats().toXContent(builder, params); - List levels = getLevels(params); - if (levels != null) { - List filteredLevels = filterLevels(levels); + List filteredLevels = filterLevels(getLevels(params)); + if (!filteredLevels.isEmpty()) { toXContentForLevels(builder, params, filteredLevels); } @@ -239,10 +239,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - XContentBuilder toXContentForLevels(XContentBuilder builder, Params params, List levels) throws IOException { - Node aggregated = aggregateByLevels(levels); + XContentBuilder toXContentForLevels(XContentBuilder builder, Params params, List filteredLevels) throws IOException { + Node aggregated = aggregateByLevels(filteredLevels); // Depth -1 corresponds to the dummy root node - toXContentForLevelsHelper(-1, aggregated, levels, builder, params); + toXContentForLevelsHelper(-1, aggregated, filteredLevels, builder, params); return builder; } diff --git a/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java b/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java index bf8056232f365..9e4bc19f615cd 100644 --- a/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java +++ b/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java @@ -227,6 +227,64 @@ public void testXContentForLevels() throws Exception { } } + public void testXContent() throws Exception { + // Tests logic of filtering levels out, logic for aggregating by those levels is already covered + List dimensionNames = List.of("A", "B", "C"); + CacheStatsHolder statsHolder = new CacheStatsHolder(dimensionNames, storeName); + Map> usedDimensionValues = CacheStatsHolderTests.getUsedDimensionValues(statsHolder, 10); + 
CacheStatsHolderTests.populateStats(statsHolder, usedDimensionValues, 100, 10); + ImmutableCacheStatsHolder stats = statsHolder.getImmutableCacheStatsHolder(); + + // If the levels in the params are empty or contains only unrecognized levels, we should only see the total stats and no level + // aggregation + List paramsList = List.of(ToXContent.EMPTY_PARAMS, getLevelParams(List.of()), getLevelParams(List.of("D"))); + for (ToXContent.Params params : paramsList) { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + stats.toXContent(builder, params); + builder.endObject(); + + String resultString = builder.toString(); + Map result = XContentHelper.convertToMap(MediaTypeRegistry.JSON.xContent(), resultString, true); + + assertTotalStatsPresentInXContentResponse(result); + // assert there are no other entries in the map besides these 6 + assertEquals(6, result.size()); + } + + // if we pass recognized levels in any order, alongside ignored unrecognized levels, we should see the above plus level aggregation + ToXContent.Params params = getLevelParams(List.of("C", "A", "E")); + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + stats.toXContent(builder, params); + builder.endObject(); + + String resultString = builder.toString(); + Map result = XContentHelper.convertToMap(MediaTypeRegistry.JSON.xContent(), resultString, true); + assertTotalStatsPresentInXContentResponse(result); + assertNotNull(result.get("A")); + assertEquals(7, result.size()); + } + + private void assertTotalStatsPresentInXContentResponse(Map result) { + // assert the total stats are present + assertNotEquals(0, (int) result.get(ImmutableCacheStats.Fields.MEMORY_SIZE_IN_BYTES)); + assertNotEquals(0, (int) result.get(ImmutableCacheStats.Fields.EVICTIONS)); + assertNotEquals(0, (int) result.get(ImmutableCacheStats.Fields.HIT_COUNT)); + assertNotEquals(0, (int) result.get(ImmutableCacheStats.Fields.MISS_COUNT)); + assertNotEquals(0, (int) result.get(ImmutableCacheStats.Fields.ENTRIES)); + // assert the store name is present + assertEquals(storeName, (String) result.get(ImmutableCacheStatsHolder.STORE_NAME_FIELD)); + } + + private ToXContent.Params getLevelParams(List levels) { + Map paramMap = new HashMap<>(); + if (!levels.isEmpty()) { + paramMap.put("level", String.join(",", levels)); + } + return new ToXContent.MapParams(paramMap); + } + public static Object getValueFromNestedXContentMap(Map xContentMap, List keys) { Map current = xContentMap; for (int i = 0; i < keys.size() - 1; i++) { From 86737f57354d7e4de00aa61cd40e7e5dcb78f96e Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Thu, 18 Apr 2024 11:45:13 -0700 Subject: [PATCH 10/19] Addressed Ankit's comments Signed-off-by: Peter Alfonsi --- .../indices/CacheStatsAPIIndicesRequestCacheIT.java | 3 +-- .../opensearch/action/admin/cluster/node/stats/NodeStats.java | 4 ++-- .../action/admin/cluster/node/stats/NodesStatsRequest.java | 2 +- .../admin/cluster/node/stats/TransportNodesStatsAction.java | 2 +- .../rest/action/admin/cluster/RestNodesStatsAction.java | 2 +- 5 files changed, 6 insertions(+), 7 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java index 796af6acffc16..fba57808543ff 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java @@ -222,7 +222,6 @@ private SearchResponse searchIndex(Client client, String index, String searchSuf .get(); assertSearchResponse(resp); OpenSearchAssertions.assertAllSuccessful(resp); - // assertEquals(1, resp.getHits().getTotalHits().value); return resp; } @@ -234,7 +233,7 @@ private static Map getNodeCacheStatsXContentMap(Client client, L NodesStatsResponse nodeStatsResponse = client.admin() .cluster() .prepareNodesStats("data:true") - .addMetric(NodesStatsRequest.Metric.CACHE_STATS.metricName()) + .addMetric(NodesStatsRequest.Metric.SEARCH_CACHE_STATS.metricName()) .setIndices(statsFlags) .get(); // Can always get the first data node as there's only one in this test suite diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java index ac2daf57f248b..0917a0baff1ab 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java @@ -238,7 +238,7 @@ public NodeStats(StreamInput in) throws IOException { } else { admissionControlStats = null; } - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_14_0)) { nodeCacheStats = in.readOptionalWriteable(NodeCacheStats::new); } else { nodeCacheStats = null; @@ -522,7 +522,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeOptionalWriteable(admissionControlStats); } - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_14_0)) { out.writeOptionalWriteable(nodeCacheStats); } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java index 379836cf442e3..9c0cba1322b21 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java @@ -220,7 +220,7 @@ public enum Metric { SEGMENT_REPLICATION_BACKPRESSURE("segment_replication_backpressure"), REPOSITORIES("repositories"), ADMISSION_CONTROL("admission_control"), - CACHE_STATS("caches"); + SEARCH_CACHE_STATS("caches"); private String metricName; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 2e93e5e7841cb..14a52163c5bf6 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -129,7 +129,7 @@ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) { NodesStatsRequest.Metric.SEGMENT_REPLICATION_BACKPRESSURE.containedIn(metrics), NodesStatsRequest.Metric.REPOSITORIES.containedIn(metrics), NodesStatsRequest.Metric.ADMISSION_CONTROL.containedIn(metrics), - NodesStatsRequest.Metric.CACHE_STATS.containedIn(metrics) + NodesStatsRequest.Metric.SEARCH_CACHE_STATS.containedIn(metrics) ); } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java 
b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java index f62eaeb37f41f..e33959abcc4e9 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java @@ -181,7 +181,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC Set cacheMetrics = Strings.tokenizeByCommaToSet(request.param("index_metric", "_all")); CommonStatsFlags cacheFlags = new CommonStatsFlags(); cacheFlags.clear(); - if (cacheMetrics.size() == 1 && cacheMetrics.contains("_all")) { + if (cacheMetrics.contains("_all")) { cacheFlags.includeAllCacheTypes(); } else { for (String cacheName : cacheMetrics) { From b4182ec1aa9b970c2ea67c8fd4d5c77331a3e1c5 Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Tue, 23 Apr 2024 10:10:34 -0700 Subject: [PATCH 11/19] ImmutableStatsHolder now aggregates by level at creation time Signed-off-by: Peter Alfonsi --- .../common/tier/TieredSpilloverCache.java | 5 ++ .../cache/common/tier/MockDiskCache.java | 5 ++ .../cache/store/disk/EhcacheDiskCache.java | 15 +++- .../CacheStatsAPIIndicesRequestCacheIT.java | 24 +++++- .../admin/indices/stats/CommonStatsFlags.java | 14 ++++ .../org/opensearch/common/cache/ICache.java | 4 + .../common/cache/service/CacheService.java | 2 +- .../common/cache/stats/CacheStatsHolder.java | 14 +++- .../stats/ImmutableCacheStatsHolder.java | 80 ++++++++++--------- .../cache/store/OpenSearchOnHeapCache.java | 7 +- .../admin/cluster/RestNodesStatsAction.java | 4 + .../cluster/node/stats/NodeStatsTests.java | 2 +- .../stats/ImmutableCacheStatsHolderTests.java | 65 +++++++++------ 13 files changed, 168 insertions(+), 73 deletions(-) diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java index ae3d9f1dbcf62..86dc939c3c4b6 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java @@ -209,6 +209,11 @@ public ImmutableCacheStatsHolder stats() { return null; // TODO: in TSC stats PR } + @Override + public ImmutableCacheStatsHolder stats(String[] levels) { + return null; // TODO: in TSC stats PR + } + private Function, V> getValueFromTieredCache() { return key -> { try (ReleasableLock ignore = readLock.acquire()) { diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java index 0d98503af635f..8aed3f004e7b2 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java @@ -99,6 +99,11 @@ public ImmutableCacheStatsHolder stats() { return null; } + @Override + public ImmutableCacheStatsHolder stats(String[] levels) { + return null; + } + @Override public void close() { diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java index ba0505b1c871d..b05814d4865ee 100644 --- a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java +++ 
b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java @@ -444,12 +444,21 @@ public void close() { } /** - * Relevant stats for this cache. - * @return CacheStats + * Relevant stats for this cache, with no aggregation. + * @return ImmutableCacheStatsHolder */ @Override public ImmutableCacheStatsHolder stats() { - return cacheStatsHolder.getImmutableCacheStatsHolder(); + return stats(null); + } + + /** + * Relevant stats for this cache, aggregated by levels.. + * @return ImmutableCacheStatsHolder + */ + @Override + public ImmutableCacheStatsHolder stats(String[] levels) { + return cacheStatsHolder.getImmutableCacheStatsHolder(levels); } /** diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java index fba57808543ff..e282278c2d24c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java @@ -166,7 +166,7 @@ public void testStatsMatchOldApi() throws Exception { Client client = client(); startIndex(client, index); - int numKeys = Randomness.get().nextInt(100); + int numKeys = Randomness.get().nextInt(100) + 1; for (int i = 0; i < numKeys; i++) { searchIndex(client, index, String.valueOf(i)); } @@ -197,6 +197,19 @@ public void testStatsMatchOldApi() throws Exception { checkCacheStatsAPIResponse(xContentMap, xContentMapKeys, expected, true, false); } + public void testNullLevels() throws Exception { + String index = "index"; + Client client = client(); + startIndex(client, index); + int numKeys = Randomness.get().nextInt(100) + 1; + for (int i = 0; i < numKeys; i++) { + searchIndex(client, index, String.valueOf(i)); + } + Map xContentMap = getNodeCacheStatsXContentMap(client, null); + // Null levels should result in only the total cache stats being returned -> 6 fields inside the response. 
+ assertEquals(6, ((Map) xContentMap.get("request_cache")).size()); + } + private void startIndex(Client client, String indexName) throws InterruptedException { assertAcked( client.admin() @@ -229,6 +242,13 @@ private static Map getNodeCacheStatsXContentMap(Client client, L CommonStatsFlags statsFlags = new CommonStatsFlags(); statsFlags.includeAllCacheTypes(); + String[] flagsLevels; + if (aggregationLevels == null) { + flagsLevels = null; + } else { + flagsLevels = aggregationLevels.toArray(new String[0]); + } + statsFlags.setLevels(flagsLevels); NodesStatsResponse nodeStatsResponse = client.admin() .cluster() @@ -242,7 +262,7 @@ private static Map getNodeCacheStatsXContentMap(Client client, L XContentBuilder builder = XContentFactory.jsonBuilder(); Map paramMap = new HashMap<>(); - if (!aggregationLevels.isEmpty()) { + if (aggregationLevels != null && !aggregationLevels.isEmpty()) { paramMap.put("level", String.join(",", aggregationLevels)); } ToXContent.Params params = new ToXContent.MapParams(paramMap); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java index 6950bc22677c6..505002d82b045 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java @@ -65,6 +65,7 @@ public class CommonStatsFlags implements Writeable, Cloneable { private boolean includeOnlyTopIndexingPressureMetrics = false; // Used for metric CACHE_STATS, to determine which caches to report stats for private EnumSet includeCaches = EnumSet.noneOf(CacheType.class); + private String[] levels; /** * @param flags flags to set. If no flags are supplied, default flags will be set. @@ -96,6 +97,7 @@ public CommonStatsFlags(StreamInput in) throws IOException { includeOnlyTopIndexingPressureMetrics = in.readBoolean(); if (in.getVersion().onOrAfter(Version.V_3_0_0)) { includeCaches = in.readEnumSet(CacheType.class); + levels = in.readStringArray(); } } @@ -119,6 +121,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(includeOnlyTopIndexingPressureMetrics); if (out.getVersion().onOrAfter(Version.V_3_0_0)) { out.writeEnumSet(includeCaches); + out.writeStringArrayNullable(levels); } } @@ -135,6 +138,7 @@ public CommonStatsFlags all() { includeAllShardIndexingPressureTrackers = false; includeOnlyTopIndexingPressureMetrics = false; includeCaches = EnumSet.noneOf(CacheType.class); + levels = null; return this; } @@ -151,6 +155,7 @@ public CommonStatsFlags clear() { includeAllShardIndexingPressureTrackers = false; includeOnlyTopIndexingPressureMetrics = false; includeCaches = EnumSet.noneOf(CacheType.class); + levels = null; return this; } @@ -166,6 +171,10 @@ public EnumSet getIncludeCaches() { return includeCaches; } + public String[] getLevels() { + return levels; + } + /** * Sets specific search group stats to retrieve the stats for. Mainly affects search * when enabled. 
@@ -231,6 +240,11 @@ public CommonStatsFlags includeAllCacheTypes() { return this; } + public CommonStatsFlags setLevels(String[] inputLevels) { + levels = inputLevels; + return this; + } + public boolean includeUnloadedSegments() { return this.includeUnloadedSegments; } diff --git a/server/src/main/java/org/opensearch/common/cache/ICache.java b/server/src/main/java/org/opensearch/common/cache/ICache.java index 8d8964abf0829..8a0f7c0c6464e 100644 --- a/server/src/main/java/org/opensearch/common/cache/ICache.java +++ b/server/src/main/java/org/opensearch/common/cache/ICache.java @@ -45,8 +45,12 @@ public interface ICache extends Closeable { void refresh(); + // Return all stats without aggregation. ImmutableCacheStatsHolder stats(); + // Return stats aggregated by the provided levels. If levels is null, do not aggregate and return all stats. + ImmutableCacheStatsHolder stats(String[] levels); + /** * Factory to create objects. */ diff --git a/server/src/main/java/org/opensearch/common/cache/service/CacheService.java b/server/src/main/java/org/opensearch/common/cache/service/CacheService.java index 2d560fd648571..76658a8995614 100644 --- a/server/src/main/java/org/opensearch/common/cache/service/CacheService.java +++ b/server/src/main/java/org/opensearch/common/cache/service/CacheService.java @@ -68,7 +68,7 @@ public ICache createCache(CacheConfig config, CacheType cache public NodeCacheStats stats(CommonStatsFlags flags) { Map statsMap = new HashMap<>(); for (CacheType type : cacheTypeMap.keySet()) { - statsMap.put(type, cacheTypeMap.get(type).stats()); + statsMap.put(type, cacheTypeMap.get(type).stats(flags.getLevels())); } return new NodeCacheStats(statsMap, flags); } diff --git a/server/src/main/java/org/opensearch/common/cache/stats/CacheStatsHolder.java b/server/src/main/java/org/opensearch/common/cache/stats/CacheStatsHolder.java index 296015983fc7f..2172442b47e75 100644 --- a/server/src/main/java/org/opensearch/common/cache/stats/CacheStatsHolder.java +++ b/server/src/main/java/org/opensearch/common/cache/stats/CacheStatsHolder.java @@ -10,6 +10,7 @@ import org.opensearch.common.annotation.ExperimentalApi; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -160,10 +161,17 @@ private boolean internalIncrementHelper( } /** - * Produce an immutable version of these stats. + * Produce an immutable version of these stats, aggregated according to levels. + * If levels is null, do not aggregate and return an immutable version of the original tree. 
*/ - public ImmutableCacheStatsHolder getImmutableCacheStatsHolder() { - return new ImmutableCacheStatsHolder(statsRoot.snapshot(), dimensionNames, storeName); + public ImmutableCacheStatsHolder getImmutableCacheStatsHolder(String[] levels) { + List levelsList; + if (levels == null) { + levelsList = dimensionNames; + } else { + levelsList = Arrays.asList(levels); + } + return new ImmutableCacheStatsHolder(this.statsRoot, levelsList, dimensionNames, storeName); } public void removeDimensions(List dimensionValues) { diff --git a/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java b/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java index 07ed853052014..b3a48f8659f02 100644 --- a/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java +++ b/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java @@ -35,15 +35,24 @@ public class ImmutableCacheStatsHolder implements Writeable, ToXContent { // Root node of immutable snapshot of stats within a CacheStatsHolder, containing all the stats maintained by the cache. // Pkg-private for testing. final Node statsRoot; + // The dimension names for each level in this tree. final List dimensionNames; // The name of the cache type producing these stats. Returned in API response. final String storeName; public static String STORE_NAME_FIELD = "store_name"; - public ImmutableCacheStatsHolder(Node statsRoot, List dimensionNames, String storeName) { - this.statsRoot = statsRoot; - this.dimensionNames = dimensionNames; + ImmutableCacheStatsHolder( + CacheStatsHolder.Node originalStatsRoot, + List levels, + List originalDimensionNames, + String storeName + ) { + // Aggregate from the original CacheStatsHolder according to the levels passed in. + List filteredLevels = filterLevels(levels, originalDimensionNames); + this.dimensionNames = filteredLevels; // The dimension names for this immutable snapshot should reflect the levels we aggregate in + // the snapshot this.storeName = storeName; + this.statsRoot = aggregateByLevels(filteredLevels, originalStatsRoot, originalDimensionNames); } public ImmutableCacheStatsHolder(StreamInput in) throws IOException { @@ -51,17 +60,12 @@ public ImmutableCacheStatsHolder(StreamInput in) throws IOException { // of the last node we read. This allows us to avoid ambiguity if nodes have the same dimension value, without // having to serialize the whole path to each node. this.dimensionNames = List.of(in.readStringArray()); - this.statsRoot = new Node("", false, new ImmutableCacheStats(0, 0, 0, 0, 0)); + ImmutableCacheStats rootNodeStats = new ImmutableCacheStats(in); + this.statsRoot = new Node("", false, rootNodeStats); List ancestorsOfLastRead = List.of(statsRoot); while (ancestorsOfLastRead != null) { ancestorsOfLastRead = readAndAttachDimensionNode(in, ancestorsOfLastRead); } - // Finally, update sum-of-children stats for the root node - CacheStats totalStats = new CacheStats(); - for (Node child : statsRoot.children.values()) { - totalStats.add(child.getStats()); - } - statsRoot.incrementStats(totalStats.immutableSnapshot()); this.storeName = in.readString(); } @@ -70,6 +74,7 @@ public void writeTo(StreamOutput out) throws IOException { // Write each node in preorder order, along with its depth. // Then, when rebuilding the tree from the stream, we can always find the correct parent to attach each node to. 
out.writeStringArray(dimensionNames.toArray(new String[0])); + statsRoot.stats.writeTo(out); for (Node child : statsRoot.children.values()) { writeDimensionNodeRecursive(out, child, 1); } @@ -158,13 +163,10 @@ public ImmutableCacheStats getStatsForDimensionValues(List dimensionValu * The new tree only has dimensions matching the levels passed in. * The levels passed in must be in the proper order, as they would be in the output of filterLevels(). */ - Node aggregateByLevels(List filteredLevels) { - if (filteredLevels.isEmpty()) { - throw new IllegalArgumentException("Filtered levels passed to aggregateByLevels() cannot be empty"); - } - Node newRoot = new Node("", false, statsRoot.getStats()); - for (Node child : statsRoot.children.values()) { - aggregateByLevelsHelper(newRoot, child, filteredLevels, 0); + Node aggregateByLevels(List filteredLevels, CacheStatsHolder.Node originalStatsRoot, List originalDimensionNames) { + Node newRoot = new Node("", false, originalStatsRoot.getImmutableStats()); + for (CacheStatsHolder.Node child : originalStatsRoot.children.values()) { + aggregateByLevelsHelper(newRoot, child, filteredLevels, originalDimensionNames, 0); } return newRoot; } @@ -177,22 +179,28 @@ Node aggregateByLevels(List filteredLevels) { * This should be ok because the resulting trees are short-lived objects that are not exposed anywhere outside this class, * and the original tree is never changed. */ - private void aggregateByLevelsHelper(Node parentInNewTree, Node currentInOriginalTree, List levels, int depth) { - if (levels.contains(dimensionNames.get(depth))) { + private void aggregateByLevelsHelper( + Node parentInNewTree, + CacheStatsHolder.Node currentInOriginalTree, + List levels, + List originalDimensionNames, + int depth + ) { + if (levels.contains(originalDimensionNames.get(depth))) { // If this node is in a level we want to aggregate, create a new dimension node with the same value and stats, and connect it to // the last parent node in the new tree. If it already exists, increment it instead. 
String dimensionValue = currentInOriginalTree.getDimensionValue(); Node nodeInNewTree = parentInNewTree.children.get(dimensionValue); if (nodeInNewTree == null) { // Create new node with stats matching the node from the original tree - int indexOfLastLevel = dimensionNames.indexOf(levels.get(levels.size() - 1)); + int indexOfLastLevel = originalDimensionNames.indexOf(levels.get(levels.size() - 1)); boolean isLeafNode = depth == indexOfLastLevel; // If this is the last level we aggregate, the new node should be a leaf // node - nodeInNewTree = new Node(dimensionValue, isLeafNode, currentInOriginalTree.getStats()); + nodeInNewTree = new Node(dimensionValue, isLeafNode, currentInOriginalTree.getImmutableStats()); parentInNewTree.addChild(dimensionValue, nodeInNewTree); } else { // Otherwise increment existing stats - nodeInNewTree.incrementStats(currentInOriginalTree.getStats()); + nodeInNewTree.incrementStats(currentInOriginalTree.getImmutableStats()); } // Finally set the parent node to be this node for the next callers of this function parentInNewTree = nodeInNewTree; @@ -200,9 +208,9 @@ private void aggregateByLevelsHelper(Node parentInNewTree, Node currentInOrigina if (!currentInOriginalTree.children.isEmpty()) { // Not a leaf node - for (Map.Entry childEntry : currentInOriginalTree.children.entrySet()) { - Node child = childEntry.getValue(); - aggregateByLevelsHelper(parentInNewTree, child, levels, depth + 1); + for (Map.Entry childEntry : currentInOriginalTree.children.entrySet()) { + CacheStatsHolder.Node child = childEntry.getValue(); + aggregateByLevelsHelper(parentInNewTree, child, levels, originalDimensionNames, depth + 1); } } } @@ -211,12 +219,12 @@ private void aggregateByLevelsHelper(Node parentInNewTree, Node currentInOrigina * Filters out levels that aren't in dimensionNames, and orders the resulting list to match the order in dimensionNames. * Unrecognized levels are ignored. 
*/ - private List filterLevels(List levels) { + private List filterLevels(List levels, List originalDimensionNames) { if (levels == null) { return new ArrayList<>(); } List filtered = new ArrayList<>(); - for (String dimensionName : dimensionNames) { + for (String dimensionName : originalDimensionNames) { if (levels.contains(dimensionName)) { filtered.add(dimensionName); } @@ -229,9 +237,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws // Always show total stats, regardless of levels getTotalStats().toXContent(builder, params); - List filteredLevels = filterLevels(getLevels(params)); + List filteredLevels = filterLevels(getLevels(params), dimensionNames); if (!filteredLevels.isEmpty()) { - toXContentForLevels(builder, params, filteredLevels); + toXContentForLevels(builder, params); } // Also add the store name for the cache that produced the stats @@ -239,26 +247,24 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - XContentBuilder toXContentForLevels(XContentBuilder builder, Params params, List filteredLevels) throws IOException { - Node aggregated = aggregateByLevels(filteredLevels); + XContentBuilder toXContentForLevels(XContentBuilder builder, Params params) throws IOException { // Depth -1 corresponds to the dummy root node - toXContentForLevelsHelper(-1, aggregated, filteredLevels, builder, params); + toXContentForLevelsHelper(-1, statsRoot, builder, params); return builder; } - private void toXContentForLevelsHelper(int depth, Node current, List levels, XContentBuilder builder, Params params) - throws IOException { + private void toXContentForLevelsHelper(int depth, Node current, XContentBuilder builder, Params params) throws IOException { if (depth >= 0) { builder.startObject(current.dimensionValue); } - if (depth == levels.size() - 1) { + if (depth == dimensionNames.size() - 1) { // This is a leaf node current.getStats().toXContent(builder, params); } else { - builder.startObject(levels.get(depth + 1)); + builder.startObject(dimensionNames.get(depth + 1)); for (Node nextNode : current.children.values()) { - toXContentForLevelsHelper(depth + 1, nextNode, levels, builder, params); + toXContentForLevelsHelper(depth + 1, nextNode, builder, params); } builder.endObject(); } diff --git a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java index 2c6b70ea3a693..b1082e7d5a2a9 100644 --- a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java +++ b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java @@ -134,7 +134,12 @@ public void close() {} @Override public ImmutableCacheStatsHolder stats() { - return cacheStatsHolder.getImmutableCacheStatsHolder(); + return stats(null); + } + + @Override + public ImmutableCacheStatsHolder stats(String[] levels) { + return cacheStatsHolder.getImmutableCacheStatsHolder(levels); } @Override diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java index e33959abcc4e9..c9c21420a0f06 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java @@ -229,6 +229,10 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final 
NodeC nodesStatsRequest.indices().includeOnlyTopIndexingPressureMetrics(request.paramAsBoolean("top", false)); } + // If no levels are passed in this results in an empty array. + String[] levels = Strings.splitStringByCommaToArray(request.param("level")); + nodesStatsRequest.indices().setLevels(levels); + return channel -> client.admin().cluster().nodesStats(nodesStatsRequest, new NodesResponseRestListener<>(channel)); } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java index d268b11e1c61c..f4cf91481b494 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -965,7 +965,7 @@ public void apply(String action, AdmissionControlActionType admissionControlActi flags.includeCacheType(cacheType); } } - ImmutableCacheStatsHolder cacheStats = statsHolder.getImmutableCacheStatsHolder(); + ImmutableCacheStatsHolder cacheStats = statsHolder.getImmutableCacheStatsHolder(dimensionNames.toArray(new String[0])); Map cacheStatsMap = new HashMap<>(); cacheStatsMap.put(CacheType.INDICES_REQUEST_CACHE, cacheStats); nodeCacheStats = new NodeCacheStats(cacheStatsMap, flags); diff --git a/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java b/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java index 9e4bc19f615cd..a2a21b897e458 100644 --- a/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java +++ b/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java @@ -32,17 +32,27 @@ public void testSerialization() throws Exception { CacheStatsHolder statsHolder = new CacheStatsHolder(dimensionNames, storeName); Map> usedDimensionValues = CacheStatsHolderTests.getUsedDimensionValues(statsHolder, 10); CacheStatsHolderTests.populateStats(statsHolder, usedDimensionValues, 100, 10); - ImmutableCacheStatsHolder stats = statsHolder.getImmutableCacheStatsHolder(); + ImmutableCacheStatsHolder stats = statsHolder.getImmutableCacheStatsHolder(null); + assertNotEquals(0, stats.getStatsRoot().children.size()); BytesStreamOutput os = new BytesStreamOutput(); stats.writeTo(os); BytesStreamInput is = new BytesStreamInput(BytesReference.toBytes(os.bytes())); ImmutableCacheStatsHolder deserialized = new ImmutableCacheStatsHolder(is); - assertEquals(stats.dimensionNames, deserialized.dimensionNames); - assertEquals(stats.storeName, deserialized.storeName); - assertEquals(stats, deserialized); + + // also test empty dimension stats + ImmutableCacheStatsHolder emptyDims = statsHolder.getImmutableCacheStatsHolder(new String[] {}); + assertEquals(0, emptyDims.getStatsRoot().children.size()); + assertEquals(stats.getTotalStats(), emptyDims.getTotalStats()); + + os = new BytesStreamOutput(); + emptyDims.writeTo(os); + is = new BytesStreamInput(BytesReference.toBytes(os.bytes())); + deserialized = new ImmutableCacheStatsHolder(is); + + assertEquals(emptyDims, deserialized); } public void testEquals() throws Exception { @@ -53,13 +63,13 @@ public void testEquals() throws Exception { Map> usedDimensionValues = CacheStatsHolderTests.getUsedDimensionValues(statsHolder, 10); CacheStatsHolderTests.populateStats(List.of(statsHolder, differentStoreNameStatsHolder), usedDimensionValues, 100, 10); 
CacheStatsHolderTests.populateStats(nonMatchingStatsHolder, usedDimensionValues, 100, 10); - ImmutableCacheStatsHolder stats = statsHolder.getImmutableCacheStatsHolder(); + ImmutableCacheStatsHolder stats = statsHolder.getImmutableCacheStatsHolder(null); - ImmutableCacheStatsHolder secondStats = statsHolder.getImmutableCacheStatsHolder(); + ImmutableCacheStatsHolder secondStats = statsHolder.getImmutableCacheStatsHolder(null); assertEquals(stats, secondStats); - ImmutableCacheStatsHolder nonMatchingStats = nonMatchingStatsHolder.getImmutableCacheStatsHolder(); + ImmutableCacheStatsHolder nonMatchingStats = nonMatchingStatsHolder.getImmutableCacheStatsHolder(null); assertNotEquals(stats, nonMatchingStats); - ImmutableCacheStatsHolder differentStoreNameStats = differentStoreNameStatsHolder.getImmutableCacheStatsHolder(); + ImmutableCacheStatsHolder differentStoreNameStats = differentStoreNameStatsHolder.getImmutableCacheStatsHolder(null); assertNotEquals(stats, differentStoreNameStats); } @@ -68,7 +78,7 @@ public void testGet() throws Exception { CacheStatsHolder cacheStatsHolder = new CacheStatsHolder(dimensionNames, storeName); Map> usedDimensionValues = CacheStatsHolderTests.getUsedDimensionValues(cacheStatsHolder, 10); Map, CacheStats> expected = CacheStatsHolderTests.populateStats(cacheStatsHolder, usedDimensionValues, 1000, 10); - ImmutableCacheStatsHolder stats = cacheStatsHolder.getImmutableCacheStatsHolder(); + ImmutableCacheStatsHolder stats = cacheStatsHolder.getImmutableCacheStatsHolder(dimensionNames.toArray(new String[0])); // test the value in the map is as expected for each distinct combination of values for (List dimensionValues : expected.keySet()) { @@ -103,7 +113,7 @@ public void testEmptyDimsList() throws Exception { CacheStatsHolder cacheStatsHolder = new CacheStatsHolder(List.of(), storeName); Map> usedDimensionValues = CacheStatsHolderTests.getUsedDimensionValues(cacheStatsHolder, 100); CacheStatsHolderTests.populateStats(cacheStatsHolder, usedDimensionValues, 10, 100); - ImmutableCacheStatsHolder stats = cacheStatsHolder.getImmutableCacheStatsHolder(); + ImmutableCacheStatsHolder stats = cacheStatsHolder.getImmutableCacheStatsHolder(null); ImmutableCacheStatsHolder.Node statsRoot = stats.getStatsRoot(); assertEquals(0, statsRoot.children.size()); @@ -116,17 +126,16 @@ public void testAggregateByAllDimensions() throws Exception { CacheStatsHolder statsHolder = new CacheStatsHolder(dimensionNames, storeName); Map> usedDimensionValues = CacheStatsHolderTests.getUsedDimensionValues(statsHolder, 10); Map, CacheStats> expected = CacheStatsHolderTests.populateStats(statsHolder, usedDimensionValues, 1000, 10); - ImmutableCacheStatsHolder stats = statsHolder.getImmutableCacheStatsHolder(); + ImmutableCacheStatsHolder stats = statsHolder.getImmutableCacheStatsHolder(dimensionNames.toArray(new String[0])); - ImmutableCacheStatsHolder.Node aggregated = stats.aggregateByLevels(dimensionNames); for (Map.Entry, CacheStats> expectedEntry : expected.entrySet()) { List dimensionValues = new ArrayList<>(); for (String dimValue : expectedEntry.getKey()) { dimensionValues.add(dimValue); } - assertEquals(expectedEntry.getValue().immutableSnapshot(), getNode(dimensionValues, aggregated).getStats()); + assertEquals(expectedEntry.getValue().immutableSnapshot(), getNode(dimensionValues, stats.statsRoot).getStats()); } - assertSumOfChildrenStats(aggregated); + assertSumOfChildrenStats(stats.statsRoot); } public void testAggregateBySomeDimensions() throws Exception { @@ -134,7 +143,6 @@ public 
void testAggregateBySomeDimensions() throws Exception { CacheStatsHolder statsHolder = new CacheStatsHolder(dimensionNames, storeName); Map> usedDimensionValues = CacheStatsHolderTests.getUsedDimensionValues(statsHolder, 10); Map, CacheStats> expected = CacheStatsHolderTests.populateStats(statsHolder, usedDimensionValues, 1000, 10); - ImmutableCacheStatsHolder stats = statsHolder.getImmutableCacheStatsHolder(); for (int i = 0; i < (1 << dimensionNames.size()); i++) { // Test each combination of possible levels @@ -144,11 +152,15 @@ public void testAggregateBySomeDimensions() throws Exception { levels.add(dimensionNames.get(nameIndex)); } } + if (levels.size() == 0) { - assertThrows(IllegalArgumentException.class, () -> stats.aggregateByLevels(levels)); + // If we pass empty levels to CacheStatsHolder to aggregate by, we should only get a root node with the total stats in it + ImmutableCacheStatsHolder stats = statsHolder.getImmutableCacheStatsHolder(levels.toArray(new String[0])); + assertEquals(statsHolder.getStatsRoot().getImmutableStats(), stats.getStatsRoot().getStats()); + assertEquals(0, stats.getStatsRoot().children.size()); } else { - ImmutableCacheStatsHolder.Node aggregated = stats.aggregateByLevels(levels); - Map, ImmutableCacheStatsHolder.Node> aggregatedLeafNodes = getAllLeafNodes(aggregated); + ImmutableCacheStatsHolder stats = statsHolder.getImmutableCacheStatsHolder(levels.toArray(new String[0])); + Map, ImmutableCacheStatsHolder.Node> aggregatedLeafNodes = getAllLeafNodes(stats.statsRoot); for (Map.Entry, ImmutableCacheStatsHolder.Node> aggEntry : aggregatedLeafNodes.entrySet()) { CacheStats expectedCounter = new CacheStats(); @@ -159,7 +171,7 @@ public void testAggregateBySomeDimensions() throws Exception { } assertEquals(expectedCounter.immutableSnapshot(), aggEntry.getValue().getStats()); } - assertSumOfChildrenStats(aggregated); + assertSumOfChildrenStats(stats.statsRoot); } } } @@ -181,13 +193,13 @@ public void testXContentForLevels() throws Exception { new CacheStats(4, 4, 4, 4, 4) ) ); - ImmutableCacheStatsHolder stats = statsHolder.getImmutableCacheStatsHolder(); + ImmutableCacheStatsHolder stats = statsHolder.getImmutableCacheStatsHolder(dimensionNames.toArray(new String[0])); XContentBuilder builder = XContentFactory.jsonBuilder(); ToXContent.Params params = ToXContent.EMPTY_PARAMS; builder.startObject(); - stats.toXContentForLevels(builder, params, List.of("A", "B", "C")); + stats.toXContentForLevels(builder, params); builder.endObject(); String resultString = builder.toString(); Map result = XContentHelper.convertToMap(MediaTypeRegistry.JSON.xContent(), resultString, true); @@ -233,12 +245,13 @@ public void testXContent() throws Exception { CacheStatsHolder statsHolder = new CacheStatsHolder(dimensionNames, storeName); Map> usedDimensionValues = CacheStatsHolderTests.getUsedDimensionValues(statsHolder, 10); CacheStatsHolderTests.populateStats(statsHolder, usedDimensionValues, 100, 10); - ImmutableCacheStatsHolder stats = statsHolder.getImmutableCacheStatsHolder(); // If the levels in the params are empty or contains only unrecognized levels, we should only see the total stats and no level // aggregation - List paramsList = List.of(ToXContent.EMPTY_PARAMS, getLevelParams(List.of()), getLevelParams(List.of("D"))); - for (ToXContent.Params params : paramsList) { + List> levelsList = List.of(List.of(), List.of("D")); + for (List levels : levelsList) { + ImmutableCacheStatsHolder stats = statsHolder.getImmutableCacheStatsHolder(levels.toArray(new String[0])); + 
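// Build the XContent params from the same levels used to aggregate the holder above.
+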
ToXContent.Params params = getLevelParams(levels); XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject(); stats.toXContent(builder, params); @@ -253,7 +266,9 @@ public void testXContent() throws Exception { } // if we pass recognized levels in any order, alongside ignored unrecognized levels, we should see the above plus level aggregation - ToXContent.Params params = getLevelParams(List.of("C", "A", "E")); + List levels = List.of("C", "A", "E"); + ImmutableCacheStatsHolder stats = statsHolder.getImmutableCacheStatsHolder(levels.toArray(new String[0])); + ToXContent.Params params = getLevelParams(levels); XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject(); stats.toXContent(builder, params); From 6764e86b1e3aff23ad886d3a77f659204b8328a9 Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Tue, 23 Apr 2024 12:17:19 -0700 Subject: [PATCH 12/19] Addressed misc comments Signed-off-by: Peter Alfonsi --- .../org/opensearch/cache/store/disk/EhcacheDiskCache.java | 3 ++- .../indices/CacheStatsAPIIndicesRequestCacheIT.java | 7 ++----- .../org/opensearch/common/cache/service/CacheService.java | 3 ++- .../opensearch/common/cache/service/NodeCacheStats.java | 5 +++-- .../common/cache/stats/ImmutableCacheStats.java | 8 +++----- .../action/admin/cluster/node/stats/NodeStatsTests.java | 3 ++- .../cache/stats/ImmutableCacheStatsHolderTests.java | 4 ++-- 7 files changed, 16 insertions(+), 17 deletions(-) diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java index b05814d4865ee..2d14d91ebbff5 100644 --- a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java @@ -453,7 +453,8 @@ public ImmutableCacheStatsHolder stats() { } /** - * Relevant stats for this cache, aggregated by levels.. + * Relevant stats for this cache, aggregated by levels. + * @param levels The levels to aggregate by. 
* @return ImmutableCacheStatsHolder */ @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java index e282278c2d24c..32e04403959ac 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java @@ -90,7 +90,7 @@ public void testCacheStatsAPIWIthOnHeapCache() throws Exception { int requestSize = (int) ((Map) ImmutableCacheStatsHolderTests.getValueFromNestedXContentMap( xContentMap, index1Keys - )).get(ImmutableCacheStats.Fields.MEMORY_SIZE_IN_BYTES); + )).get(ImmutableCacheStats.Fields.SIZE_IN_BYTES); assertTrue(requestSize > 0); List index2Keys = List.of( @@ -293,10 +293,7 @@ private static void checkCacheStatsAPIResponse( assertEquals(expectedStats.getMisses(), (int) aggregatedStatsResponse.get(ImmutableCacheStats.Fields.MISS_COUNT)); assertEquals(expectedStats.getEvictions(), (int) aggregatedStatsResponse.get(ImmutableCacheStats.Fields.EVICTIONS)); if (checkMemorySize) { - assertEquals( - expectedStats.getSizeInBytes(), - (int) aggregatedStatsResponse.get(ImmutableCacheStats.Fields.MEMORY_SIZE_IN_BYTES) - ); + assertEquals(expectedStats.getSizeInBytes(), (int) aggregatedStatsResponse.get(ImmutableCacheStats.Fields.SIZE_IN_BYTES)); } if (checkEntries) { assertEquals(expectedStats.getEntries(), (int) aggregatedStatsResponse.get(ImmutableCacheStats.Fields.ENTRIES)); diff --git a/server/src/main/java/org/opensearch/common/cache/service/CacheService.java b/server/src/main/java/org/opensearch/common/cache/service/CacheService.java index 76658a8995614..8b87af6200124 100644 --- a/server/src/main/java/org/opensearch/common/cache/service/CacheService.java +++ b/server/src/main/java/org/opensearch/common/cache/service/CacheService.java @@ -22,6 +22,7 @@ import java.util.HashMap; import java.util.Map; +import java.util.TreeMap; /** * Service responsible to create caches. 
@@ -66,7 +67,7 @@ public ICache createCache(CacheConfig config, CacheType cache } public NodeCacheStats stats(CommonStatsFlags flags) { - Map statsMap = new HashMap<>(); + TreeMap statsMap = new TreeMap<>(); for (CacheType type : cacheTypeMap.keySet()) { statsMap.put(type, cacheTypeMap.get(type).stats(flags.getLevels())); } diff --git a/server/src/main/java/org/opensearch/common/cache/service/NodeCacheStats.java b/server/src/main/java/org/opensearch/common/cache/service/NodeCacheStats.java index 76f00570e8edf..dc3b69590c906 100644 --- a/server/src/main/java/org/opensearch/common/cache/service/NodeCacheStats.java +++ b/server/src/main/java/org/opensearch/common/cache/service/NodeCacheStats.java @@ -30,11 +30,12 @@ */ @ExperimentalApi public class NodeCacheStats implements ToXContentFragment, Writeable { + // Use TreeMap to force consistent ordering of caches in API responses private final TreeMap statsByCache; private final CommonStatsFlags flags; - public NodeCacheStats(Map statsByCache, CommonStatsFlags flags) { - this.statsByCache = new TreeMap<>(statsByCache); // Use TreeMap to force consistent ordering of caches in API responses + public NodeCacheStats(TreeMap statsByCache, CommonStatsFlags flags) { + this.statsByCache = statsByCache; this.flags = flags; } diff --git a/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStats.java b/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStats.java index 80e03ff28cbb5..ea6af3b140588 100644 --- a/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStats.java +++ b/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStats.java @@ -107,7 +107,7 @@ public int hashCode() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { // We don't write the header in CacheStatsResponse's toXContent, because it doesn't know the name of aggregation it's part of - builder.humanReadableField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, new ByteSizeValue(sizeInBytes)); + builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(sizeInBytes)); builder.field(Fields.EVICTIONS, evictions); builder.field(Fields.HIT_COUNT, hits); builder.field(Fields.MISS_COUNT, misses); @@ -119,10 +119,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws * Field names used to write the values in this object to XContent. */ public static final class Fields { - public static final String MEMORY_SIZE = "size"; - public static final String MEMORY_SIZE_IN_BYTES = "size_in_bytes"; - // TODO: This might not be memory as it could be partially on disk, so I've changed it, but should it be consistent with the earlier - // field? 
+ public static final String SIZE = "size"; + public static final String SIZE_IN_BYTES = "size_in_bytes"; public static final String EVICTIONS = "evictions"; public static final String HIT_COUNT = "hit_count"; public static final String MISS_COUNT = "miss_count"; diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java index f4cf91481b494..afa60f66faec0 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -95,6 +95,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.TreeMap; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.StreamSupport; @@ -966,7 +967,7 @@ public void apply(String action, AdmissionControlActionType admissionControlActi } } ImmutableCacheStatsHolder cacheStats = statsHolder.getImmutableCacheStatsHolder(dimensionNames.toArray(new String[0])); - Map cacheStatsMap = new HashMap<>(); + TreeMap cacheStatsMap = new TreeMap<>(); cacheStatsMap.put(CacheType.INDICES_REQUEST_CACHE, cacheStats); nodeCacheStats = new NodeCacheStats(cacheStatsMap, flags); } diff --git a/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java b/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java index a2a21b897e458..6d97a5cbb4e5a 100644 --- a/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java +++ b/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java @@ -205,7 +205,7 @@ public void testXContentForLevels() throws Exception { Map result = XContentHelper.convertToMap(MediaTypeRegistry.JSON.xContent(), resultString, true); Map> fieldNamesMap = Map.of( - ImmutableCacheStats.Fields.MEMORY_SIZE_IN_BYTES, + ImmutableCacheStats.Fields.SIZE_IN_BYTES, (counter, value) -> counter.sizeInBytes.inc(value), ImmutableCacheStats.Fields.EVICTIONS, (counter, value) -> counter.evictions.inc(value), @@ -283,7 +283,7 @@ public void testXContent() throws Exception { private void assertTotalStatsPresentInXContentResponse(Map result) { // assert the total stats are present - assertNotEquals(0, (int) result.get(ImmutableCacheStats.Fields.MEMORY_SIZE_IN_BYTES)); + assertNotEquals(0, (int) result.get(ImmutableCacheStats.Fields.SIZE_IN_BYTES)); assertNotEquals(0, (int) result.get(ImmutableCacheStats.Fields.EVICTIONS)); assertNotEquals(0, (int) result.get(ImmutableCacheStats.Fields.HIT_COUNT)); assertNotEquals(0, (int) result.get(ImmutableCacheStats.Fields.MISS_COUNT)); From 2140baf598adceda80e228674cfd3b1b97c570fb Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Tue, 23 Apr 2024 13:01:34 -0700 Subject: [PATCH 13/19] Changed serialization method Signed-off-by: Peter Alfonsi --- .../stats/ImmutableCacheStatsHolder.java | 99 +++++++++---------- 1 file changed, 48 insertions(+), 51 deletions(-) diff --git a/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java b/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java index b3a48f8659f02..960ce74ad1689 100644 --- a/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java +++ b/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java @@ -22,6 +22,7 @@ import 
java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Stack; import java.util.TreeMap; /** @@ -41,6 +42,12 @@ public class ImmutableCacheStatsHolder implements Writeable, ToXContent { final String storeName; public static String STORE_NAME_FIELD = "store_name"; + // Values used for serializing/deserializing the tree. + private static final String SERIALIZATION_CHILDREN_OPEN_BRACKET = "<"; + private static final String SERIALIZATION_CHILDREN_CLOSE_BRACKET = ">"; + private static final String SERIALIZATION_BEGIN_NODE = "_"; + private static final String SERIALIZATION_DONE = "end"; + ImmutableCacheStatsHolder( CacheStatsHolder.Node originalStatsRoot, List levels, @@ -56,71 +63,61 @@ public class ImmutableCacheStatsHolder implements Writeable, ToXContent { } public ImmutableCacheStatsHolder(StreamInput in) throws IOException { - // Because we write in preorder order, the parent of the next node we read will always be one of the ancestors - // of the last node we read. This allows us to avoid ambiguity if nodes have the same dimension value, without - // having to serialize the whole path to each node. this.dimensionNames = List.of(in.readStringArray()); - ImmutableCacheStats rootNodeStats = new ImmutableCacheStats(in); - this.statsRoot = new Node("", false, rootNodeStats); - List ancestorsOfLastRead = List.of(statsRoot); - while (ancestorsOfLastRead != null) { - ancestorsOfLastRead = readAndAttachDimensionNode(in, ancestorsOfLastRead); - } this.storeName = in.readString(); + this.statsRoot = deserializeTree(in); } - @Override public void writeTo(StreamOutput out) throws IOException { - // Write each node in preorder order, along with its depth. - // Then, when rebuilding the tree from the stream, we can always find the correct parent to attach each node to. out.writeStringArray(dimensionNames.toArray(new String[0])); - statsRoot.stats.writeTo(out); - for (Node child : statsRoot.children.values()) { - writeDimensionNodeRecursive(out, child, 1); - } - out.writeBoolean(false); // Write false to signal there are no more nodes out.writeString(storeName); + writeNode(statsRoot, out); + out.writeString(SERIALIZATION_DONE); } - private void writeDimensionNodeRecursive(StreamOutput out, Node node, int depth) throws IOException { - out.writeBoolean(true); // Signals there is a following node to deserialize - out.writeVInt(depth); - out.writeString(node.getDimensionValue()); - node.getStats().writeTo(out); + private void writeNode(Node node, StreamOutput out) throws IOException { + out.writeString(SERIALIZATION_BEGIN_NODE); + out.writeString(node.dimensionValue); + out.writeBoolean(node.children.isEmpty()); // Write whether this is a leaf node + node.stats.writeTo(out); - if (!node.children.isEmpty()) { - // Not a leaf node - out.writeBoolean(false); // Write true to indicate this is not a leaf node - for (Node child : node.children.values()) { - writeDimensionNodeRecursive(out, child, depth + 1); - } - } else { - out.writeBoolean(true); // Write true to indicate this is a leaf node + out.writeString(SERIALIZATION_CHILDREN_OPEN_BRACKET); + for (Map.Entry entry : node.children.entrySet()) { + out.writeString(entry.getKey()); + writeNode(entry.getValue(), out); } + out.writeString(SERIALIZATION_CHILDREN_CLOSE_BRACKET); } - /** - * Reads a serialized dimension node, attaches it to its appropriate place in the tree, and returns the list of - * ancestors of the newly attached node. 
- */ - private List readAndAttachDimensionNode(StreamInput in, List ancestorsOfLastRead) throws IOException { - boolean hasNextNode = in.readBoolean(); - if (hasNextNode) { - int depth = in.readVInt(); - String nodeDimensionValue = in.readString(); - ImmutableCacheStats stats = new ImmutableCacheStats(in); - boolean isLeafNode = in.readBoolean(); - - Node result = new Node(nodeDimensionValue, isLeafNode, stats); - Node parent = ancestorsOfLastRead.get(depth - 1); - parent.getChildren().put(nodeDimensionValue, result); - List ancestors = new ArrayList<>(ancestorsOfLastRead.subList(0, depth)); - ancestors.add(result); - return ancestors; - } else { - // No more nodes - return null; + private Node deserializeTree(StreamInput in) throws IOException { + Stack stack = new Stack<>(); + in.readString(); // Read and discard SERIALIZATION_BEGIN_NODE for the root node + Node statsRoot = readSingleNode(in); + Node current = statsRoot; + stack.push(statsRoot); + String nextSymbol = in.readString(); + while (!nextSymbol.equals(SERIALIZATION_DONE)) { + switch (nextSymbol) { + case SERIALIZATION_CHILDREN_OPEN_BRACKET: + stack.push(current); + break; + case SERIALIZATION_CHILDREN_CLOSE_BRACKET: + stack.pop(); + break; + case SERIALIZATION_BEGIN_NODE: + current = readSingleNode(in); + stack.peek().children.put(current.dimensionValue, current); + } + nextSymbol = in.readString(); } + return statsRoot; + } + + private Node readSingleNode(StreamInput in) throws IOException { + String dimensionValue = in.readString(); + boolean isLeafNode = in.readBoolean(); + ImmutableCacheStats stats = new ImmutableCacheStats(in); + return new Node(dimensionValue, isLeafNode, stats); } public ImmutableCacheStats getTotalStats() { From 6da2a9f85d399f1abf78085d38f984a3e2e6f0a3 Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Tue, 23 Apr 2024 16:22:13 -0700 Subject: [PATCH 14/19] Addressed Ankit's comments Signed-off-by: Peter Alfonsi --- .../core/common/io/stream/StreamInput.java | 40 +++++++- .../common/tier/TieredSpilloverCache.java | 5 - .../cache/store/disk/EhcacheDiskCache.java | 9 -- .../opensearch/common/cache/CacheType.java | 28 +++--- .../org/opensearch/common/cache/ICache.java | 4 +- .../common/cache/service/CacheService.java | 2 +- .../common/cache/service/NodeCacheStats.java | 10 +- .../common/cache/stats/CacheStatsHolder.java | 21 +---- .../stats/ImmutableCacheStatsHolder.java | 94 +++++++++---------- .../cache/store/OpenSearchOnHeapCache.java | 5 - .../stats/ImmutableCacheStatsHolderTests.java | 2 +- 11 files changed, 112 insertions(+), 108 deletions(-) diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java index ea23b3d81a775..f4c52cb8a6506 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java @@ -80,6 +80,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.Comparator; import java.util.Date; import java.util.EnumSet; import java.util.HashMap; @@ -90,6 +91,8 @@ import java.util.Locale; import java.util.Map; import java.util.Set; +import java.util.SortedMap; +import java.util.TreeMap; import java.util.concurrent.TimeUnit; import java.util.function.IntFunction; @@ -642,12 +645,47 @@ public Map readMap(Writeable.Reader keyReader, Writeable.Reader< return Collections.emptyMap(); } Map map = new HashMap<>(size); + 
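// Shared element-by-element read loop, also used by readOrderedMap below.
+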
readIntoMap(keyReader, valueReader, map, size); + return map; + } + + /** + * Read a serialized map into a SortedMap using the default ordering for the keys. If the result is empty it might be immutable. + */ + public , V> SortedMap readOrderedMap(Writeable.Reader keyReader, Writeable.Reader valueReader) + throws IOException { + return readOrderedMap(keyReader, valueReader, null); + } + + /** + * Read a serialized map into a SortedMap, specifying a Comparator for the keys. If the result is empty it might be immutable. + */ + public , V> SortedMap readOrderedMap( + Writeable.Reader keyReader, + Writeable.Reader valueReader, + @Nullable Comparator keyComparator + ) throws IOException { + int size = readArraySize(); + if (size == 0) { + return Collections.emptySortedMap(); + } + SortedMap sortedMap; + if (keyComparator == null) { + sortedMap = new TreeMap<>(); + } else { + sortedMap = new TreeMap<>(keyComparator); + } + readIntoMap(keyReader, valueReader, sortedMap, size); + return sortedMap; + } + + private void readIntoMap(Writeable.Reader keyReader, Writeable.Reader valueReader, Map map, int size) + throws IOException { for (int i = 0; i < size; i++) { K key = keyReader.read(this); V value = valueReader.read(this); map.put(key, value); } - return map; } /** diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java index 86dc939c3c4b6..0f807a1051d72 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java @@ -204,11 +204,6 @@ public void close() throws IOException { } } - @Override - public ImmutableCacheStatsHolder stats() { - return null; // TODO: in TSC stats PR - } - @Override public ImmutableCacheStatsHolder stats(String[] levels) { return null; // TODO: in TSC stats PR diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java index 2d14d91ebbff5..585ca9a5868e1 100644 --- a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java @@ -443,15 +443,6 @@ public void close() { } } - /** - * Relevant stats for this cache, with no aggregation. - * @return ImmutableCacheStatsHolder - */ - @Override - public ImmutableCacheStatsHolder stats() { - return stats(null); - } - /** * Relevant stats for this cache, aggregated by levels. * @param levels The levels to aggregate by. 
diff --git a/server/src/main/java/org/opensearch/common/cache/CacheType.java b/server/src/main/java/org/opensearch/common/cache/CacheType.java index 61442db148067..0acc08be47d89 100644 --- a/server/src/main/java/org/opensearch/common/cache/CacheType.java +++ b/server/src/main/java/org/opensearch/common/cache/CacheType.java @@ -10,7 +10,9 @@ import org.opensearch.common.annotation.ExperimentalApi; -import java.util.HashSet; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; import java.util.Set; /** @@ -23,6 +25,15 @@ public enum CacheType { private final String settingPrefix; private final String apiRepresentation; + private static final Map representationsMap; + static { + Map reprs = new HashMap<>(); + for (CacheType cacheType : values()) { + reprs.put(cacheType.apiRepresentation, cacheType); + } + representationsMap = Collections.unmodifiableMap(reprs); + } + CacheType(String settingPrefix, String representation) { this.settingPrefix = settingPrefix; this.apiRepresentation = representation; @@ -37,19 +48,14 @@ public String getApiRepresentation() { } public static CacheType getByRepresentation(String representation) { - for (CacheType cacheType : values()) { - if (cacheType.apiRepresentation.equals(representation)) { - return cacheType; - } + CacheType result = representationsMap.get(representation); + if (result == null) { + throw new IllegalArgumentException("No CacheType with representation = " + representation); } - throw new IllegalArgumentException("No CacheType with representation = " + representation); + return result; } public static Set allRepresentations() { - Set reprs = new HashSet<>(); - for (CacheType cacheType : values()) { - reprs.add(cacheType.apiRepresentation); - } - return reprs; + return representationsMap.keySet(); } } diff --git a/server/src/main/java/org/opensearch/common/cache/ICache.java b/server/src/main/java/org/opensearch/common/cache/ICache.java index 8a0f7c0c6464e..bc69ccee0c2fb 100644 --- a/server/src/main/java/org/opensearch/common/cache/ICache.java +++ b/server/src/main/java/org/opensearch/common/cache/ICache.java @@ -46,7 +46,9 @@ public interface ICache extends Closeable { void refresh(); // Return all stats without aggregation. - ImmutableCacheStatsHolder stats(); + default ImmutableCacheStatsHolder stats() { + return stats(null); + } // Return stats aggregated by the provided levels. If levels is null, do not aggregate and return all stats. 
ImmutableCacheStatsHolder stats(String[] levels); diff --git a/server/src/main/java/org/opensearch/common/cache/service/CacheService.java b/server/src/main/java/org/opensearch/common/cache/service/CacheService.java index 8b87af6200124..19843354b48e5 100644 --- a/server/src/main/java/org/opensearch/common/cache/service/CacheService.java +++ b/server/src/main/java/org/opensearch/common/cache/service/CacheService.java @@ -67,7 +67,7 @@ public ICache createCache(CacheConfig config, CacheType cache } public NodeCacheStats stats(CommonStatsFlags flags) { - TreeMap statsMap = new TreeMap<>(); + final TreeMap statsMap = new TreeMap<>(); for (CacheType type : cacheTypeMap.keySet()) { statsMap.put(type, cacheTypeMap.get(type).stats(flags.getLevels())); } diff --git a/server/src/main/java/org/opensearch/common/cache/service/NodeCacheStats.java b/server/src/main/java/org/opensearch/common/cache/service/NodeCacheStats.java index dc3b69590c906..eb37e21e0b925 100644 --- a/server/src/main/java/org/opensearch/common/cache/service/NodeCacheStats.java +++ b/server/src/main/java/org/opensearch/common/cache/service/NodeCacheStats.java @@ -19,9 +19,8 @@ import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; -import java.util.Map; import java.util.Objects; -import java.util.TreeMap; +import java.util.SortedMap; /** * A class creating XContent responses to cache stats API requests. @@ -31,18 +30,17 @@ @ExperimentalApi public class NodeCacheStats implements ToXContentFragment, Writeable { // Use TreeMap to force consistent ordering of caches in API responses - private final TreeMap statsByCache; + private final SortedMap statsByCache; private final CommonStatsFlags flags; - public NodeCacheStats(TreeMap statsByCache, CommonStatsFlags flags) { + public NodeCacheStats(SortedMap statsByCache, CommonStatsFlags flags) { this.statsByCache = statsByCache; this.flags = flags; } public NodeCacheStats(StreamInput in) throws IOException { this.flags = new CommonStatsFlags(in); - Map readMap = in.readMap(i -> i.readEnum(CacheType.class), ImmutableCacheStatsHolder::new); - this.statsByCache = new TreeMap<>(readMap); + this.statsByCache = in.readOrderedMap(i -> i.readEnum(CacheType.class), ImmutableCacheStatsHolder::new); } @Override diff --git a/server/src/main/java/org/opensearch/common/cache/stats/CacheStatsHolder.java b/server/src/main/java/org/opensearch/common/cache/stats/CacheStatsHolder.java index 2172442b47e75..d8743e0a82590 100644 --- a/server/src/main/java/org/opensearch/common/cache/stats/CacheStatsHolder.java +++ b/server/src/main/java/org/opensearch/common/cache/stats/CacheStatsHolder.java @@ -10,12 +10,10 @@ import org.opensearch.common.annotation.ExperimentalApi; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.TreeMap; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; @@ -165,13 +163,7 @@ private boolean internalIncrementHelper( * If levels is null, do not aggregate and return an immutable version of the original tree. 
*/ public ImmutableCacheStatsHolder getImmutableCacheStatsHolder(String[] levels) { - List levelsList; - if (levels == null) { - levelsList = dimensionNames; - } else { - levelsList = Arrays.asList(levels); - } - return new ImmutableCacheStatsHolder(this.statsRoot, levelsList, dimensionNames, storeName); + return new ImmutableCacheStatsHolder(this.statsRoot, levels, dimensionNames, storeName); } public void removeDimensions(List dimensionValues) { @@ -294,16 +286,5 @@ Node getChild(String dimensionValue) { Node createChild(String dimensionValue, boolean createMapInChild) { return children.computeIfAbsent(dimensionValue, (key) -> new Node(dimensionValue, createMapInChild)); } - - ImmutableCacheStatsHolder.Node snapshot() { - TreeMap snapshotChildren = null; - if (!children.isEmpty()) { - snapshotChildren = new TreeMap<>(); - for (Node child : children.values()) { - snapshotChildren.put(child.getDimensionValue(), child.snapshot()); - } - } - return new ImmutableCacheStatsHolder.Node(dimensionValue, snapshotChildren, getImmutableStats()); - } } } diff --git a/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java b/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java index 960ce74ad1689..6e031c3d7993f 100644 --- a/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java +++ b/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java @@ -17,11 +17,12 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.SortedMap; import java.util.Stack; import java.util.TreeMap; @@ -50,22 +51,23 @@ public class ImmutableCacheStatsHolder implements Writeable, ToXContent { ImmutableCacheStatsHolder( CacheStatsHolder.Node originalStatsRoot, - List levels, + String[] levels, List originalDimensionNames, String storeName ) { // Aggregate from the original CacheStatsHolder according to the levels passed in. 
- List filteredLevels = filterLevels(levels, originalDimensionNames); - this.dimensionNames = filteredLevels; // The dimension names for this immutable snapshot should reflect the levels we aggregate in - // the snapshot + // The dimension names for this immutable snapshot should reflect the levels we aggregate in the snapshot + this.dimensionNames = filterLevels(levels, originalDimensionNames); this.storeName = storeName; - this.statsRoot = aggregateByLevels(filteredLevels, originalStatsRoot, originalDimensionNames); + this.statsRoot = aggregateByLevels(originalStatsRoot, originalDimensionNames); + makeNodeUnmodifiable(statsRoot); } public ImmutableCacheStatsHolder(StreamInput in) throws IOException { this.dimensionNames = List.of(in.readStringArray()); this.storeName = in.readString(); this.statsRoot = deserializeTree(in); + makeNodeUnmodifiable(statsRoot); } public void writeTo(StreamOutput out) throws IOException { @@ -90,7 +92,7 @@ private void writeNode(Node node, StreamOutput out) throws IOException { } private Node deserializeTree(StreamInput in) throws IOException { - Stack stack = new Stack<>(); + final Stack stack = new Stack<>(); in.readString(); // Read and discard SERIALIZATION_BEGIN_NODE for the root node Node statsRoot = readSingleNode(in); Node current = statsRoot; @@ -120,6 +122,15 @@ private Node readSingleNode(StreamInput in) throws IOException { return new Node(dimensionValue, isLeafNode, stats); } + private void makeNodeUnmodifiable(Node node) { + if (!node.children.isEmpty()) { + node.children = Collections.unmodifiableSortedMap(node.children); + } + for (Node child : node.children.values()) { + makeNodeUnmodifiable(child); + } + } + public ImmutableCacheStats getTotalStats() { return statsRoot.getStats(); } @@ -160,10 +171,10 @@ public ImmutableCacheStats getStatsForDimensionValues(List dimensionValu * The new tree only has dimensions matching the levels passed in. * The levels passed in must be in the proper order, as they would be in the output of filterLevels(). */ - Node aggregateByLevels(List filteredLevels, CacheStatsHolder.Node originalStatsRoot, List originalDimensionNames) { + Node aggregateByLevels(CacheStatsHolder.Node originalStatsRoot, List originalDimensionNames) { Node newRoot = new Node("", false, originalStatsRoot.getImmutableStats()); for (CacheStatsHolder.Node child : originalStatsRoot.children.values()) { - aggregateByLevelsHelper(newRoot, child, filteredLevels, originalDimensionNames, 0); + aggregateByLevelsHelper(newRoot, child, originalDimensionNames, 0); } return newRoot; } @@ -173,24 +184,25 @@ Node aggregateByLevels(List filteredLevels, CacheStatsHolder.Node origin * fully recursively while also passing in a completed map of children nodes before constructing the parent node. * For this reason, in this function we have to build the new tree top down rather than bottom up. * We use private methods allowing us to add children to/increment the stats for an existing node. - * This should be ok because the resulting trees are short-lived objects that are not exposed anywhere outside this class, - * and the original tree is never changed. + * This should be ok because the resulting tree is unmodifiable after creation in the constructor. + * + * @param allDimensions the list of all dimensions present in the original CacheStatsHolder which produced + * the CacheStatsHolder.Node object we are traversing. 
*/ private void aggregateByLevelsHelper( Node parentInNewTree, CacheStatsHolder.Node currentInOriginalTree, - List levels, - List originalDimensionNames, + List allDimensions, int depth ) { - if (levels.contains(originalDimensionNames.get(depth))) { + if (dimensionNames.contains(allDimensions.get(depth))) { // If this node is in a level we want to aggregate, create a new dimension node with the same value and stats, and connect it to // the last parent node in the new tree. If it already exists, increment it instead. String dimensionValue = currentInOriginalTree.getDimensionValue(); Node nodeInNewTree = parentInNewTree.children.get(dimensionValue); if (nodeInNewTree == null) { // Create new node with stats matching the node from the original tree - int indexOfLastLevel = originalDimensionNames.indexOf(levels.get(levels.size() - 1)); + int indexOfLastLevel = allDimensions.indexOf(dimensionNames.get(dimensionNames.size() - 1)); boolean isLeafNode = depth == indexOfLastLevel; // If this is the last level we aggregate, the new node should be a leaf // node nodeInNewTree = new Node(dimensionValue, isLeafNode, currentInOriginalTree.getImmutableStats()); @@ -203,12 +215,9 @@ private void aggregateByLevelsHelper( parentInNewTree = nodeInNewTree; } - if (!currentInOriginalTree.children.isEmpty()) { - // Not a leaf node - for (Map.Entry childEntry : currentInOriginalTree.children.entrySet()) { - CacheStatsHolder.Node child = childEntry.getValue(); - aggregateByLevelsHelper(parentInNewTree, child, levels, originalDimensionNames, depth + 1); - } + for (Map.Entry childEntry : currentInOriginalTree.children.entrySet()) { + CacheStatsHolder.Node child = childEntry.getValue(); + aggregateByLevelsHelper(parentInNewTree, child, allDimensions, depth + 1); } } @@ -216,13 +225,14 @@ private void aggregateByLevelsHelper( * Filters out levels that aren't in dimensionNames, and orders the resulting list to match the order in dimensionNames. * Unrecognized levels are ignored. 
*/ - private List filterLevels(List levels, List originalDimensionNames) { + private List filterLevels(String[] levels, List originalDimensionNames) { if (levels == null) { - return new ArrayList<>(); + return originalDimensionNames; } + List levelsList = Arrays.asList(levels); List filtered = new ArrayList<>(); for (String dimensionName : originalDimensionNames) { - if (levels.contains(dimensionName)) { + if (levelsList.contains(dimensionName)) { filtered.add(dimensionName); } } @@ -235,8 +245,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws getTotalStats().toXContent(builder, params); List filteredLevels = filterLevels(getLevels(params), dimensionNames); + assert filteredLevels.equals(dimensionNames); if (!filteredLevels.isEmpty()) { - toXContentForLevels(builder, params); + // Depth -1 corresponds to the dummy root node + toXContentForLevels(-1, statsRoot, builder, params); } // Also add the store name for the cache that produced the stats @@ -244,13 +256,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - XContentBuilder toXContentForLevels(XContentBuilder builder, Params params) throws IOException { - // Depth -1 corresponds to the dummy root node - toXContentForLevelsHelper(-1, statsRoot, builder, params); - return builder; - } - - private void toXContentForLevelsHelper(int depth, Node current, XContentBuilder builder, Params params) throws IOException { + private void toXContentForLevels(int depth, Node current, XContentBuilder builder, Params params) throws IOException { if (depth >= 0) { builder.startObject(current.dimensionValue); } @@ -261,7 +267,7 @@ private void toXContentForLevelsHelper(int depth, Node current, XContentBuilder } else { builder.startObject(dimensionNames.get(depth + 1)); for (Node nextNode : current.children.values()) { - toXContentForLevelsHelper(depth + 1, nextNode, builder, params); + toXContentForLevels(depth + 1, nextNode, builder, params); } builder.endObject(); } @@ -271,12 +277,12 @@ private void toXContentForLevelsHelper(int depth, Node current, XContentBuilder } } - private List getLevels(Params params) { + private String[] getLevels(Params params) { String levels = params.param("level"); if (levels == null) { return null; } - return List.of(levels.split(",")); + return levels.split(","); } @Override @@ -314,25 +320,17 @@ public int hashCode() { return Objects.hash(statsRoot.stats, dimensionNames); } - // A similar class to CacheStatsHolder.Node, which uses an ordered TreeMap and holds immutable CacheStatsSnapshot as its stats. + // A similar class to CacheStatsHolder.Node, which uses a SortedMap and holds immutable CacheStatsSnapshot as its stats. static class Node { private final String dimensionValue; - final Map children; // Map from dimensionValue to the Node for that dimension value + // Map from dimensionValue to the Node for that dimension value. Not final so we can set it to be unmodifiable before we are done in + // the constructor. + SortedMap children; // The stats for this node. If a leaf node, corresponds to the stats for this combination of dimensions; if not, // contains the sum of its children's stats. 
private ImmutableCacheStats stats; - private static final Map EMPTY_CHILDREN_MAP = Collections.unmodifiableMap(new HashMap<>()); - - Node(String dimensionValue, TreeMap snapshotChildren, ImmutableCacheStats stats) { - this.dimensionValue = dimensionValue; - this.stats = stats; - if (snapshotChildren == null) { - this.children = EMPTY_CHILDREN_MAP; - } else { - this.children = Collections.unmodifiableMap(snapshotChildren); - } - } + private static final SortedMap EMPTY_CHILDREN_MAP = Collections.unmodifiableSortedMap(new TreeMap<>()); private Node(String dimensionValue, boolean isLeafNode, ImmutableCacheStats stats) { this.dimensionValue = dimensionValue; diff --git a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java index b1082e7d5a2a9..742e5d93d12f4 100644 --- a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java +++ b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java @@ -132,11 +132,6 @@ public void refresh() { @Override public void close() {} - @Override - public ImmutableCacheStatsHolder stats() { - return stats(null); - } - @Override public ImmutableCacheStatsHolder stats(String[] levels) { return cacheStatsHolder.getImmutableCacheStatsHolder(levels); diff --git a/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java b/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java index 6d97a5cbb4e5a..3fe702ddf8307 100644 --- a/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java +++ b/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java @@ -199,7 +199,7 @@ public void testXContentForLevels() throws Exception { ToXContent.Params params = ToXContent.EMPTY_PARAMS; builder.startObject(); - stats.toXContentForLevels(builder, params); + stats.toXContent(builder, params); builder.endObject(); String resultString = builder.toString(); Map result = XContentHelper.convertToMap(MediaTypeRegistry.JSON.xContent(), resultString, true); From 4f3419b16b74d32a56f1402a72ccf6b9b82b4baa Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Wed, 24 Apr 2024 10:48:11 -0700 Subject: [PATCH 15/19] Addressed misc comments Signed-off-by: Peter Alfonsi --- .../CacheStatsAPIIndicesRequestCacheIT.java | 24 ++++++----------- .../admin/cluster/node/stats/NodeStats.java | 4 +-- .../opensearch/common/cache/CacheType.java | 26 +++++++++---------- .../common/cache/service/CacheService.java | 3 ++- .../common/cache/service/NodeCacheStats.java | 4 +-- .../admin/cluster/RestNodesStatsAction.java | 4 +-- 6 files changed, 29 insertions(+), 36 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java index 32e04403959ac..7cf535c88eb9b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java @@ -78,11 +78,7 @@ public void testCacheStatsAPIWIthOnHeapCache() throws Exception { // First, aggregate by indices only Map xContentMap = getNodeCacheStatsXContentMap(client, List.of(IndicesRequestCache.INDEX_DIMENSION_NAME)); - List index1Keys = List.of( - 
CacheType.INDICES_REQUEST_CACHE.getApiRepresentation(), - IndicesRequestCache.INDEX_DIMENSION_NAME, - index1Name - ); + List index1Keys = List.of(CacheType.INDICES_REQUEST_CACHE.getValue(), IndicesRequestCache.INDEX_DIMENSION_NAME, index1Name); // Since we searched twice, we expect to see 1 hit, 1 miss and 1 entry for index 1 ImmutableCacheStats expectedStats = new ImmutableCacheStats(1, 1, 0, 0, 1); checkCacheStatsAPIResponse(xContentMap, index1Keys, expectedStats, false, true); @@ -93,25 +89,21 @@ public void testCacheStatsAPIWIthOnHeapCache() throws Exception { )).get(ImmutableCacheStats.Fields.SIZE_IN_BYTES); assertTrue(requestSize > 0); - List index2Keys = List.of( - CacheType.INDICES_REQUEST_CACHE.getApiRepresentation(), - IndicesRequestCache.INDEX_DIMENSION_NAME, - index2Name - ); + List index2Keys = List.of(CacheType.INDICES_REQUEST_CACHE.getValue(), IndicesRequestCache.INDEX_DIMENSION_NAME, index2Name); // We searched once in index 2, we expect 1 miss + 1 entry expectedStats = new ImmutableCacheStats(0, 1, 0, requestSize, 1); checkCacheStatsAPIResponse(xContentMap, index2Keys, expectedStats, true, true); // The total stats for the node should be 1 hit, 2 misses, and 2 entries expectedStats = new ImmutableCacheStats(1, 2, 0, 2 * requestSize, 2); - List totalStatsKeys = List.of(CacheType.INDICES_REQUEST_CACHE.getApiRepresentation()); + List totalStatsKeys = List.of(CacheType.INDICES_REQUEST_CACHE.getValue()); checkCacheStatsAPIResponse(xContentMap, totalStatsKeys, expectedStats, true, true); // Aggregate by shards only xContentMap = getNodeCacheStatsXContentMap(client, List.of(IndicesRequestCache.SHARD_ID_DIMENSION_NAME)); List index1Shard0Keys = List.of( - CacheType.INDICES_REQUEST_CACHE.getApiRepresentation(), + CacheType.INDICES_REQUEST_CACHE.getValue(), IndicesRequestCache.SHARD_ID_DIMENSION_NAME, "[" + index1Name + "][0]" ); @@ -120,7 +112,7 @@ public void testCacheStatsAPIWIthOnHeapCache() throws Exception { checkCacheStatsAPIResponse(xContentMap, index1Shard0Keys, expectedStats, true, true); List index2Shard0Keys = List.of( - CacheType.INDICES_REQUEST_CACHE.getApiRepresentation(), + CacheType.INDICES_REQUEST_CACHE.getValue(), IndicesRequestCache.SHARD_ID_DIMENSION_NAME, "[" + index2Name + "][0]" ); @@ -134,7 +126,7 @@ public void testCacheStatsAPIWIthOnHeapCache() throws Exception { ); index1Keys = List.of( - CacheType.INDICES_REQUEST_CACHE.getApiRepresentation(), + CacheType.INDICES_REQUEST_CACHE.getValue(), IndicesRequestCache.INDEX_DIMENSION_NAME, index1Name, IndicesRequestCache.SHARD_ID_DIMENSION_NAME, @@ -145,7 +137,7 @@ public void testCacheStatsAPIWIthOnHeapCache() throws Exception { checkCacheStatsAPIResponse(xContentMap, index1Keys, expectedStats, true, true); index2Keys = List.of( - CacheType.INDICES_REQUEST_CACHE.getApiRepresentation(), + CacheType.INDICES_REQUEST_CACHE.getValue(), IndicesRequestCache.INDEX_DIMENSION_NAME, index2Name, IndicesRequestCache.SHARD_ID_DIMENSION_NAME, @@ -184,7 +176,7 @@ public void testStatsMatchOldApi() throws Exception { .getRequestCache(); assertNotEquals(0, oldApiStats.getMemorySizeInBytes()); - List xContentMapKeys = List.of(CacheType.INDICES_REQUEST_CACHE.getApiRepresentation()); + List xContentMapKeys = List.of(CacheType.INDICES_REQUEST_CACHE.getValue()); Map xContentMap = getNodeCacheStatsXContentMap(client, List.of()); ImmutableCacheStats expected = new ImmutableCacheStats( oldApiStats.getHitCount(), diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java index 0917a0baff1ab..ac2daf57f248b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java @@ -238,7 +238,7 @@ public NodeStats(StreamInput in) throws IOException { } else { admissionControlStats = null; } - if (in.getVersion().onOrAfter(Version.V_2_14_0)) { + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { nodeCacheStats = in.readOptionalWriteable(NodeCacheStats::new); } else { nodeCacheStats = null; @@ -522,7 +522,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_12_0)) { out.writeOptionalWriteable(admissionControlStats); } - if (out.getVersion().onOrAfter(Version.V_2_14_0)) { + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { out.writeOptionalWriteable(nodeCacheStats); } } diff --git a/server/src/main/java/org/opensearch/common/cache/CacheType.java b/server/src/main/java/org/opensearch/common/cache/CacheType.java index 0acc08be47d89..e8cb3ee54fab6 100644 --- a/server/src/main/java/org/opensearch/common/cache/CacheType.java +++ b/server/src/main/java/org/opensearch/common/cache/CacheType.java @@ -23,39 +23,39 @@ public enum CacheType { INDICES_REQUEST_CACHE("indices.requests.cache", "request_cache"); private final String settingPrefix; - private final String apiRepresentation; + private final String value; // The value displayed for this cache type in API responses - private static final Map representationsMap; + private static final Map valuesMap; static { - Map reprs = new HashMap<>(); + Map values = new HashMap<>(); for (CacheType cacheType : values()) { - reprs.put(cacheType.apiRepresentation, cacheType); + values.put(cacheType.value, cacheType); } - representationsMap = Collections.unmodifiableMap(reprs); + valuesMap = Collections.unmodifiableMap(values); } CacheType(String settingPrefix, String representation) { this.settingPrefix = settingPrefix; - this.apiRepresentation = representation; + this.value = representation; } public String getSettingPrefix() { return settingPrefix; } - public String getApiRepresentation() { - return apiRepresentation; + public String getValue() { + return value; } - public static CacheType getByRepresentation(String representation) { - CacheType result = representationsMap.get(representation); + public static CacheType getByValue(String value) { + CacheType result = valuesMap.get(value); if (result == null) { - throw new IllegalArgumentException("No CacheType with representation = " + representation); + throw new IllegalArgumentException("No CacheType with value = " + value); } return result; } - public static Set allRepresentations() { - return representationsMap.keySet(); + public static Set allValues() { + return valuesMap.keySet(); } } diff --git a/server/src/main/java/org/opensearch/common/cache/service/CacheService.java b/server/src/main/java/org/opensearch/common/cache/service/CacheService.java index 19843354b48e5..01da78ecec52e 100644 --- a/server/src/main/java/org/opensearch/common/cache/service/CacheService.java +++ b/server/src/main/java/org/opensearch/common/cache/service/CacheService.java @@ -22,6 +22,7 @@ import java.util.HashMap; import java.util.Map; +import java.util.SortedMap; import java.util.TreeMap; /** @@ -67,7 +68,7 @@ public ICache createCache(CacheConfig config, CacheType cache } public NodeCacheStats stats(CommonStatsFlags flags) { - final TreeMap statsMap 
= new TreeMap<>(); + final SortedMap statsMap = new TreeMap<>(); for (CacheType type : cacheTypeMap.keySet()) { statsMap.put(type, cacheTypeMap.get(type).stats(flags.getLevels())); } diff --git a/server/src/main/java/org/opensearch/common/cache/service/NodeCacheStats.java b/server/src/main/java/org/opensearch/common/cache/service/NodeCacheStats.java index eb37e21e0b925..07c75eab34194 100644 --- a/server/src/main/java/org/opensearch/common/cache/service/NodeCacheStats.java +++ b/server/src/main/java/org/opensearch/common/cache/service/NodeCacheStats.java @@ -29,7 +29,7 @@ */ @ExperimentalApi public class NodeCacheStats implements ToXContentFragment, Writeable { - // Use TreeMap to force consistent ordering of caches in API responses + // Use SortedMap to force consistent ordering of caches in API responses private final SortedMap statsByCache; private final CommonStatsFlags flags; @@ -53,7 +53,7 @@ public void writeTo(StreamOutput out) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { for (CacheType type : statsByCache.keySet()) { if (flags.getIncludeCaches().contains(type)) { - builder.startObject(type.getApiRepresentation()); + builder.startObject(type.getValue()); statsByCache.get(type).toXContent(builder, params); builder.endObject(); } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java index c9c21420a0f06..267bfde576dec 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java @@ -186,10 +186,10 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC } else { for (String cacheName : cacheMetrics) { try { - cacheFlags.includeCacheType(CacheType.getByRepresentation(cacheName)); + cacheFlags.includeCacheType(CacheType.getByValue(cacheName)); } catch (IllegalArgumentException e) { throw new IllegalArgumentException( - unrecognized(request, Set.of(cacheName), CacheType.allRepresentations(), "cache type") + unrecognized(request, Set.of(cacheName), CacheType.allValues(), "cache type") ); } } From 40796c50ce0350c8183451d0b00613ed0989fa20 Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Thu, 25 Apr 2024 09:20:27 -0700 Subject: [PATCH 16/19] Addressed Michael's comments Signed-off-by: Peter Alfonsi --- .../cache/store/disk/EhcacheDiskCache.java | 8 ++--- .../store/disk/EhCacheDiskCacheTests.java | 6 ++-- .../CacheStatsAPIIndicesRequestCacheIT.java | 4 +-- .../cluster/node/stats/NodesStatsRequest.java | 2 +- .../node/stats/TransportNodesStatsAction.java | 2 +- .../admin/indices/stats/CommonStatsFlags.java | 3 +- .../opensearch/common/cache/CacheType.java | 2 +- .../common/cache/stats/CacheStats.java | 34 +++++++++---------- .../common/cache/stats/CacheStatsHolder.java | 22 ++++++------ .../cache/stats/ImmutableCacheStats.java | 22 ++++++------ .../stats/ImmutableCacheStatsHolder.java | 4 +-- .../cache/store/OpenSearchOnHeapCache.java | 6 ++-- .../cache/stats/CacheStatsHolderTests.java | 10 +++--- .../stats/ImmutableCacheStatsHolderTests.java | 8 ++--- .../store/OpenSearchOnHeapCacheTests.java | 6 ++-- .../indices/IndicesRequestCacheTests.java | 4 +-- 16 files changed, 72 insertions(+), 71 deletions(-) diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java 
b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java index 585ca9a5868e1..f36b647c87850 100644 --- a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java @@ -509,7 +509,7 @@ private long getNewValuePairSize(CacheEvent, ? extends By public void onEvent(CacheEvent, ? extends ByteArrayWrapper> event) { switch (event.getType()) { case CREATED: - cacheStatsHolder.incrementEntries(event.getKey().dimensions); + cacheStatsHolder.incrementItems(event.getKey().dimensions); cacheStatsHolder.incrementSizeInBytes(event.getKey().dimensions, getNewValuePairSize(event)); assert event.getOldValue() == null; break; @@ -517,7 +517,7 @@ public void onEvent(CacheEvent, ? extends ByteArrayWrappe this.removalListener.onRemoval( new RemovalNotification<>(event.getKey(), deserializeValue(event.getOldValue()), RemovalReason.EVICTED) ); - cacheStatsHolder.decrementEntries(event.getKey().dimensions); + cacheStatsHolder.decrementItems(event.getKey().dimensions); cacheStatsHolder.decrementSizeInBytes(event.getKey().dimensions, getOldValuePairSize(event)); cacheStatsHolder.incrementEvictions(event.getKey().dimensions); assert event.getNewValue() == null; @@ -526,7 +526,7 @@ public void onEvent(CacheEvent, ? extends ByteArrayWrappe this.removalListener.onRemoval( new RemovalNotification<>(event.getKey(), deserializeValue(event.getOldValue()), RemovalReason.EXPLICIT) ); - cacheStatsHolder.decrementEntries(event.getKey().dimensions); + cacheStatsHolder.decrementItems(event.getKey().dimensions); cacheStatsHolder.decrementSizeInBytes(event.getKey().dimensions, getOldValuePairSize(event)); assert event.getNewValue() == null; break; @@ -534,7 +534,7 @@ public void onEvent(CacheEvent, ? 
extends ByteArrayWrappe this.removalListener.onRemoval( new RemovalNotification<>(event.getKey(), deserializeValue(event.getOldValue()), RemovalReason.INVALIDATED) ); - cacheStatsHolder.decrementEntries(event.getKey().dimensions); + cacheStatsHolder.decrementItems(event.getKey().dimensions); cacheStatsHolder.decrementSizeInBytes(event.getKey().dimensions, getOldValuePairSize(event)); assert event.getNewValue() == null; break; diff --git a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java index 06ebed08d7525..f93b09cf2d4f4 100644 --- a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java +++ b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java @@ -93,7 +93,7 @@ public void testBasicGetAndPut() throws IOException { String value = ehcacheTest.get(getICacheKey(entry.getKey())); assertEquals(entry.getValue(), value); } - assertEquals(randomKeys, ehcacheTest.stats().getTotalEntries()); + assertEquals(randomKeys, ehcacheTest.stats().getTotalItems()); assertEquals(randomKeys, ehcacheTest.stats().getTotalHits()); assertEquals(expectedSize, ehcacheTest.stats().getTotalSizeInBytes()); assertEquals(randomKeys, ehcacheTest.count()); @@ -217,7 +217,7 @@ public void testConcurrentPut() throws Exception { assertEquals(entry.getValue(), value); } assertEquals(randomKeys, ehcacheTest.count()); - assertEquals(randomKeys, ehcacheTest.stats().getTotalEntries()); + assertEquals(randomKeys, ehcacheTest.stats().getTotalItems()); ehcacheTest.close(); } } @@ -416,7 +416,7 @@ public String load(ICacheKey key) { assertEquals(1, numberOfTimesValueLoaded); assertEquals(0, ((EhcacheDiskCache) ehcacheTest).getCompletableFutureMap().size()); assertEquals(1, ehcacheTest.stats().getTotalMisses()); - assertEquals(1, ehcacheTest.stats().getTotalEntries()); + assertEquals(1, ehcacheTest.stats().getTotalItems()); assertEquals(numberOfRequest - 1, ehcacheTest.stats().getTotalHits()); assertEquals(1, ehcacheTest.count()); ehcacheTest.close(); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java index 7cf535c88eb9b..ed732c8d777fa 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java @@ -245,7 +245,7 @@ private static Map getNodeCacheStatsXContentMap(Client client, L NodesStatsResponse nodeStatsResponse = client.admin() .cluster() .prepareNodesStats("data:true") - .addMetric(NodesStatsRequest.Metric.SEARCH_CACHE_STATS.metricName()) + .addMetric(NodesStatsRequest.Metric.CACHE_STATS.metricName()) .setIndices(statsFlags) .get(); // Can always get the first data node as there's only one in this test suite @@ -288,7 +288,7 @@ private static void checkCacheStatsAPIResponse( assertEquals(expectedStats.getSizeInBytes(), (int) aggregatedStatsResponse.get(ImmutableCacheStats.Fields.SIZE_IN_BYTES)); } if (checkEntries) { - assertEquals(expectedStats.getEntries(), (int) aggregatedStatsResponse.get(ImmutableCacheStats.Fields.ENTRIES)); + assertEquals(expectedStats.getItems(), (int) aggregatedStatsResponse.get(ImmutableCacheStats.Fields.ITEM_COUNT)); } } } diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java index 9c0cba1322b21..379836cf442e3 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java @@ -220,7 +220,7 @@ public enum Metric { SEGMENT_REPLICATION_BACKPRESSURE("segment_replication_backpressure"), REPOSITORIES("repositories"), ADMISSION_CONTROL("admission_control"), - SEARCH_CACHE_STATS("caches"); + CACHE_STATS("caches"); private String metricName; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 14a52163c5bf6..2e93e5e7841cb 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -129,7 +129,7 @@ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) { NodesStatsRequest.Metric.SEGMENT_REPLICATION_BACKPRESSURE.containedIn(metrics), NodesStatsRequest.Metric.REPOSITORIES.containedIn(metrics), NodesStatsRequest.Metric.ADMISSION_CONTROL.containedIn(metrics), - NodesStatsRequest.Metric.SEARCH_CACHE_STATS.containedIn(metrics) + NodesStatsRequest.Metric.CACHE_STATS.containedIn(metrics) ); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java index 505002d82b045..ddea79b9f9336 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java @@ -43,6 +43,7 @@ import java.io.IOException; import java.util.Collections; import java.util.EnumSet; +import java.util.Set; /** * Common Stats Flags for OpenSearch @@ -167,7 +168,7 @@ public Flag[] getFlags() { return flags.toArray(new Flag[0]); } - public EnumSet getIncludeCaches() { + public Set getIncludeCaches() { return includeCaches; } diff --git a/server/src/main/java/org/opensearch/common/cache/CacheType.java b/server/src/main/java/org/opensearch/common/cache/CacheType.java index e8cb3ee54fab6..eee6204ac5412 100644 --- a/server/src/main/java/org/opensearch/common/cache/CacheType.java +++ b/server/src/main/java/org/opensearch/common/cache/CacheType.java @@ -23,7 +23,7 @@ public enum CacheType { INDICES_REQUEST_CACHE("indices.requests.cache", "request_cache"); private final String settingPrefix; - private final String value; // The value displayed for this cache type in API responses + private final String value; // The value displayed for this cache type in stats API responses private static final Map valuesMap; static { diff --git a/server/src/main/java/org/opensearch/common/cache/stats/CacheStats.java b/server/src/main/java/org/opensearch/common/cache/stats/CacheStats.java index b0cb66b56b70d..93fa1ff7fcddf 100644 --- a/server/src/main/java/org/opensearch/common/cache/stats/CacheStats.java +++ b/server/src/main/java/org/opensearch/common/cache/stats/CacheStats.java @@ -20,9 +20,9 @@ public class CacheStats { CounterMetric misses; CounterMetric evictions; CounterMetric sizeInBytes; - CounterMetric entries; + CounterMetric items; - 
public CacheStats(long hits, long misses, long evictions, long sizeInBytes, long entries) { + public CacheStats(long hits, long misses, long evictions, long sizeInBytes, long items) { this.hits = new CounterMetric(); this.hits.inc(hits); this.misses = new CounterMetric(); @@ -31,8 +31,8 @@ public CacheStats(long hits, long misses, long evictions, long sizeInBytes, long this.evictions.inc(evictions); this.sizeInBytes = new CounterMetric(); this.sizeInBytes.inc(sizeInBytes); - this.entries = new CounterMetric(); - this.entries.inc(entries); + this.items = new CounterMetric(); + this.items.inc(items); } public CacheStats() { @@ -44,33 +44,33 @@ private void internalAdd(long otherHits, long otherMisses, long otherEvictions, this.misses.inc(otherMisses); this.evictions.inc(otherEvictions); this.sizeInBytes.inc(otherSizeInBytes); - this.entries.inc(otherEntries); + this.items.inc(otherEntries); } public void add(CacheStats other) { if (other == null) { return; } - internalAdd(other.getHits(), other.getMisses(), other.getEvictions(), other.getSizeInBytes(), other.getEntries()); + internalAdd(other.getHits(), other.getMisses(), other.getEvictions(), other.getSizeInBytes(), other.getItems()); } public void add(ImmutableCacheStats snapshot) { if (snapshot == null) { return; } - internalAdd(snapshot.getHits(), snapshot.getMisses(), snapshot.getEvictions(), snapshot.getSizeInBytes(), snapshot.getEntries()); + internalAdd(snapshot.getHits(), snapshot.getMisses(), snapshot.getEvictions(), snapshot.getSizeInBytes(), snapshot.getItems()); } public void subtract(ImmutableCacheStats other) { if (other == null) { return; } - internalAdd(-other.getHits(), -other.getMisses(), -other.getEvictions(), -other.getSizeInBytes(), -other.getEntries()); + internalAdd(-other.getHits(), -other.getMisses(), -other.getEvictions(), -other.getSizeInBytes(), -other.getItems()); } @Override public int hashCode() { - return Objects.hash(hits.count(), misses.count(), evictions.count(), sizeInBytes.count(), entries.count()); + return Objects.hash(hits.count(), misses.count(), evictions.count(), sizeInBytes.count(), items.count()); } public void incrementHits() { @@ -93,12 +93,12 @@ public void decrementSizeInBytes(long amount) { sizeInBytes.dec(amount); } - public void incrementEntries() { - entries.inc(); + public void incrementItems() { + items.inc(); } - public void decrementEntries() { - entries.dec(); + public void decrementItems() { + items.dec(); } public long getHits() { @@ -117,16 +117,16 @@ public long getSizeInBytes() { return sizeInBytes.count(); } - public long getEntries() { - return entries.count(); + public long getItems() { + return items.count(); } public void resetSizeAndEntries() { sizeInBytes = new CounterMetric(); - entries = new CounterMetric(); + items = new CounterMetric(); } public ImmutableCacheStats immutableSnapshot() { - return new ImmutableCacheStats(hits.count(), misses.count(), evictions.count(), sizeInBytes.count(), entries.count()); + return new ImmutableCacheStats(hits.count(), misses.count(), evictions.count(), sizeInBytes.count(), items.count()); } } diff --git a/server/src/main/java/org/opensearch/common/cache/stats/CacheStatsHolder.java b/server/src/main/java/org/opensearch/common/cache/stats/CacheStatsHolder.java index d8743e0a82590..8f001b3824f8b 100644 --- a/server/src/main/java/org/opensearch/common/cache/stats/CacheStatsHolder.java +++ b/server/src/main/java/org/opensearch/common/cache/stats/CacheStatsHolder.java @@ -80,12 +80,12 @@ public void decrementSizeInBytes(List 
dimensionValues, long amountBytes) internalIncrement(dimensionValues, (node) -> node.decrementSizeInBytes(amountBytes), false); } - public void incrementEntries(List dimensionValues) { - internalIncrement(dimensionValues, Node::incrementEntries, true); + public void incrementItems(List dimensionValues) { + internalIncrement(dimensionValues, Node::incrementItems, true); } - public void decrementEntries(List dimensionValues) { - internalIncrement(dimensionValues, Node::decrementEntries, false); + public void decrementItems(List dimensionValues) { + internalIncrement(dimensionValues, Node::decrementItems, false); } /** @@ -105,7 +105,7 @@ private void resetHelper(Node current) { public long count() { // Include this here so caches don't have to create an entire CacheStats object to run count(). - return statsRoot.getEntries(); + return statsRoot.getItems(); } private void internalIncrement(List dimensionValues, Consumer adder, boolean createNodesIfAbsent) { @@ -255,16 +255,16 @@ void decrementSizeInBytes(long amountBytes) { this.stats.decrementSizeInBytes(amountBytes); } - void incrementEntries() { - this.stats.incrementEntries(); + void incrementItems() { + this.stats.incrementItems(); } - void decrementEntries() { - this.stats.decrementEntries(); + void decrementItems() { + this.stats.decrementItems(); } - long getEntries() { - return this.stats.getEntries(); + long getItems() { + return this.stats.getItems(); } ImmutableCacheStats getImmutableStats() { diff --git a/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStats.java b/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStats.java index ea6af3b140588..dbd78a2584f9c 100644 --- a/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStats.java +++ b/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStats.java @@ -30,14 +30,14 @@ public class ImmutableCacheStats implements Writeable, ToXContent { private final long misses; private final long evictions; private final long sizeInBytes; - private final long entries; + private final long items; - public ImmutableCacheStats(long hits, long misses, long evictions, long sizeInBytes, long entries) { + public ImmutableCacheStats(long hits, long misses, long evictions, long sizeInBytes, long items) { this.hits = hits; this.misses = misses; this.evictions = evictions; this.sizeInBytes = sizeInBytes; - this.entries = entries; + this.items = items; } public ImmutableCacheStats(StreamInput in) throws IOException { @@ -50,7 +50,7 @@ public static ImmutableCacheStats addSnapshots(ImmutableCacheStats s1, Immutable s1.misses + s2.misses, s1.evictions + s2.evictions, s1.sizeInBytes + s2.sizeInBytes, - s1.entries + s2.entries + s1.items + s2.items ); } @@ -70,8 +70,8 @@ public long getSizeInBytes() { return sizeInBytes; } - public long getEntries() { - return entries; + public long getItems() { + return items; } @Override @@ -80,7 +80,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(misses); out.writeVLong(evictions); out.writeVLong(sizeInBytes); - out.writeVLong(entries); + out.writeVLong(items); } @Override @@ -96,12 +96,12 @@ public boolean equals(Object o) { && (misses == other.misses) && (evictions == other.evictions) && (sizeInBytes == other.sizeInBytes) - && (entries == other.entries); + && (items == other.items); } @Override public int hashCode() { - return Objects.hash(hits, misses, evictions, sizeInBytes, entries); + return Objects.hash(hits, misses, evictions, sizeInBytes, items); } @Override @@ 
-111,7 +111,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(Fields.EVICTIONS, evictions); builder.field(Fields.HIT_COUNT, hits); builder.field(Fields.MISS_COUNT, misses); - builder.field(Fields.ENTRIES, entries); + builder.field(Fields.ITEM_COUNT, items); return builder; } @@ -124,6 +124,6 @@ public static final class Fields { public static final String EVICTIONS = "evictions"; public static final String HIT_COUNT = "hit_count"; public static final String MISS_COUNT = "miss_count"; - public static final String ENTRIES = "entries"; + public static final String ITEM_COUNT = "item_count"; } } diff --git a/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java b/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java index 6e031c3d7993f..66ad1adce8d72 100644 --- a/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java +++ b/server/src/main/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolder.java @@ -151,8 +151,8 @@ public long getTotalSizeInBytes() { return getTotalStats().getSizeInBytes(); } - public long getTotalEntries() { - return getTotalStats().getEntries(); + public long getTotalItems() { + return getTotalStats().getItems(); } public ImmutableCacheStats getStatsForDimensionValues(List dimensionValues) { diff --git a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java index 742e5d93d12f4..c459325ba2cc7 100644 --- a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java +++ b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java @@ -81,7 +81,7 @@ public V get(ICacheKey key) { @Override public void put(ICacheKey key, V value) { cache.put(key, value); - cacheStatsHolder.incrementEntries(key.dimensions); + cacheStatsHolder.incrementItems(key.dimensions); cacheStatsHolder.incrementSizeInBytes(key.dimensions, weigher.applyAsLong(key, value)); } @@ -92,7 +92,7 @@ public V computeIfAbsent(ICacheKey key, LoadAwareCacheLoader, V> cacheStatsHolder.incrementHits(key.dimensions); } else { cacheStatsHolder.incrementMisses(key.dimensions); - cacheStatsHolder.incrementEntries(key.dimensions); + cacheStatsHolder.incrementItems(key.dimensions); cacheStatsHolder.incrementSizeInBytes(key.dimensions, cache.getWeigher().applyAsLong(key, value)); } return value; @@ -140,7 +140,7 @@ public ImmutableCacheStatsHolder stats(String[] levels) { @Override public void onRemoval(RemovalNotification, V> notification) { removalListener.onRemoval(notification); - cacheStatsHolder.decrementEntries(notification.getKey().dimensions); + cacheStatsHolder.decrementItems(notification.getKey().dimensions); cacheStatsHolder.decrementSizeInBytes( notification.getKey().dimensions, cache.getWeigher().applyAsLong(notification.getKey(), notification.getValue()) diff --git a/server/src/test/java/org/opensearch/common/cache/stats/CacheStatsHolderTests.java b/server/src/test/java/org/opensearch/common/cache/stats/CacheStatsHolderTests.java index 88a4eb840dec2..7c7df362a9b47 100644 --- a/server/src/test/java/org/opensearch/common/cache/stats/CacheStatsHolderTests.java +++ b/server/src/test/java/org/opensearch/common/cache/stats/CacheStatsHolderTests.java @@ -64,7 +64,7 @@ public void testReset() throws Exception { for (List dimensionValues : expected.keySet()) { CacheStats originalCounter = 
expected.get(dimensionValues); originalCounter.sizeInBytes = new CounterMetric(); - originalCounter.entries = new CounterMetric(); + originalCounter.items = new CounterMetric(); CacheStatsHolder.Node node = getNode(dimensionValues, cacheStatsHolder.getStatsRoot()); ImmutableCacheStats actual = node.getImmutableStats(); @@ -116,7 +116,7 @@ public void testCount() throws Exception { long expectedCount = 0L; for (CacheStats counter : expected.values()) { - expectedCount += counter.getEntries(); + expectedCount += counter.getItems(); } assertEquals(expectedCount, cacheStatsHolder.count()); } @@ -225,7 +225,7 @@ static Map, CacheStats> populateStats( expected.get(dimensions).misses.inc(statsToInc.getMisses()); expected.get(dimensions).evictions.inc(statsToInc.getEvictions()); expected.get(dimensions).sizeInBytes.inc(statsToInc.getSizeInBytes()); - expected.get(dimensions).entries.inc(statsToInc.getEntries()); + expected.get(dimensions).items.inc(statsToInc.getItems()); CacheStatsHolderTests.populateStatsHolderFromStatsValueMap(cacheStatsHolder, Map.of(dimensions, statsToInc)); } } @@ -295,8 +295,8 @@ public static void populateStatsHolderFromStatsValueMap(CacheStatsHolder cacheSt cacheStatsHolder.incrementEvictions(dims); } cacheStatsHolder.incrementSizeInBytes(dims, stats.getSizeInBytes()); - for (int i = 0; i < stats.getEntries(); i++) { - cacheStatsHolder.incrementEntries(dims); + for (int i = 0; i < stats.getItems(); i++) { + cacheStatsHolder.incrementItems(dims); } } } diff --git a/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java b/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java index 3fe702ddf8307..3932576bf141f 100644 --- a/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java +++ b/server/src/test/java/org/opensearch/common/cache/stats/ImmutableCacheStatsHolderTests.java @@ -103,7 +103,7 @@ public void testGet() throws Exception { assertEquals(expectedTotal.getMisses(), stats.getTotalMisses()); assertEquals(expectedTotal.getEvictions(), stats.getTotalEvictions()); assertEquals(expectedTotal.getSizeInBytes(), stats.getTotalSizeInBytes()); - assertEquals(expectedTotal.getEntries(), stats.getTotalEntries()); + assertEquals(expectedTotal.getItems(), stats.getTotalItems()); assertSumOfChildrenStats(stats.getStatsRoot()); } @@ -213,8 +213,8 @@ public void testXContentForLevels() throws Exception { (counter, value) -> counter.hits.inc(value), ImmutableCacheStats.Fields.MISS_COUNT, (counter, value) -> counter.misses.inc(value), - ImmutableCacheStats.Fields.ENTRIES, - (counter, value) -> counter.entries.inc(value) + ImmutableCacheStats.Fields.ITEM_COUNT, + (counter, value) -> counter.items.inc(value) ); Map, ImmutableCacheStatsHolder.Node> leafNodes = getAllLeafNodes(stats.getStatsRoot()); @@ -287,7 +287,7 @@ private void assertTotalStatsPresentInXContentResponse(Map resul assertNotEquals(0, (int) result.get(ImmutableCacheStats.Fields.EVICTIONS)); assertNotEquals(0, (int) result.get(ImmutableCacheStats.Fields.HIT_COUNT)); assertNotEquals(0, (int) result.get(ImmutableCacheStats.Fields.MISS_COUNT)); - assertNotEquals(0, (int) result.get(ImmutableCacheStats.Fields.ENTRIES)); + assertNotEquals(0, (int) result.get(ImmutableCacheStats.Fields.ITEM_COUNT)); // assert the store name is present assertEquals(storeName, (String) result.get(ImmutableCacheStatsHolder.STORE_NAME_FIELD)); } diff --git a/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java 
b/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java index 008dc7c2e0902..7c32352ed0b1b 100644 --- a/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java +++ b/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java @@ -48,7 +48,7 @@ public void testStats() throws Exception { assertEquals(i + 1, cache.stats().getTotalMisses()); assertEquals(0, cache.stats().getTotalHits()); - assertEquals(Math.min(maxKeys, i + 1), cache.stats().getTotalEntries()); + assertEquals(Math.min(maxKeys, i + 1), cache.stats().getTotalItems()); assertEquals(Math.min(maxKeys, i + 1) * keyValueSize, cache.stats().getTotalSizeInBytes()); assertEquals(Math.max(0, i + 1 - maxKeys), cache.stats().getTotalEvictions()); } @@ -59,7 +59,7 @@ public void testStats() throws Exception { assertEquals(numAdded, cache.stats().getTotalMisses()); assertEquals(numHits, cache.stats().getTotalHits()); - assertEquals(maxKeys, cache.stats().getTotalEntries()); + assertEquals(maxKeys, cache.stats().getTotalItems()); assertEquals(maxKeys * keyValueSize, cache.stats().getTotalSizeInBytes()); assertEquals(numEvicted, cache.stats().getTotalEvictions()); } @@ -71,7 +71,7 @@ public void testStats() throws Exception { assertEquals(numAdded, cache.stats().getTotalMisses()); assertEquals(maxKeys, cache.stats().getTotalHits()); - assertEquals(maxKeys - numInvalidated, cache.stats().getTotalEntries()); + assertEquals(maxKeys - numInvalidated, cache.stats().getTotalItems()); assertEquals((maxKeys - numInvalidated) * keyValueSize, cache.stats().getTotalSizeInBytes()); assertEquals(numEvicted, cache.stats().getTotalEvictions()); } diff --git a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java index 051acfe9d085a..3d016d9552974 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java @@ -872,7 +872,7 @@ public void testClosingIndexWipesStats() throws Exception { assertNotNull(snapshot); // check the values are not empty by confirming entries != 0, this should always be true since the missed value is loaded // into the cache - assertNotEquals(0, snapshot.getEntries()); + assertNotEquals(0, snapshot.getItems()); } } @@ -896,7 +896,7 @@ public void testClosingIndexWipesStats() throws Exception { assertNotNull(snapshot); // check the values are not empty by confirming entries != 0, this should always be true since the missed value is loaded // into the cache - assertNotEquals(0, snapshot.getEntries()); + assertNotEquals(0, snapshot.getItems()); } } From ecf780cc42abeff1ebd3846f091125885db56c9b Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Sun, 28 Apr 2024 14:33:16 -0700 Subject: [PATCH 17/19] Fixed IT Signed-off-by: Peter Alfonsi --- .../indices/CacheStatsAPIIndicesRequestCacheIT.java | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java index ed732c8d777fa..de7a52761c77c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java @@ -53,10 +53,7 @@ public 
CacheStatsAPIIndicesRequestCacheIT(Settings settings) { @ParametersFactory public static Collection parameters() { - return Arrays.asList( - new Object[] { Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE, "true").build() }, - new Object[] { Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE, "false").build() } - ); + return Arrays.asList(new Object[] { Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE, "true").build() }); } public void testCacheStatsAPIWIthOnHeapCache() throws Exception { From 3d1a1ee871e423885ac97e3c9fe034224c824b5d Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Sun, 28 Apr 2024 15:44:11 -0700 Subject: [PATCH 18/19] rerun Signed-off-by: Peter Alfonsi From 5d76f0d89ee06e51908ab2a5f03dc0d8f654cf92 Mon Sep 17 00:00:00 2001 From: Peter Alfonsi Date: Sun, 28 Apr 2024 18:02:50 -0700 Subject: [PATCH 19/19] rerun Signed-off-by: Peter Alfonsi
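
A minimal usage sketch, not part of the patch series above, pulling together the API surface the series introduces: a CacheStatsHolder keyed by ordered dimensions, increments per dimension-value path, and an ImmutableCacheStatsHolder aggregated by a subset of levels. The dimension names ("index", "shard_id"), the store name "example_store", the dimension values, and the byte count are illustrative placeholders, not values taken from the PR.

import org.opensearch.common.cache.stats.CacheStatsHolder;
import org.opensearch.common.cache.stats.ImmutableCacheStats;
import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder;

import java.util.List;

public class CacheStatsSketch {
    public static void main(String[] args) {
        // Dimension names are ordered from outermost to innermost; the store name is
        // what the stats response reports for the backing cache implementation.
        CacheStatsHolder holder = new CacheStatsHolder(List.of("index", "shard_id"), "example_store");

        // One dimension value per dimension name, in the same order.
        List<String> dims = List.of("my-index", "[my-index][0]");
        holder.incrementMisses(dims);            // first lookup misses...
        holder.incrementItems(dims);             // ...and loads one item...
        holder.incrementSizeInBytes(dims, 256L); // ...weighing 256 bytes
        holder.incrementHits(dims);              // second lookup hits

        // Aggregate by the "index" level only; per filterLevels, level names that are
        // not among the holder's dimension names are ignored.
        ImmutableCacheStatsHolder snapshot = holder.getImmutableCacheStatsHolder(new String[] { "index" });
        System.out.println(snapshot.getTotalHits() + " hits, " + snapshot.getTotalItems() + " items");

        // Per-dimension lookup uses the values of the levels kept by the aggregation.
        ImmutableCacheStats indexStats = snapshot.getStatsForDimensionValues(List.of("my-index"));
        System.out.println(indexStats.getSizeInBytes() + " bytes for my-index");
    }
}

In the node stats API these snapshots surface under the "caches" metric, with each cache type's value (for example "request_cache") as the enclosing object key and the requested levels nested beneath it as dimensionName -> dimensionValue objects, which is what the integration test's key lists above walk through.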