diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 8cfd636b9fd2e..6b86da2c91261 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -23,3 +23,4 @@ BWC_VERSION: - "2.9.0" - "2.9.1" - "2.10.0" + - "2.11.0" diff --git a/CHANGELOG.md b/CHANGELOG.md index 6dea6a4589387..eabea882750f0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -53,6 +53,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Remote Store] Add Segment download stats to remotestore stats API ([#8718](https://github.com/opensearch-project/OpenSearch/pull/8718)) - [Remote Store] Add remote segment transfer stats on NodesStats API ([#9168](https://github.com/opensearch-project/OpenSearch/pull/9168) [#9393](https://github.com/opensearch-project/OpenSearch/pull/9393) [#9454](https://github.com/opensearch-project/OpenSearch/pull/9454)) - Return 409 Conflict HTTP status instead of 503 on failure to concurrently execute snapshots ([#8986](https://github.com/opensearch-project/OpenSearch/pull/5855)) +- [Remote Store] Removing feature flag to mark feature GA ([#9761](https://github.com/opensearch-project/OpenSearch/pull/9761)) ### Deprecated @@ -104,6 +105,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Remote state] Integrate remote cluster state in publish/commit flow ([#9665](https://github.com/opensearch-project/OpenSearch/pull/9665)) - [Segment Replication] Adding segment replication statistics rolled up at index, node and cluster level ([#9709](https://github.com/opensearch-project/OpenSearch/pull/9709)) - [Remote Store] Changes to introduce repository registration during bootstrap via node attributes. ([#9105](https://github.com/opensearch-project/OpenSearch/pull/9105)) +- [Remote state] Auto restore index metadata from last known cluster state ([#9831](https://github.com/opensearch-project/OpenSearch/pull/9831)) ### Dependencies - Bump `org.apache.logging.log4j:log4j-core` from 2.17.1 to 2.20.0 ([#8307](https://github.com/opensearch-project/OpenSearch/pull/8307)) @@ -183,6 +185,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Remote Store] Add support for Remote Translog Store stats in `_remotestore/stats/` API ([#9263](https://github.com/opensearch-project/OpenSearch/pull/9263)) - Add support for query profiler with concurrent aggregation ([#9248](https://github.com/opensearch-project/OpenSearch/pull/9248)) - Cleanup Unreferenced file on segment merge failure ([#9503](https://github.com/opensearch-project/OpenSearch/pull/9503)) +- Move ZStd to a plugin ([#9658](https://github.com/opensearch-project/OpenSearch/pull/9658)) +- [Remote Store] Add support for Remote Translog Store upload stats in `_nodes/stats/` API ([#8908](https://github.com/opensearch-project/OpenSearch/pull/8908)) ### Deprecated diff --git a/distribution/src/config/opensearch.yml b/distribution/src/config/opensearch.yml index de2d0e023a200..1d2cfe7eccae6 100644 --- a/distribution/src/config/opensearch.yml +++ b/distribution/src/config/opensearch.yml @@ -98,18 +98,10 @@ ${path.logs} # node.attr.remote_store.translog.repository: my-repo-1 # # ---------------------------------- Experimental Features ----------------------------------- -# # Gates the visibility of the experimental segment replication features until they are production ready. # #opensearch.experimental.feature.segment_replication_experimental.enabled: false # -# -# Gates the visibility of the index setting that allows persisting data to remote store along with local disk. 
-# Once the feature is ready for production release, this feature flag can be removed.
-#
-#opensearch.experimental.feature.remote_store.enabled: false
-#
-#
 # Gates the functionality of a new parameter to the snapshot restore API
 # that allows for creation of a new index type that searches a snapshot
 # directly in a remote repository without restoring all index data to disk
diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle
index d78c16d17de72..5103999428814 100644
--- a/distribution/tools/plugin-cli/build.gradle
+++ b/distribution/tools/plugin-cli/build.gradle
@@ -48,10 +48,6 @@ dependencies {
   implementation 'org.apache.commons:commons-compress:1.23.0'
 }
 
-configurations.implementation {
-  exclude group: 'org.bouncycastle', module: 'bcprov-jdk15to18'
-}
-
 tasks.named("dependencyLicenses").configure {
   mapping from: /bc.*/, to: 'bouncycastle'
 }
diff --git a/libs/common/src/main/java/org/opensearch/common/crypto/CryptoHandler.java b/libs/common/src/main/java/org/opensearch/common/crypto/CryptoHandler.java
index 8f36edc9c191f..9572b5b9054b2 100644
--- a/libs/common/src/main/java/org/opensearch/common/crypto/CryptoHandler.java
+++ b/libs/common/src/main/java/org/opensearch/common/crypto/CryptoHandler.java
@@ -11,6 +11,7 @@
 import org.opensearch.common.annotation.ExperimentalApi;
 import org.opensearch.common.io.InputStreamContainer;
 
+import java.io.Closeable;
 import java.io.IOException;
 import java.io.InputStream;
 
@@ -22,7 +23,7 @@
  * @param <U> Parsed Encryption Metadata / CryptoContext
  */
 @ExperimentalApi
-public interface CryptoHandler<T, U> {
+public interface CryptoHandler<T, U> extends Closeable {
 
     /**
      * To initialise or create a new crypto metadata to be used in encryption. This is needed to set the context before
diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java
index b05a069ba971c..32f4ca0317907 100644
--- a/libs/core/src/main/java/org/opensearch/Version.java
+++ b/libs/core/src/main/java/org/opensearch/Version.java
@@ -94,6 +94,7 @@ public class Version implements Comparable<Version>, ToXContentFragment {
     public static final Version V_2_9_0 = new Version(2090099, org.apache.lucene.util.Version.LUCENE_9_7_0);
     public static final Version V_2_9_1 = new Version(2090199, org.apache.lucene.util.Version.LUCENE_9_7_0);
     public static final Version V_2_10_0 = new Version(2100099, org.apache.lucene.util.Version.LUCENE_9_7_0);
+    public static final Version V_2_11_0 = new Version(2110099, org.apache.lucene.util.Version.LUCENE_9_7_0);
     public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_8_0);
 
     public static final Version CURRENT = V_3_0_0;
diff --git a/libs/encryption-sdk/build.gradle b/libs/encryption-sdk/build.gradle
deleted file mode 100644
index c87394f8d9cb9..0000000000000
--- a/libs/encryption-sdk/build.gradle
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */ - -apply plugin: 'opensearch.build' -apply plugin: 'opensearch.publish' - -forbiddenApis.ignoreFailures = false - -thirdPartyAudit.enabled = false -forbiddenApisTest.ignoreFailures = true -testingConventions.enabled = false - -dependencies { - // Common crypto classes - api project(':libs:opensearch-common') - - // Encryption - implementation "com.amazonaws:aws-encryption-sdk-java:2.4.0" - implementation "org.apache.commons:commons-lang3:${versions.commonslang}" - - //Tests - testImplementation "junit:junit:${versions.junit}" - testImplementation "org.hamcrest:hamcrest:${versions.hamcrest}" - testImplementation(project(":test:framework")) { - exclude group: 'org.opensearch', module: 'opensearch-encryption-sdk' - } - - compileOnly 'com.google.code.findbugs:annotations:3.0.1' -} - -tasks.named('forbiddenApisMain').configure { - // Only enable limited check because AD code has too many violations. - replaceSignatureFiles 'jdk-signatures' - signaturesFiles += files('src/forbidden/crypto-signatures.txt') -} - -// Encryption SDK files have missing java docs so disabling for the lib. -tasks.named('missingJavadoc').configure { - enabled = false -} - -forbiddenApisTest.setSignaturesFiles(files('src/forbidden/crypto-test-signatures.txt')) diff --git a/libs/encryption-sdk/licenses/aws-encryption-sdk-java-2.4.0.jar.sha1 b/libs/encryption-sdk/licenses/aws-encryption-sdk-java-2.4.0.jar.sha1 deleted file mode 100644 index 504b4a423a975..0000000000000 --- a/libs/encryption-sdk/licenses/aws-encryption-sdk-java-2.4.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -98943eda1dc05bb01f4f5405e115b08dc541afbf \ No newline at end of file diff --git a/libs/encryption-sdk/licenses/aws-encryption-sdk-java-NOTICE.txt b/libs/encryption-sdk/licenses/aws-encryption-sdk-java-NOTICE.txt deleted file mode 100644 index e32695955374a..0000000000000 --- a/libs/encryption-sdk/licenses/aws-encryption-sdk-java-NOTICE.txt +++ /dev/null @@ -1,11 +0,0 @@ -AWS Encryption SDK -Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. - -THIRD PARTY COMPONENTS -********************** -This software includes third party software subject to the following copyrights: - --Cryptographic functions from Bouncy Castle Crypto APIs for Java - Copyright -2000-2013 The Legion of the Bouncy Castle - -The licenses for these third party components are included in LICENSE.txt diff --git a/libs/encryption-sdk/src/forbidden/crypto-signatures.txt b/libs/encryption-sdk/src/forbidden/crypto-signatures.txt deleted file mode 100644 index 3699186679924..0000000000000 --- a/libs/encryption-sdk/src/forbidden/crypto-signatures.txt +++ /dev/null @@ -1,13 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - * - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -@defaultMessage use format with Locale -java.lang.String#format(java.lang.String,java.lang.Object[]) \ No newline at end of file diff --git a/libs/encryption-sdk/src/forbidden/crypto-test-signatures.txt b/libs/encryption-sdk/src/forbidden/crypto-test-signatures.txt deleted file mode 100644 index 3699186679924..0000000000000 --- a/libs/encryption-sdk/src/forbidden/crypto-test-signatures.txt +++ /dev/null @@ -1,13 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - * - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -@defaultMessage use format with Locale -java.lang.String#format(java.lang.String,java.lang.Object[]) \ No newline at end of file diff --git a/libs/encryption-sdk/src/main/java/org/opensearch/encryption/CryptoManager.java b/libs/encryption-sdk/src/main/java/org/opensearch/encryption/CryptoManager.java deleted file mode 100644 index 8e1fc8570d552..0000000000000 --- a/libs/encryption-sdk/src/main/java/org/opensearch/encryption/CryptoManager.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.encryption; - -import org.opensearch.common.crypto.CryptoHandler; -import org.opensearch.common.util.concurrent.RefCounted; - -/** - * Crypto plugin interface used for encryption and decryption. - */ -public interface CryptoManager extends RefCounted { - - /** - * @return key provider type - */ - String type(); - - /** - * @return key provider name - */ - String name(); - - /** - * @return Crypto provider for encrypting or decrypting raw content. - */ - CryptoHandler getCryptoProvider(); -} diff --git a/libs/encryption-sdk/src/main/java/org/opensearch/encryption/CryptoManagerFactory.java b/libs/encryption-sdk/src/main/java/org/opensearch/encryption/CryptoManagerFactory.java deleted file mode 100644 index e1dc9291ed1a6..0000000000000 --- a/libs/encryption-sdk/src/main/java/org/opensearch/encryption/CryptoManagerFactory.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.encryption; - -import org.opensearch.common.crypto.CryptoHandler; -import org.opensearch.common.crypto.MasterKeyProvider; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.AbstractRefCounted; -import org.opensearch.encryption.keyprovider.CryptoMasterKey; - -import java.security.SecureRandom; -import java.util.concurrent.TimeUnit; - -import com.amazonaws.encryptionsdk.CryptoAlgorithm; -import com.amazonaws.encryptionsdk.caching.CachingCryptoMaterialsManager; -import com.amazonaws.encryptionsdk.caching.LocalCryptoMaterialsCache; - -public class CryptoManagerFactory { - - private final int dataKeyCacheSize; - private final String algorithm; - - // - Cache TTL and Jitter is used to decide the Crypto Cache TTL. 
-    // - Random number between: (TTL - Jitter, TTL + Jitter)
-    private final long dataKeyCacheTTL;
-    private static final long dataKeyCacheJitter = TimeUnit.MINUTES.toMillis(30); // - 30 minutes
-
-    public CryptoManagerFactory(String algorithm, TimeValue keyRefreshInterval, int keyCacheSize) {
-        this.dataKeyCacheSize = keyCacheSize;
-        validateAndGetAlgorithmId(algorithm);
-        this.algorithm = algorithm;
-        dataKeyCacheTTL = keyRefreshInterval.getMillis();
-    }
-
-    private String validateAndGetAlgorithmId(String algorithm) {
-        // Supporting only 256 bit algorithm
-        switch (algorithm) {
-            case "ALG_AES_256_GCM_HKDF_SHA512_COMMIT_KEY":
-                return CryptoAlgorithm.ALG_AES_256_GCM_HKDF_SHA512_COMMIT_KEY.getDataKeyAlgo();
-            case "ALG_AES_256_GCM_HKDF_SHA512_COMMIT_KEY_ECDSA_P384":
-                return CryptoAlgorithm.ALG_AES_256_GCM_HKDF_SHA512_COMMIT_KEY_ECDSA_P384.getDataKeyAlgo();
-            default:
-                throw new IllegalArgumentException("Unsupported algorithm: " + algorithm);
-        }
-    }
-
-    public CryptoManager getOrCreateCryptoManager(
-        MasterKeyProvider keyProvider,
-        String keyProviderName,
-        String keyProviderType,
-        Runnable onClose
-    ) {
-        CachingCryptoMaterialsManager materialsManager = createMaterialsManager(
-            keyProvider,
-            keyProviderName,
-            validateAndGetAlgorithmId(algorithm)
-        );
-        CryptoHandler cryptoHandler = createCryptoProvider(algorithm, materialsManager, keyProvider);
-        return createCryptoManager(cryptoHandler, keyProviderType, keyProviderName, onClose);
-    }
-
-    // package private for tests
-    CryptoHandler createCryptoProvider(
-        String algorithm,
-        CachingCryptoMaterialsManager materialsManager,
-        MasterKeyProvider masterKeyProvider
-    ) {
-        return new NoOpCryptoHandler();
-    }
-
-    // Package private for tests
-    CachingCryptoMaterialsManager createMaterialsManager(MasterKeyProvider masterKeyProvider, String keyProviderName, String algorithm) {
-        SecureRandom r = new SecureRandom();
-        long low = dataKeyCacheTTL - dataKeyCacheJitter;
-        long high = dataKeyCacheTTL + dataKeyCacheJitter;
-        long masterKeyCacheTTL = r.nextInt((int) (high - low)) + low;
-
-        CryptoMasterKey cryptoMasterKey = new CryptoMasterKey(masterKeyProvider, keyProviderName, algorithm);
-        return CachingCryptoMaterialsManager.newBuilder()
-            .withMasterKeyProvider(cryptoMasterKey)
-            .withCache(new LocalCryptoMaterialsCache(dataKeyCacheSize))
-            .withMaxAge(masterKeyCacheTTL, TimeUnit.MILLISECONDS)
-            .build();
-    }
-
-    // package private for tests
-    CryptoManager createCryptoManager(
-        CryptoHandler cryptoHandler,
-        String keyProviderType,
-        String keyProviderName,
-        Runnable onClose
-    ) {
-        return new CryptoManagerImpl(keyProviderName, keyProviderType) {
-            @Override
-            protected void closeInternal() {
-                onClose.run();
-            }
-
-            @Override
-            public String type() {
-                return keyProviderType;
-            }
-
-            @Override
-            public String name() {
-                return keyProviderName;
-            }
-
-            @Override
-            public CryptoHandler getCryptoProvider() {
-                return cryptoHandler;
-            }
-        };
-    }
-
-    private static abstract class CryptoManagerImpl extends AbstractRefCounted implements CryptoManager {
-        public CryptoManagerImpl(String keyProviderName, String keyProviderType) {
-            super(keyProviderName + "-" + keyProviderType);
-        }
-    }
-}
diff --git a/libs/encryption-sdk/src/main/java/org/opensearch/encryption/NoOpCryptoHandler.java b/libs/encryption-sdk/src/main/java/org/opensearch/encryption/NoOpCryptoHandler.java
deleted file mode 100644
index d6b23ed08c6b0..0000000000000
--- a/libs/encryption-sdk/src/main/java/org/opensearch/encryption/NoOpCryptoHandler.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.encryption; - -import org.opensearch.common.crypto.CryptoHandler; -import org.opensearch.common.crypto.DecryptedRangedStreamProvider; -import org.opensearch.common.crypto.EncryptedHeaderContentSupplier; -import org.opensearch.common.io.InputStreamContainer; - -import java.io.IOException; -import java.io.InputStream; - -public class NoOpCryptoHandler implements CryptoHandler { - - /** - * No op - Initialises metadata store used in encryption. - * @return crypto metadata object constructed with encryption metadata like data key pair, encryption algorithm, etc. - */ - public Object initEncryptionMetadata() { - return new Object(); - } - - /** - * No op content size adjustment of length of a partial content used in partial encryption. - * - * @param cryptoContextObj stateful object for a request consisting of materials required in encryption. - * @param streamSize Size of the stream to be adjusted. - * @return Adjusted size of the stream. - */ - public long adjustContentSizeForPartialEncryption(Object cryptoContextObj, long streamSize) { - return streamSize; - } - - /** - * No op - Estimate length of the encrypted stream. - * - * @param cryptoMetadataObj crypto metadata instance - * @param contentLength Size of the raw content - * @return Calculated size of the encrypted stream for the provided raw stream. - */ - public long estimateEncryptedLengthOfEntireContent(Object cryptoMetadataObj, long contentLength) { - return contentLength; - } - - /** - * No op length estimation for a given content length. - * - * @param cryptoMetadataObj crypto metadata instance - * @param contentLength Size of the encrypted content - * @return Calculated size of the encrypted stream for the provided raw stream. - */ - public long estimateDecryptedLength(Object cryptoMetadataObj, long contentLength) { - return contentLength; - } - - /** - * No op encrypting stream wrapper. - * - * @param cryptoContextObj consists encryption metadata. - * @param stream Raw InputStream to encrypt - * @return encrypting stream wrapped around raw InputStream. - */ - public InputStreamContainer createEncryptingStream(Object cryptoContextObj, InputStreamContainer stream) { - return stream; - } - - /** - * No op encrypting stream provider for a part of content. - * - * @param cryptoContextObj stateful object for a request consisting of materials required in encryption. - * @param stream raw stream for which encrypted stream has to be created. - * @param totalStreams Number of streams being used for the entire content. - * @param streamIdx Index of the current stream. - * @return Encrypted stream for the provided raw stream. - */ - public InputStreamContainer createEncryptingStreamOfPart( - Object cryptoContextObj, - InputStreamContainer stream, - int totalStreams, - int streamIdx - ) { - return stream; - } - - /** - * - * @param encryptedHeaderContentSupplier Supplier used to fetch bytes from source for header creation - * @return parsed encryption metadata object - * @throws IOException if content fetch for header creation fails - */ - public Object loadEncryptionMetadata(EncryptedHeaderContentSupplier encryptedHeaderContentSupplier) throws IOException { - return new Object(); - } - - /** - * No op decrypting stream provider. - * - * @param encryptedStream to be decrypted. 
- * @return Decrypting wrapper stream - */ - public InputStream createDecryptingStream(InputStream encryptedStream) { - return encryptedStream; - } - - /** - * No Op decrypted stream range provider - * - * @param cryptoContext crypto metadata instance consisting of encryption metadata used in encryption. - * @param startPosOfRawContent starting position in the raw/decrypted content - * @param endPosOfRawContent ending position in the raw/decrypted content - * @return stream provider for decrypted stream for the specified range of content including adjusted range - */ - public DecryptedRangedStreamProvider createDecryptingStreamOfRange( - Object cryptoContext, - long startPosOfRawContent, - long endPosOfRawContent - ) { - long[] range = { startPosOfRawContent, endPosOfRawContent }; - return new DecryptedRangedStreamProvider(range, (encryptedStream) -> encryptedStream); - } - -} diff --git a/libs/encryption-sdk/src/main/java/org/opensearch/encryption/TrimmingStream.java b/libs/encryption-sdk/src/main/java/org/opensearch/encryption/TrimmingStream.java deleted file mode 100644 index d6640bbe5e79e..0000000000000 --- a/libs/encryption-sdk/src/main/java/org/opensearch/encryption/TrimmingStream.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.encryption; - -import java.io.IOException; -import java.io.InputStream; - -/** - * Trims content from a given source range to a target range. - */ -public class TrimmingStream extends InputStream { - - private final long sourceStart; - private final long sourceEnd; - private final long targetStart; - private final long targetEnd; - private final InputStream in; - - private long offsetFromStart = 0; - - public TrimmingStream(long sourceStart, long sourceEnd, long targetStart, long targetEnd, InputStream in) { - if (sourceStart < 0 - || targetStart < 0 - || targetEnd < 0 - || targetStart > targetEnd - || sourceStart > targetStart - || sourceEnd < targetEnd) { - throw new IllegalArgumentException("Invalid arguments to the bounded stream"); - } - - this.sourceStart = sourceStart; - this.sourceEnd = sourceEnd; - this.targetStart = targetStart; - this.targetEnd = targetEnd; - this.in = in; - } - - private void skipBytesOutsideBounds() throws IOException { - long relativeOffset = offsetFromStart + sourceStart; - - if (relativeOffset < targetStart) { - skipBytes(relativeOffset, targetStart); - } - - if (relativeOffset > targetEnd) { - skipBytes(relativeOffset, sourceEnd + 1); - } - } - - private void skipBytes(long offset, long end) throws IOException { - long bytesToSkip = end - offset; - while (bytesToSkip > 0) { - long skipped = skip(bytesToSkip); - if (skipped <= 0) { - // End of stream or unable to skip further - break; - } - bytesToSkip -= skipped; - } - } - - @Override - public int read() throws IOException { - skipBytesOutsideBounds(); - if (offsetFromStart + sourceStart > targetEnd) { - return -1; - } - int b = in.read(); - if (b != -1) { - offsetFromStart++; - } - // This call is made again to ensure that source stream is fully consumed when it reaches end of target range. 
- skipBytesOutsideBounds(); - return b; - } - - @Override - public int read(byte[] b, int off, int len) throws IOException { - skipBytesOutsideBounds(); - if (offsetFromStart + sourceStart > targetEnd) { - return -1; - } - len = (int) Math.min(len, targetEnd - offsetFromStart - sourceStart + 1); - int bytesRead = in.read(b, off, len); - if (bytesRead != -1) { - offsetFromStart += bytesRead; - } - // This call is made again to ensure that source stream is fully consumed when it reaches end of target range. - skipBytesOutsideBounds(); - return bytesRead; - } - - /** - * Skips specified number of bytes of input. - * @param n the number of bytes to skip - * @return the actual number of bytes skipped - * @throws IOException if an I/O error has occurred - */ - public long skip(long n) throws IOException { - byte[] buf = new byte[512]; - long total = 0; - while (total < n) { - long len = n - total; - len = in.read(buf, 0, len < buf.length ? (int) len : buf.length); - if (len == -1) { - return total; - } - offsetFromStart += len; - total += len; - } - return total; - } -} diff --git a/libs/encryption-sdk/src/main/java/org/opensearch/encryption/keyprovider/CryptoMasterKey.java b/libs/encryption-sdk/src/main/java/org/opensearch/encryption/keyprovider/CryptoMasterKey.java deleted file mode 100644 index 6f014c9b4d99b..0000000000000 --- a/libs/encryption-sdk/src/main/java/org/opensearch/encryption/keyprovider/CryptoMasterKey.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ -package org.opensearch.encryption.keyprovider; - -import org.opensearch.common.crypto.DataKeyPair; -import org.opensearch.common.crypto.MasterKeyProvider; - -import javax.crypto.spec.SecretKeySpec; - -import java.io.Closeable; -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.util.Collection; -import java.util.Map; - -import com.amazonaws.encryptionsdk.CryptoAlgorithm; -import com.amazonaws.encryptionsdk.DataKey; -import com.amazonaws.encryptionsdk.EncryptedDataKey; -import com.amazonaws.encryptionsdk.MasterKey; -import com.amazonaws.encryptionsdk.exception.AwsCryptoException; - -public class CryptoMasterKey extends MasterKey implements Closeable { - private final MasterKeyProvider keyProvider; - private final String keyProviderName; - private final String cryptoAlgorithm; - - public CryptoMasterKey(MasterKeyProvider keyProvider, String keyProviderName, String cryptoAlgorithm) { - this.keyProvider = keyProvider; - this.keyProviderName = keyProviderName; - this.cryptoAlgorithm = cryptoAlgorithm; - } - - @Override - public String getProviderId() { - return keyProviderName; - } - - @Override - public String getKeyId() { - return keyProvider.getKeyId(); - } - - @Override - public DataKey generateDataKey(CryptoAlgorithm algorithm, Map encryptionContext) { - DataKeyPair dataKeyPairResponse = keyProvider.generateDataPair(); - final SecretKeySpec key = new SecretKeySpec(dataKeyPairResponse.getRawKey(), cryptoAlgorithm); - return new DataKey<>(key, dataKeyPairResponse.getEncryptedKey(), getKeyId().getBytes(StandardCharsets.UTF_8), this); - } - - @Override - public DataKey encryptDataKey(CryptoAlgorithm algorithm, Map encryptionContext, DataKey dataKey) { - throw new UnsupportedOperationException("Multiple data-key encryption is not supported."); - } - - @Override - public DataKey decryptDataKey( - CryptoAlgorithm 
algorithm, - Collection encryptedDataKeys, - Map encryptionContext - ) throws AwsCryptoException { - if (encryptedDataKeys == null || encryptedDataKeys.isEmpty()) { - throw new IllegalArgumentException("No encrypted data key passed for decryption."); - } - EncryptedDataKey encryptedDataKey = encryptedDataKeys.iterator().next(); - final String keyId = new String(encryptedDataKey.getProviderInformation(), StandardCharsets.UTF_8); - if (!this.getKeyId().equals(keyId)) { - throw new IllegalArgumentException("Invalid provider info present in encrypted key."); - } - - byte[] encryptedKey = encryptedDataKey.getEncryptedDataKey(); - byte[] rawKey = keyProvider.decryptKey(encryptedKey); - return new DataKey<>(new SecretKeySpec(rawKey, cryptoAlgorithm), encryptedKey, keyId.getBytes(StandardCharsets.UTF_8), this); - } - - @Override - public void close() throws IOException { - keyProvider.close(); - } -} diff --git a/libs/encryption-sdk/src/main/java/org/opensearch/encryption/keyprovider/package-info.java b/libs/encryption-sdk/src/main/java/org/opensearch/encryption/keyprovider/package-info.java deleted file mode 100644 index 611b095a54250..0000000000000 --- a/libs/encryption-sdk/src/main/java/org/opensearch/encryption/keyprovider/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** - * Key provider package specific to encryption sdk - */ -package org.opensearch.encryption.keyprovider; diff --git a/libs/encryption-sdk/src/test/java/org/opensearch/encryption/CryptoManagerFactoryTests.java b/libs/encryption-sdk/src/test/java/org/opensearch/encryption/CryptoManagerFactoryTests.java deleted file mode 100644 index fb5c477232bc4..0000000000000 --- a/libs/encryption-sdk/src/test/java/org/opensearch/encryption/CryptoManagerFactoryTests.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.encryption; - -import org.opensearch.common.crypto.CryptoHandler; -import org.opensearch.common.crypto.MasterKeyProvider; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.test.OpenSearchTestCase; -import org.junit.Before; - -import java.util.Collections; - -import com.amazonaws.encryptionsdk.caching.CachingCryptoMaterialsManager; - -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class CryptoManagerFactoryTests extends OpenSearchTestCase { - - private CryptoManagerFactory cryptoManagerFactory; - - @Before - public void setup() { - cryptoManagerFactory = new CryptoManagerFactory( - "ALG_AES_256_GCM_HKDF_SHA512_COMMIT_KEY_ECDSA_P384", - TimeValue.timeValueDays(2), - 10 - ); - } - - public void testGetOrCreateCryptoManager() { - MasterKeyProvider mockKeyProvider = mock(MasterKeyProvider.class); - when(mockKeyProvider.getEncryptionContext()).thenReturn(Collections.emptyMap()); - - CryptoManager cryptoManager = cryptoManagerFactory.getOrCreateCryptoManager( - mockKeyProvider, - "keyProviderName", - "keyProviderType", - () -> {} - ); - - assertNotNull(cryptoManager); - } - - public void testCreateCryptoProvider() { - CachingCryptoMaterialsManager mockMaterialsManager = mock(CachingCryptoMaterialsManager.class); - MasterKeyProvider mockKeyProvider = mock(MasterKeyProvider.class); - when(mockKeyProvider.getEncryptionContext()).thenReturn(Collections.emptyMap()); - - CryptoHandler cryptoHandler = cryptoManagerFactory.createCryptoProvider( - "ALG_AES_256_GCM_HKDF_SHA512_COMMIT_KEY_ECDSA_P384", - mockMaterialsManager, - mockKeyProvider - ); - - assertNotNull(cryptoHandler); - } - - public void testCreateMaterialsManager() { - MasterKeyProvider mockKeyProvider = mock(MasterKeyProvider.class); - when(mockKeyProvider.getEncryptionContext()).thenReturn(Collections.emptyMap()); - - CachingCryptoMaterialsManager materialsManager = cryptoManagerFactory.createMaterialsManager( - mockKeyProvider, - "keyProviderName", - "ALG_AES_256_GCM_HKDF_SHA512_COMMIT_KEY_ECDSA_P384" - ); - - assertNotNull(materialsManager); - } - - public void testCreateCryptoManager() { - CryptoHandler mockCryptoHandler = mock(CryptoHandler.class); - CryptoManager cryptoManager = cryptoManagerFactory.createCryptoManager( - mockCryptoHandler, - "keyProviderName", - "keyProviderType", - null - ); - assertNotNull(cryptoManager); - } - - public void testUnsupportedAlgorithm() { - expectThrows(IllegalArgumentException.class, () -> new CryptoManagerFactory("Unsupported_algo", TimeValue.timeValueDays(2), 10)); - } -} diff --git a/libs/encryption-sdk/src/test/java/org/opensearch/encryption/MockKeyProvider.java b/libs/encryption-sdk/src/test/java/org/opensearch/encryption/MockKeyProvider.java deleted file mode 100644 index a5e74534ef32b..0000000000000 --- a/libs/encryption-sdk/src/test/java/org/opensearch/encryption/MockKeyProvider.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ -package org.opensearch.encryption; - -import javax.crypto.spec.SecretKeySpec; - -import java.io.BufferedReader; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.nio.charset.StandardCharsets; -import java.util.Collection; -import java.util.Map; - -import com.amazonaws.encryptionsdk.CryptoAlgorithm; -import com.amazonaws.encryptionsdk.DataKey; -import com.amazonaws.encryptionsdk.MasterKey; -import com.amazonaws.encryptionsdk.exception.AwsCryptoException; -import com.amazonaws.encryptionsdk.exception.UnsupportedProviderException; - -@SuppressWarnings({ "rawtypes", "unchecked" }) -public class MockKeyProvider extends MasterKey { - - private static final String keyId = "test-key-id"; - - public static byte[] loadFile(String file) { - byte[] content; - try { - InputStream in = MockKeyProvider.class.getResourceAsStream(file); - StringBuilder stringBuilder = new StringBuilder(); - BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8)); - for (String line; (line = bufferedReader.readLine()) != null;) { - stringBuilder.append(line); - } - content = stringBuilder.toString().getBytes(StandardCharsets.UTF_8); - } catch (Exception e) { - throw new IllegalArgumentException("File " + file + " cannot be read correctly."); - } - String text = new String(content, StandardCharsets.UTF_8); - - String[] byteValues = text.substring(1, text.length() - 1).split(","); - byte[] bytes = new byte[byteValues.length]; - - for (int i = 0, len = bytes.length; i < len; i++) { - bytes[i] = Byte.parseByte(byteValues[i].trim()); - } - - return bytes; - } - - private static final byte[] rawKey = loadFile("/raw_key"); - private static final byte[] encryptedKey = loadFile("/encrypted_key"); - - @Override - public String getProviderId() { - return "sample-provider-id"; - } - - @Override - public String getKeyId() { - return "Sample-key-id"; - } - - @Override - public DataKey encryptDataKey(CryptoAlgorithm algorithm, Map encryptionContext, DataKey dataKey) { - throw new UnsupportedOperationException("Multiple data-key encryption is not supported."); - } - - @Override - public DataKey generateDataKey(CryptoAlgorithm algorithm, Map encryptionContext) { - final SecretKeySpec key = new SecretKeySpec(rawKey, algorithm.getDataKeyAlgo()); - return new DataKey(key, encryptedKey, getKeyId().getBytes(StandardCharsets.UTF_8), this); - } - - @Override - public DataKey decryptDataKey(CryptoAlgorithm algorithm, Collection collection, Map encryptionContext) - throws UnsupportedProviderException, AwsCryptoException { - return new DataKey<>( - new SecretKeySpec(rawKey, algorithm.getDataKeyAlgo()), - encryptedKey, - keyId.getBytes(StandardCharsets.UTF_8), - this - ); - } - - static class DataKeyPair { - private final byte[] rawKey; - private final byte[] encryptedKey; - - public DataKeyPair(byte[] rawKey, byte[] encryptedKey) { - this.rawKey = rawKey; - this.encryptedKey = encryptedKey; - } - - public byte[] getRawKey() { - return this.rawKey; - } - - public byte[] getEncryptedKey() { - return this.encryptedKey; - } - } - -} diff --git a/libs/encryption-sdk/src/test/java/org/opensearch/encryption/NoOpCryptoHandlerTests.java b/libs/encryption-sdk/src/test/java/org/opensearch/encryption/NoOpCryptoHandlerTests.java deleted file mode 100644 index 5e3836fd10988..0000000000000 --- a/libs/encryption-sdk/src/test/java/org/opensearch/encryption/NoOpCryptoHandlerTests.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch 
Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.encryption; - -import org.opensearch.common.crypto.DecryptedRangedStreamProvider; -import org.opensearch.common.crypto.EncryptedHeaderContentSupplier; -import org.opensearch.common.io.InputStreamContainer; -import org.opensearch.test.OpenSearchTestCase; - -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.InputStream; - -public class NoOpCryptoHandlerTests extends OpenSearchTestCase { - - public void testInitEncryptionMetadata() { - NoOpCryptoHandler cryptoProvider = new NoOpCryptoHandler(); - Object encryptionMetadata = cryptoProvider.initEncryptionMetadata(); - assertNotNull(encryptionMetadata); - } - - public void testAdjustContentSizeForPartialEncryption() { - NoOpCryptoHandler cryptoProvider = new NoOpCryptoHandler(); - long originalSize = 1000L; - long adjustedSize = cryptoProvider.adjustContentSizeForPartialEncryption(new Object(), originalSize); - assertEquals(originalSize, adjustedSize); - } - - public void testEstimateEncryptedLengthOfEntireContent() { - NoOpCryptoHandler cryptoProvider = new NoOpCryptoHandler(); - long originalSize = 2000L; - long estimatedSize = cryptoProvider.estimateEncryptedLengthOfEntireContent(new Object(), originalSize); - assertEquals(originalSize, estimatedSize); - } - - public void testEstimateDecryptedLength() { - NoOpCryptoHandler cryptoProvider = new NoOpCryptoHandler(); - long originalSize = 1500L; - long estimatedSize = cryptoProvider.estimateDecryptedLength(new Object(), originalSize); - assertEquals(originalSize, estimatedSize); - } - - public void testCreateEncryptingStream() { - NoOpCryptoHandler cryptoProvider = new NoOpCryptoHandler(); - InputStreamContainer inputStream = randomStream(); - InputStreamContainer encryptedStream = cryptoProvider.createEncryptingStream(new Object(), inputStream); - assertEquals(inputStream, encryptedStream); - } - - public void testCreateEncryptingStreamOfPart() { - NoOpCryptoHandler cryptoProvider = new NoOpCryptoHandler(); - InputStreamContainer inputStream = randomStream(); - InputStreamContainer encryptedStream = cryptoProvider.createEncryptingStreamOfPart(new Object(), inputStream, 2, 1); - assertEquals(inputStream, encryptedStream); - } - - private InputStreamContainer randomStream() { - byte[] bytes = randomAlphaOfLength(10).getBytes(); - ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(bytes); - int offset = randomIntBetween(0, bytes.length - 1); - return new InputStreamContainer(byteArrayInputStream, bytes.length, offset); - } - - public void testLoadEncryptionMetadata() throws IOException { - NoOpCryptoHandler cryptoProvider = new NoOpCryptoHandler(); - EncryptedHeaderContentSupplier supplier = (start, length) -> { throw new UnsupportedOperationException("Not implemented"); }; - Object encryptionMetadata = cryptoProvider.loadEncryptionMetadata(supplier); - assertNotNull(encryptionMetadata); - } - - public void testCreateDecryptingStream() { - NoOpCryptoHandler cryptoProvider = new NoOpCryptoHandler(); - InputStream encryptedStream = randomStream().getInputStream(); - InputStream decryptedStream = cryptoProvider.createDecryptingStream(encryptedStream); - assertEquals(encryptedStream, decryptedStream); - } - - public void testCreateDecryptingStreamOfRange() { - NoOpCryptoHandler cryptoProvider = new NoOpCryptoHandler(); - Object cryptoContext = new Object(); - long startPos = 0L; - long 
endPos = 100L; - DecryptedRangedStreamProvider streamProvider = cryptoProvider.createDecryptingStreamOfRange(cryptoContext, startPos, endPos); - assertNotNull(streamProvider); - InputStream stream = randomStream().getInputStream(); - InputStream decryptedStream = streamProvider.getDecryptedStreamProvider().apply(stream); // Replace with your encrypted input stream - assertEquals(stream, decryptedStream); - } -} diff --git a/libs/encryption-sdk/src/test/java/org/opensearch/encryption/TrimmingStreamTests.java b/libs/encryption-sdk/src/test/java/org/opensearch/encryption/TrimmingStreamTests.java deleted file mode 100644 index f0d957d81e1e1..0000000000000 --- a/libs/encryption-sdk/src/test/java/org/opensearch/encryption/TrimmingStreamTests.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.encryption; - -import org.opensearch.test.OpenSearchTestCase; - -import java.io.ByteArrayInputStream; -import java.io.IOException; - -public class TrimmingStreamTests extends OpenSearchTestCase { - - static class ReadCountInputStreamTest extends ByteArrayInputStream { - - public ReadCountInputStreamTest(byte[] buf) { - super(buf); - } - - public int getPos() { - return pos; - } - } - - public void testReadInRange() throws IOException { - byte[] data = generateRandomData(100); - ReadCountInputStreamTest input = new ReadCountInputStreamTest(data); - - long sourceStart = generateRandomValue(0, 80); - long sourceEnd = generateRandomValue(sourceStart, 99); - long targetStart = generateRandomValue(sourceStart, sourceEnd); - long targetEnd = generateRandomValue(targetStart, sourceEnd); - - TrimmingStream trimmingStream = new TrimmingStream(sourceStart, sourceEnd, targetStart, targetEnd, input); - - byte[] result = new byte[(int) (sourceEnd - sourceStart + 1)]; - int bytesRead = trimmingStream.read(result, 0, result.length); - - long expectedBytesRead = targetEnd - targetStart + 1; - assertEquals(expectedBytesRead, bytesRead); - assertEquals(sourceEnd - sourceStart + 1, input.getPos()); - } - - public void testReadOutsideRange() throws IOException { - byte[] data = generateRandomData(100); - ReadCountInputStreamTest input = new ReadCountInputStreamTest(data); - - long sourceStart = generateRandomValue(0, 80); - long sourceEnd = generateRandomValue(sourceStart, 99); - long targetStart = generateRandomValue(sourceStart, sourceEnd); - long targetEnd = generateRandomValue(targetStart, sourceEnd); - - TrimmingStream trimmingStream = new TrimmingStream(sourceStart, sourceEnd, targetStart, targetEnd, input); - - byte[] result = new byte[(int) (targetEnd - targetStart + 1)]; - int bytesRead = trimmingStream.read(result, 0, result.length); - - long expectedBytesRead = targetEnd - targetStart + 1; - assertEquals(expectedBytesRead, bytesRead); - assertEquals(sourceEnd - sourceStart + 1, input.getPos()); - - // Try to read more bytes, should return -1 (end of stream) - int additionalBytesRead = trimmingStream.read(result, 0, 50); - assertEquals(-1, additionalBytesRead); - assertEquals(sourceEnd - sourceStart + 1, input.getPos()); - } - - public void testSingleByteReadInRange() throws IOException { - byte[] data = generateRandomData(100); - ReadCountInputStreamTest input = new ReadCountInputStreamTest(data); - - long sourceStart = generateRandomValue(0, 80); - long sourceEnd = generateRandomValue(sourceStart, 
99); - long targetStart = generateRandomValue(sourceStart, sourceEnd); - long targetEnd = generateRandomValue(targetStart, sourceEnd); - - TrimmingStream trimmingStream = new TrimmingStream(sourceStart, sourceEnd, targetStart, targetEnd, input); - - int bytesRead = 0; - int value; - while ((value = trimmingStream.read()) != -1) { - bytesRead++; - } - - long expectedBytesRead = targetEnd - targetStart + 1; - assertEquals(expectedBytesRead, bytesRead); - assertEquals(sourceEnd - sourceStart + 1, input.getPos()); - } - - public void testInvalidInputs() { - assertThrows(IllegalArgumentException.class, () -> new TrimmingStream(-10, 60, 20, 40, new ByteArrayInputStream(new byte[100]))); - assertThrows(IllegalArgumentException.class, () -> new TrimmingStream(10, 60, 40, 20, new ByteArrayInputStream(new byte[100]))); - } - - public void testSourceSameAsTarget() throws IOException { - byte[] data = generateRandomData(100); - ReadCountInputStreamTest input = new ReadCountInputStreamTest(data); - - long sourceStart = generateRandomValue(0, 80); - long sourceEnd = generateRandomValue(sourceStart, 99); - TrimmingStream trimmingStream = new TrimmingStream(sourceStart, sourceEnd, sourceStart, sourceEnd, input); - - byte[] result = new byte[(int) (sourceEnd - sourceStart + 1)]; - int bytesRead = trimmingStream.read(result, 0, result.length); - - assertEquals(sourceEnd - sourceStart + 1, bytesRead); - assertEquals(sourceEnd - sourceStart + 1, input.getPos()); - } - - private byte[] generateRandomData(int length) { - byte[] data = new byte[length]; - for (int i = 0; i < length; i++) { - data[i] = (byte) (Math.random() * 256 - 128); - } - return data; - } - - private long generateRandomValue(long min, long max) { - return min + (long) (Math.random() * (max - min + 1)); - } -} diff --git a/libs/encryption-sdk/src/test/resources/encrypted_key b/libs/encryption-sdk/src/test/resources/encrypted_key deleted file mode 100644 index da4e503581585..0000000000000 --- a/libs/encryption-sdk/src/test/resources/encrypted_key +++ /dev/null @@ -1 +0,0 @@ -[1, 2, 1, 0, 120, -96, 18, 71, -6, 90, -126, -39, -16, 94, -113, -46, 71, 85, 35, -66, -117, -108, -59, 88, -81, 64, -118, -74, -102, 50, 103, 16, -76, 23, 19, 20, 67, 1, -11, 55, -3, 32, -89, -16, 1, -40, 59, 76, -2, -61, -49, -97, 34, 14, 0, 0, 0, 126, 48, 124, 6, 9, 42, -122, 72, -122, -9, 13, 1, 7, 6, -96, 111, 48, 109, 2, 1, 0, 48, 104, 6, 9, 42, -122, 72, -122, -9, 13, 1, 7, 1, 48, 30, 6, 9, 96, -122, 72, 1, 101, 3, 4, 1, 46, 48, 17, 4, 12, -63, 67, 37, -51, 85, 75, 7, -64, -78, 52, 102, 26, 2, 1, 16, -128, 59, -98, -123, 100, 125, -37, 102, -87, -71, 74, 68, 54, 56, -32, 77, 127, -86, -125, -17, 45, 75, -98, 54, -52, -15, -56, -47, -88, -12, -128, 113, -5, -18, -14, 127, 114, -9, 47, -112, -38, 39, 2, -89, 117, 64, -2, 47, -81, 52, 27, -118, 37, 79, -64, 58, -3, 10, -115, 122, 124] \ No newline at end of file diff --git a/libs/encryption-sdk/src/test/resources/raw_content_for_crypto_test b/libs/encryption-sdk/src/test/resources/raw_content_for_crypto_test deleted file mode 100644 index c93b6161ac8d6..0000000000000 --- a/libs/encryption-sdk/src/test/resources/raw_content_for_crypto_test +++ /dev/null @@ -1,25 +0,0 @@ -ewogICJmaWxlSW5mb3MiOiBbCiAgICB7CiAgICAgICJuYW1lIjogIl80LmZubSIsCiAgICAgICJyZW1vdGVfc -GF0aCI6ICIyYzYwMzNmNmZlZTY0NTY1YTU3YzQzZWVmZThmY2QzMS9kdW1teS1jb2xsZWN0aW9uMi9kMDRmYz -AyZi0wMDQ0LTRhYmYtYjgzMy0xMGE0YTA5M2VkNTcvMC8wL2luZGljZXMvMSIsCiAgICAgICJzaXplIjogOTQz -CiAgICB9LAogICAgewogICAgICAibmFtZSI6ICJfMl9MdWNlbmU4MF8wLmR2ZCIsCiAgICAgICJyZW1vdGVfcGF 
-0aCI6ICIyYzYwMzNmNmZlZTY0NTY1YTU3YzQzZWVmZThmY2QzMS9kdW1teS1jb2xsZWN0aW9uMi9kMDRmYzAyZi0wMDQ0LTRhYmYtYjg -zMy0xMGE0YTA5M2VkNTcvMC8wL2luZGljZXMvMSIsCiAgICAgICJzaXplIjogMzU1CiAgICB9CiAgXQp9 -ewogICJja3BfZmlsZSI6IHsKICAgICJuYW1lIjogInRyYW5zbG9nLTguY2twIiwKICAgICJyZW1vdGVfcGF0aCI6ICIyYz -YwMzNmNmZlZTY0NTY1YTU3YzQzZWVmZThmY2QzMS9kdW1teS1jb2xsZWN0aW9uMi9kMDRmYzAyZi0wMDQ0LTRhYmYtYjgzMy0 -xMGE0YTA5M2VkNTcvMC8wL3RyYW5zbG9nLzEiLAogICAgInNpemUiOiAwCiAgfSwKICAidGxvZ192ZXJzaW9uIjogewogICAgIjg -iOiAiMmM2MDMzZjZmZWU2NDU2NWE1N2M0M2VlZmU4ZmNkMzEvZHVtbXktY29sbGVjdGlvbjIvZDA0ZmMwMmYtMDA0NC00YWJmLWI4MzMtMT -BhNGEwOTNlZDU3LzAvMC90cmFuc2xvZy8xIgogIH0KfQ== -ewogICJmaWxlSW5mb3MiOiBbCiAgICB7CiAgICAgICJuYW1lIjogIl80LmZubSIsCiAgICAgICJyZW1vdGVfcGF0aCI6ICIyYzYwMzNmNmZl -ZTY0NTY1YTU3YzQzZWVmZThmY2QzMS9kdW1teS1jb2xsZWN0aW9uMi9kMDRmYzAyZi0wMDQ0LTRhYmYtYjgzMy0xMGE0YTA5M2VkNTcvMC8wL2luZG -ljZXMvMSIsCiAgICAgICJzaXplIjogOTQzCiAgICB9LAogICAgewogICAgICAibmFtZSI6ICJfNC5mZHQiLAogICAgICAicmVtb3RlX3BhdGgiOiAi -MmM2MDMzZjZmZWU2NDU2NWE1N2M0M2VlZmU4ZmNkMzEvZHVtbXktY29sbGVjdGlvbjIvZDA0ZmMwMmYtMDA0NC00YWJmLWI4MzMtMTBhNGEwOTNlZDU3 -LzAvMC9pbmRpY2VzLzEiLAogICAgICAic2l6ZSI6IDQ1MTMKICAgIH0sCiAgICB7CiAgICAgICJuYW1lIjogInNlZ21lbnRzX2MiLAogICAgICAicmVtb3R -lX3BhdGgiOiAiMmM2MDMzZjZmZWU2NDU2NWE1N2M0M2VlZmU4ZmNkMzEvZHVtbXktY29sbGVjdGlvbjIvZDA0ZmMwMmYtMDA0NC00YWJmLWI4MzM -tMTBhNGEwOTNlZDU3LzAvMC9pbmRpY2VzLzEiLAogICAgICAic2l6ZSI6IDM1NQogICAgfQogIF0KfQ== -ewogICJja3BfZmlsZSI6IHsKICAgICJuYW1lIjogInRyYW5zbG9nLTcuY2twIiwKICAgICJyZW1vdGVfcGF0aCI6ICIyYzYwMzNmNmZlZ -TY0NTY1YTU3YzQzZWVmZThmY2QzMS9kdW1teS1jb2xsZWN0aW9uMi9kMDRmYzAyZi0wMDQ0LTRhYmYtYjgzMy0xMGE0YTA5M2VkNTcvMC8wL3RyY -W5zbG9nLzEiLAogICAgInNpemUiOiAwCiAgfSwKICAidGxvZ192ZXJzaW9uIjogewogICAgIjYiOiAiMmM2MDMzZjZmZWU2NDU2NWE1N2M0M2VlZ -mU4ZmNkMzEvZHVtbXktY29sbGVjdGlvbjIvZDA0ZmMwMmYtMDA0NC00YWJmLWI4MzMtMTBhNGEwOTNlZDU3LzAvMC90cmFuc2xvZy8xIiwKICAgICI3Ijo -gIjJjNjAzM2Y2ZmVlNjQ1NjVhNTdjNDNlZWZlOGZjZDMxL2R1bW15LWNvbGxlY3Rpb24yL2QwNGZjMDJmLTAwNDQtNGFiZi1iODMzLTEwYTRhMDkzZW -Q1Ny8wLzAvdHJhbnNsb2cvMSIKICB9Cn0= - diff --git a/libs/encryption-sdk/src/test/resources/raw_key b/libs/encryption-sdk/src/test/resources/raw_key deleted file mode 100644 index 3c4f8b54cbb6a..0000000000000 --- a/libs/encryption-sdk/src/test/resources/raw_key +++ /dev/null @@ -1 +0,0 @@ -[57, 59, -48, -8, -44, 9, -78, 16, 106, -80, 66, -41, 66, 43, -88, 7, 47, -23, -16, -43, 99, 104, -8, -74, 46, -117, -111, -41, -39, -69, 5, 117] \ No newline at end of file diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index cad7d67f3ef84..ea517ca53c000 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -69,6 +69,7 @@ dependencies { testImplementation project(':modules:transport-netty4') // for parent/child testing testImplementation project(':modules:parent-join') + testImplementation project(':plugins:custom-codecs') } restResources { @@ -95,4 +96,5 @@ forbiddenPatterns { tasks.named("bundlePlugin").configure { dependsOn("copyParentJoinMetadata") dependsOn("copyTransportNetty4Metadata") + dependsOn("copyCustomCodecsMetadata") } diff --git a/modules/reindex/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecReindexIT.java b/modules/reindex/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecReindexIT.java index 7c2fe8d99c330..3f5df7cf57897 100644 --- a/modules/reindex/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecReindexIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecReindexIT.java @@ -15,14 +15,19 
@@
 import org.opensearch.action.support.ActiveShardCount;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.index.codec.customcodecs.CustomCodecPlugin;
 import org.opensearch.index.engine.Segment;
 import org.opensearch.index.reindex.BulkByScrollResponse;
 import org.opensearch.index.reindex.ReindexAction;
+import org.opensearch.index.reindex.ReindexModulePlugin;
 import org.opensearch.index.reindex.ReindexRequestBuilder;
 import org.opensearch.index.reindex.ReindexTestCase;
+import org.opensearch.plugins.Plugin;
 
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
 import java.util.Map;
 import java.util.UUID;
 import java.util.concurrent.ExecutionException;
@@ -40,6 +45,11 @@ public class MultiCodecReindexIT extends ReindexTestCase {
 
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return List.of(CustomCodecPlugin.class, ReindexModulePlugin.class);
+    }
+
     public void testReindexingMultipleCodecs() throws InterruptedException, ExecutionException {
         internalCluster().ensureAtLeastNumDataNodes(1);
         Map<String, String> codecMap = Map.of(
@@ -125,7 +135,7 @@ private void assertReindexingWithMultipleCodecs(String destCodec, String destCod
     }
 
     private void useCodec(String index, String codec) throws ExecutionException, InterruptedException {
-        assertAcked(client().admin().indices().prepareClose(index));
+        assertAcked(client().admin().indices().prepareClose(index).setWaitForActiveShards(1));
 
         assertAcked(
             client().admin()
@@ -134,7 +144,7 @@ private void useCodec(String index, String codec) throws ExecutionException, Int
                 .get()
         );
 
-        assertAcked(client().admin().indices().prepareOpen(index));
+        assertAcked(client().admin().indices().prepareOpen(index).setWaitForActiveShards(1));
     }
 
     private void flushAndRefreshIndex(String index) {
diff --git a/plugins/crypto-kms/build.gradle b/plugins/crypto-kms/build.gradle
index fa76118a43d92..c4a8609b6df48 100644
--- a/plugins/crypto-kms/build.gradle
+++ b/plugins/crypto-kms/build.gradle
@@ -26,8 +26,6 @@ ext {
 }
 
 dependencies {
-  api project(':libs:opensearch-encryption-sdk')
-
   api "software.amazon.awssdk:sdk-core:${versions.aws}"
   api "software.amazon.awssdk:aws-core:${versions.aws}"
   api "software.amazon.awssdk:utils:${versions.aws}"
@@ -56,6 +54,10 @@ dependencies {
   api "org.reactivestreams:reactive-streams:${versions.reactivestreams}"
 }
 
+//testClusters.all {
+//  module ':modules:crypto'
+//}
+
 tasks.named("dependencyLicenses").configure {
   mapping from: /jackson-.*/, to: 'jackson'
   mapping from: /jaxb-.*/, to: 'jaxb'
diff --git a/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsService.java b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsService.java
index 87f6cfbb254c6..108c88bd3bf80 100644
--- a/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsService.java
+++ b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsService.java
@@ -242,12 +242,13 @@ static void setDefaultAwsProfilePath() {
     }
 
     public MasterKeyProvider createMasterKeyProvider(CryptoMetadata cryptoMetadata) {
-        String keyArn = KEY_ARN_SETTING.get(cryptoMetadata.settings());
+        Settings cryptoSettings = Settings.builder().put(cryptoMetadata.settings()).normalizePrefix("kms.").build();
+        String keyArn = KEY_ARN_SETTING.get(cryptoSettings);
         if (!Strings.hasText(keyArn)) {
             throw new IllegalArgumentException("Missing key_arn setting");
         }
 
-        String kmsEncCtx = ENC_CTX_SETTING.get(cryptoMetadata.settings());
+        String kmsEncCtx = ENC_CTX_SETTING.get(cryptoSettings);
         Map<String, String> encCtx;
         if (Strings.hasText(kmsEncCtx)) {
             try {
diff --git a/plugins/custom-codecs/build.gradle b/plugins/custom-codecs/build.gradle
new file mode 100644
index 0000000000000..253822e88b817
--- /dev/null
+++ b/plugins/custom-codecs/build.gradle
@@ -0,0 +1,27 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+apply plugin: 'opensearch.opensearchplugin'
+apply plugin: 'opensearch.internal-cluster-test'
+
+opensearchplugin {
+  name 'custom-codecs'
+  description 'A plugin that implements custom compression codecs.'
+  classname 'org.opensearch.index.codec.customcodecs.CustomCodecPlugin'
+  licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt')
+  noticeFile rootProject.file('NOTICE.txt')
+}
+
+dependencies {
+  api "com.github.luben:zstd-jni:1.5.5-5"
+}
+
+testingConventions.enabled = false;
diff --git a/server/src/internalClusterTest/java/org/opensearch/index/codec/CodecCompressionLevelIT.java b/plugins/custom-codecs/src/internalClusterTest/java/org/opensearch/index/codec/CodecCompressionLevelIT.java
similarity index 88%
rename from server/src/internalClusterTest/java/org/opensearch/index/codec/CodecCompressionLevelIT.java
rename to plugins/custom-codecs/src/internalClusterTest/java/org/opensearch/index/codec/CodecCompressionLevelIT.java
index 5f3e53f1454fc..7810b5810fffb 100644
--- a/server/src/internalClusterTest/java/org/opensearch/index/codec/CodecCompressionLevelIT.java
+++ b/plugins/custom-codecs/src/internalClusterTest/java/org/opensearch/index/codec/CodecCompressionLevelIT.java
@@ -12,14 +12,24 @@
 import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.index.codec.customcodecs.CustomCodecPlugin;
+import org.opensearch.plugins.Plugin;
 import org.opensearch.test.OpenSearchIntegTestCase;
 
+import java.util.Collection;
+import java.util.Collections;
 import java.util.concurrent.ExecutionException;
 
+import static org.opensearch.index.codec.customcodecs.CustomCodecService.ZSTD_CODEC;
+import static org.opensearch.index.codec.customcodecs.CustomCodecService.ZSTD_NO_DICT_CODEC;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
 
 @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST)
 public class CodecCompressionLevelIT extends OpenSearchIntegTestCase {
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return Collections.singletonList(CustomCodecPlugin.class);
+    }
 
     public void testLuceneCodecsCreateIndexWithCompressionLevel() {
 
@@ -62,7 +72,7 @@ public void testZStandardCodecsCreateIndexWithCompressionLevel() {
             Settings.builder()
                 .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
                 .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
-                .put("index.codec", randomFrom(CodecService.ZSTD_CODEC, CodecService.ZSTD_NO_DICT_CODEC))
+                .put("index.codec", randomFrom(ZSTD_CODEC, ZSTD_NO_DICT_CODEC))
                 .put("index.codec.compression_level", randomIntBetween(1, 6))
                 .build()
         );
@@ -81,13 +91,13 @@ public void testZStandardToLuceneCodecsWithCompressionLevel() throws ExecutionEx
             Settings.builder()
                 .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
                 .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
-                .put("index.codec", randomFrom(CodecService.ZSTD_CODEC, CodecService.ZSTD_NO_DICT_CODEC))
+                .put("index.codec", randomFrom(ZSTD_CODEC, ZSTD_NO_DICT_CODEC))
                 .put("index.codec.compression_level", randomIntBetween(1, 6))
                 .build()
         );
         ensureGreen(index);
 
-        assertAcked(client().admin().indices().prepareClose(index));
+        assertAcked(client().admin().indices().prepareClose(index).setWaitForActiveShards(1));
 
         Throwable executionException = expectThrows(
             ExecutionException.class,
@@ -118,7 +128,7 @@ public void testZStandardToLuceneCodecsWithCompressionLevel() throws ExecutionEx
                 .get()
         );
 
-        assertAcked(client().admin().indices().prepareOpen(index));
+        assertAcked(client().admin().indices().prepareOpen(index).setWaitForActiveShards(1));
 
         ensureGreen(index);
     }
 
@@ -138,7 +148,7 @@ public void testLuceneToZStandardCodecsWithCompressionLevel() throws ExecutionEx
         );
         ensureGreen(index);
 
-        assertAcked(client().admin().indices().prepareClose(index));
+        assertAcked(client().admin().indices().prepareClose(index).setWaitForActiveShards(1));
 
         Throwable executionException = expectThrows(
             ExecutionException.class,
@@ -164,14 +174,14 @@ public void testLuceneToZStandardCodecsWithCompressionLevel() throws ExecutionEx
                 .updateSettings(
                     new UpdateSettingsRequest(index).settings(
                         Settings.builder()
-                            .put("index.codec", randomFrom(CodecService.ZSTD_CODEC, CodecService.ZSTD_NO_DICT_CODEC))
+                            .put("index.codec", randomFrom(ZSTD_CODEC, ZSTD_NO_DICT_CODEC))
                             .put("index.codec.compression_level", randomIntBetween(1, 6))
                     )
                 )
                 .get()
         );
 
-        assertAcked(client().admin().indices().prepareOpen(index));
+        assertAcked(client().admin().indices().prepareOpen(index).setWaitForActiveShards(1));
 
         ensureGreen(index);
     }
diff --git a/server/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecMergeIT.java b/plugins/custom-codecs/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecMergeIT.java
similarity index 94%
rename from server/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecMergeIT.java
rename to plugins/custom-codecs/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecMergeIT.java
index 23d5cc3b35486..bb508282e1952 100644
--- a/server/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecMergeIT.java
+++ b/plugins/custom-codecs/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecMergeIT.java
@@ -15,11 +15,15 @@
 import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.index.codec.customcodecs.CustomCodecPlugin;
 import org.opensearch.index.engine.Segment;
+import org.opensearch.plugins.Plugin;
 import org.opensearch.test.OpenSearchIntegTestCase;
 
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.UUID;
@@ -40,6 +44,11 @@
 @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST)
 public class MultiCodecMergeIT extends OpenSearchIntegTestCase {
 
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return Collections.singletonList(CustomCodecPlugin.class);
+    }
+
     public void testForceMergeMultipleCodecs() throws ExecutionException, InterruptedException {
 
         Map<String, String> codecMap = Map.of(
@@ -109,7 +118,7 @@ private void forceMergeMultipleCodecs(String finalCodec, String finalCodecMode,
     }
 
     private void useCodec(String index, String codec) throws ExecutionException, InterruptedException {
-        assertAcked(client().admin().indices().prepareClose(index));
+        assertAcked(client().admin().indices().prepareClose(index).setWaitForActiveShards(1));
 
         assertAcked(
             client().admin()
@@ -118,7 +127,7 @@ private void useCodec(String index, String codec) throws ExecutionException, Int
                 .get()
         );
 
-        assertAcked(client().admin().indices().prepareOpen(index));
+        assertAcked(client().admin().indices().prepareOpen(index).setWaitForActiveShards(1));
     }
 
     private void ingestDocs(String index) throws InterruptedException {
diff --git a/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecPlugin.java b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecPlugin.java
new file mode 100644
index 0000000000000..91a13a1d924a2
--- /dev/null
+++ b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecPlugin.java
@@ -0,0 +1,47 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.codec.customcodecs;
+
+import org.opensearch.index.IndexSettings;
+import org.opensearch.index.codec.CodecServiceFactory;
+import org.opensearch.index.engine.EngineConfig;
+import org.opensearch.plugins.EnginePlugin;
+import org.opensearch.plugins.Plugin;
+
+import java.util.Optional;
+
+/**
+ * A plugin that implements custom codecs. Supports these codecs:
+ *
<ul> + * <li>ZSTD + * <li>ZSTDNODICT + * </ul>
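+ * <p>The plugin supplies its {@link CustomCodecServiceFactory} only when {@code index.codec} resolves to one of the ZSTD variants; for any other codec it defers to the default codec service.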
+ * + * @opensearch.internal + */ +public final class CustomCodecPlugin extends Plugin implements EnginePlugin { + + /** + * Creates a new instance + */ + public CustomCodecPlugin() {} + + /** + * @param indexSettings is the default indexSettings + * @return the engine factory + */ + @Override + public Optional getCustomCodecServiceFactory(final IndexSettings indexSettings) { + String codecName = indexSettings.getValue(EngineConfig.INDEX_CODEC_SETTING); + if (codecName.equals(CustomCodecService.ZSTD_NO_DICT_CODEC) || codecName.equals(CustomCodecService.ZSTD_CODEC)) { + return Optional.of(new CustomCodecServiceFactory()); + } + return Optional.empty(); + } +} diff --git a/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecService.java b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecService.java new file mode 100644 index 0000000000000..de0eb2b3286d3 --- /dev/null +++ b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecService.java @@ -0,0 +1,72 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.customcodecs; + +import org.apache.logging.log4j.Logger; +import org.apache.lucene.codecs.Codec; +import org.opensearch.common.collect.MapBuilder; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.codec.CodecService; +import org.opensearch.index.mapper.MapperService; + +import java.util.Arrays; +import java.util.Map; +import java.util.stream.Stream; + +import static org.opensearch.index.engine.EngineConfig.INDEX_CODEC_COMPRESSION_LEVEL_SETTING; + +/** + * CustomCodecService provides ZSTD and ZSTD_NO_DICT compression codecs. + */ +public class CustomCodecService extends CodecService { + private final Map codecs; + /** + * ZStandard codec + */ + public static final String ZSTD_CODEC = "zstd"; + /** + * ZStandard without dictionary codec + */ + public static final String ZSTD_NO_DICT_CODEC = "zstd_no_dict"; + + /** + * Creates a new CustomCodecService. + * + * @param mapperService The mapper service. + * @param indexSettings The index settings. + * @param logger The logger. 
+ */ + public CustomCodecService(MapperService mapperService, IndexSettings indexSettings, Logger logger) { + super(mapperService, indexSettings, logger); + int compressionLevel = indexSettings.getValue(INDEX_CODEC_COMPRESSION_LEVEL_SETTING); + final MapBuilder codecs = MapBuilder.newMapBuilder(); + if (mapperService == null) { + codecs.put(ZSTD_CODEC, new ZstdCodec(compressionLevel)); + codecs.put(ZSTD_NO_DICT_CODEC, new ZstdNoDictCodec(compressionLevel)); + } else { + codecs.put(ZSTD_CODEC, new ZstdCodec(mapperService, logger, compressionLevel)); + codecs.put(ZSTD_NO_DICT_CODEC, new ZstdNoDictCodec(mapperService, logger, compressionLevel)); + } + this.codecs = codecs.immutableMap(); + } + + @Override + public Codec codec(String name) { + Codec codec = codecs.get(name); + if (codec == null) { + return super.codec(name); + } + return codec; + } + + @Override + public String[] availableCodecs() { + return Stream.concat(Arrays.stream(super.availableCodecs()), codecs.keySet().stream()).toArray(String[]::new); + } +} diff --git a/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecServiceFactory.java b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecServiceFactory.java new file mode 100644 index 0000000000000..d634616162684 --- /dev/null +++ b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecServiceFactory.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.customcodecs; + +import org.opensearch.index.codec.CodecService; +import org.opensearch.index.codec.CodecServiceConfig; +import org.opensearch.index.codec.CodecServiceFactory; + +/** + * A factory for creating new {@link CodecService} instance + */ +public class CustomCodecServiceFactory implements CodecServiceFactory { + + /** Creates a new instance. */ + public CustomCodecServiceFactory() {} + + @Override + public CodecService createCodecService(CodecServiceConfig config) { + return new CustomCodecService(config.getMapperService(), config.getIndexSettings(), config.getLogger()); + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java similarity index 54% rename from server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java rename to plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java index 8aa422a47a073..89acc980abd2f 100644 --- a/server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java +++ b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java @@ -15,6 +15,9 @@ import org.opensearch.index.codec.PerFieldMappingPostingFormatCodec; import org.opensearch.index.mapper.MapperService; +import java.util.Collections; +import java.util.Set; + /** * * Extends {@link FilterCodec} to reuse the functionality of Lucene Codec. @@ -23,12 +26,48 @@ * @opensearch.internal */ public abstract class Lucene95CustomCodec extends FilterCodec { + + /** Default compression level used for compression */ public static final int DEFAULT_COMPRESSION_LEVEL = 3; /** Each mode represents a compression algorithm. 
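+ * The name returned by {@code getCodec()} is what gets registered with Lucene, while {@code getAliases()} lists the {@code index.codec} values that select it.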
*/ public enum Mode { - ZSTD, - ZSTD_NO_DICT + /** + * ZStandard mode with dictionary + */ + ZSTD("ZSTD", Set.of("zstd")), + /** + * ZStandard mode without dictionary + */ + ZSTD_NO_DICT("ZSTDNODICT", Set.of("zstd_no_dict")), + /** + * Deprecated ZStandard mode, added for backward compatibility to support indices created in 2.9.0 where + * both ZSTD and ZSTD_NO_DICT used Lucene95CustomCodec underneath. This should not be used to + * create new indices. + */ + ZSTD_DEPRECATED("Lucene95CustomCodec", Collections.emptySet()); + + private final String codec; + private final Set aliases; + + Mode(String codec, Set aliases) { + this.codec = codec; + this.aliases = aliases; + } + + /** + * Returns the Codec that is registered with Lucene + */ + public String getCodec() { + return codec; + } + + /** + * Returns the aliases of the Codec + */ + public Set getAliases() { + return aliases; + } } private final StoredFieldsFormat storedFieldsFormat; @@ -51,12 +90,22 @@ public Lucene95CustomCodec(Mode mode) { * @param compressionLevel The compression level. */ public Lucene95CustomCodec(Mode mode, int compressionLevel) { - super("Lucene95CustomCodec", new Lucene95Codec()); + super(mode.getCodec(), new Lucene95Codec()); this.storedFieldsFormat = new Lucene95CustomStoredFieldsFormat(mode, compressionLevel); } + /** + * Creates a new compression codec with the given compression level. We use + * lowercase letters when registering the codec so that we remain consistent with + * the other compression codecs: default, lucene_default, and best_compression. + * + * @param mode The compression codec (ZSTD or ZSTDNODICT). + * @param compressionLevel The compression level. + * @param mapperService The mapper service. + * @param logger The logger. + */ public Lucene95CustomCodec(Mode mode, int compressionLevel, MapperService mapperService, Logger logger) { - super("Lucene95CustomCodec", new PerFieldMappingPostingFormatCodec(Lucene95Codec.Mode.BEST_SPEED, mapperService, logger)); + super(mode.getCodec(), new PerFieldMappingPostingFormatCodec(Lucene95Codec.Mode.BEST_SPEED, mapperService, logger)); this.storedFieldsFormat = new Lucene95CustomStoredFieldsFormat(mode, compressionLevel); } diff --git a/server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java similarity index 98% rename from server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java rename to plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java index 2816e2907a5f6..79d97035089ab 100644 --- a/server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java +++ b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java @@ -101,6 +101,7 @@ public StoredFieldsWriter fieldsWriter(Directory directory, SegmentInfo si, IOCo StoredFieldsFormat impl(Lucene95CustomCodec.Mode mode) { switch (mode) { case ZSTD: + case ZSTD_DEPRECATED: return new Lucene90CompressingStoredFieldsFormat( "CustomStoredFieldsZstd", zstdCompressionMode, @@ -125,6 +126,9 @@ Lucene95CustomCodec.Mode getMode() { return mode; } + /** + * Returns the compression level. 
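+ * Used by the unit tests to assert that the configured {@code index.codec.compression_level} reached the stored fields format.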
+ */ public int getCompressionLevel() { return compressionLevel; } diff --git a/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java similarity index 79% rename from server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java rename to plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java index 042f7eaa29e53..a3e3a34a5d258 100644 --- a/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java +++ b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java @@ -10,14 +10,17 @@ import org.apache.logging.log4j.Logger; import org.opensearch.common.settings.Setting; +import org.opensearch.index.codec.CodecAliases; import org.opensearch.index.codec.CodecSettings; import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.mapper.MapperService; +import java.util.Set; + /** * ZstdCodec provides ZSTD compressor using the zstd-jni library. */ -public class ZstdCodec extends Lucene95CustomCodec implements CodecSettings { +public class ZstdCodec extends Lucene95CustomCodec implements CodecSettings, CodecAliases { /** * Creates a new ZstdCodec instance with the default compression level. @@ -35,6 +38,13 @@ public ZstdCodec(int compressionLevel) { super(Mode.ZSTD, compressionLevel); } + /** + * Creates a new ZstdCodec instance. + * + * @param mapperService The mapper service. + * @param logger The logger. + * @param compressionLevel The compression level. + */ public ZstdCodec(MapperService mapperService, Logger logger, int compressionLevel) { super(Mode.ZSTD, compressionLevel, mapperService, logger); } @@ -49,4 +59,9 @@ public String toString() { public boolean supports(Setting setting) { return setting.equals(EngineConfig.INDEX_CODEC_COMPRESSION_LEVEL_SETTING); } + + @Override + public Set aliases() { + return Mode.ZSTD.getAliases(); + } } diff --git a/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java similarity index 100% rename from server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java rename to plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java diff --git a/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdDeprecatedCodec.java b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdDeprecatedCodec.java new file mode 100644 index 0000000000000..02fa386db97b3 --- /dev/null +++ b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdDeprecatedCodec.java @@ -0,0 +1,61 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.customcodecs; + +import org.apache.logging.log4j.Logger; +import org.opensearch.common.settings.Setting; +import org.opensearch.index.codec.CodecSettings; +import org.opensearch.index.engine.EngineConfig; +import org.opensearch.index.mapper.MapperService; + +/** + * ZstdDeprecatedCodec provides ZSTD compressor using the zstd-jni library. + * Added to support backward compatibility for indices created with Lucene95CustomCodec as codec name. 
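+ * Segments written by such indices record "Lucene95CustomCodec" as their codec name, so a codec with that exact name must remain registered via SPI for them to stay readable.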
+ */ +@Deprecated(since = "2.10") +public class ZstdDeprecatedCodec extends Lucene95CustomCodec implements CodecSettings { + + /** + * Creates a new ZstdDefaultCodec instance with the default compression level. + */ + public ZstdDeprecatedCodec() { + this(DEFAULT_COMPRESSION_LEVEL); + } + + /** + * Creates a new ZstdDefaultCodec instance. + * + * @param compressionLevel The compression level. + */ + public ZstdDeprecatedCodec(int compressionLevel) { + super(Mode.ZSTD_DEPRECATED, compressionLevel); + } + + /** + * Creates a new ZstdDefaultCodec instance. + * + * @param mapperService The mapper service. + * @param logger The logger. + * @param compressionLevel The compression level. + */ + public ZstdDeprecatedCodec(MapperService mapperService, Logger logger, int compressionLevel) { + super(Mode.ZSTD_DEPRECATED, compressionLevel, mapperService, logger); + } + + /** The name for this codec. */ + @Override + public String toString() { + return getClass().getSimpleName(); + } + + @Override + public boolean supports(Setting setting) { + return setting.equals(EngineConfig.INDEX_CODEC_COMPRESSION_LEVEL_SETTING); + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java similarity index 78% rename from server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java rename to plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java index a7e8e0e42ee68..ea7351f755361 100644 --- a/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java +++ b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java @@ -10,14 +10,17 @@ import org.apache.logging.log4j.Logger; import org.opensearch.common.settings.Setting; +import org.opensearch.index.codec.CodecAliases; import org.opensearch.index.codec.CodecSettings; import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.mapper.MapperService; +import java.util.Set; + /** * ZstdNoDictCodec provides ZSTD compressor without a dictionary support. */ -public class ZstdNoDictCodec extends Lucene95CustomCodec implements CodecSettings { +public class ZstdNoDictCodec extends Lucene95CustomCodec implements CodecSettings, CodecAliases { /** * Creates a new ZstdNoDictCodec instance with the default compression level. @@ -35,6 +38,13 @@ public ZstdNoDictCodec(int compressionLevel) { super(Mode.ZSTD_NO_DICT, compressionLevel); } + /** + * Creates a new ZstdNoDictCodec instance. + * + * @param mapperService The mapper service. + * @param logger The logger. + * @param compressionLevel The compression level. 
+ */ public ZstdNoDictCodec(MapperService mapperService, Logger logger, int compressionLevel) { super(Mode.ZSTD_NO_DICT, compressionLevel, mapperService, logger); } @@ -49,4 +59,9 @@ public String toString() { public boolean supports(Setting setting) { return setting.equals(EngineConfig.INDEX_CODEC_COMPRESSION_LEVEL_SETTING); } + + @Override + public Set aliases() { + return Mode.ZSTD_NO_DICT.getAliases(); + } } diff --git a/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java similarity index 100% rename from server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java rename to plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java diff --git a/server/src/main/java/org/opensearch/index/codec/customcodecs/package-info.java b/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/package-info.java similarity index 100% rename from server/src/main/java/org/opensearch/index/codec/customcodecs/package-info.java rename to plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/package-info.java diff --git a/libs/encryption-sdk/src/main/java/org/opensearch/encryption/package-info.java b/plugins/custom-codecs/src/main/plugin-metadata/plugin-security.policy similarity index 66% rename from libs/encryption-sdk/src/main/java/org/opensearch/encryption/package-info.java rename to plugins/custom-codecs/src/main/plugin-metadata/plugin-security.policy index 1fa008797ce87..8161010cfa897 100644 --- a/libs/encryption-sdk/src/main/java/org/opensearch/encryption/package-info.java +++ b/plugins/custom-codecs/src/main/plugin-metadata/plugin-security.policy @@ -6,7 +6,6 @@ * compatible open source license. */ -/** - * Crypto plugin to for encryption and decryption use cases. 
- */ -package org.opensearch.encryption; +grant codeBase "${codebase.zstd-jni}" { + permission java.lang.RuntimePermission "loadLibrary.*"; +}; diff --git a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec b/plugins/custom-codecs/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec similarity index 63% rename from server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec rename to plugins/custom-codecs/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec index 8b37d91cd8bc4..ba5054055d00a 100644 --- a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec +++ b/plugins/custom-codecs/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec @@ -1,2 +1,3 @@ org.opensearch.index.codec.customcodecs.ZstdCodec org.opensearch.index.codec.customcodecs.ZstdNoDictCodec +org.opensearch.index.codec.customcodecs.ZstdDeprecatedCodec diff --git a/server/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java b/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java similarity index 100% rename from server/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java rename to plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java diff --git a/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/CustomCodecTests.java b/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/CustomCodecTests.java new file mode 100644 index 0000000000000..5365b9e222d9a --- /dev/null +++ b/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/CustomCodecTests.java @@ -0,0 +1,250 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.index.codec.customcodecs; + +import org.apache.logging.log4j.LogManager; +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.codecs.lucene90.Lucene90StoredFieldsFormat; +import org.apache.lucene.codecs.lucene95.Lucene95Codec; +import org.apache.lucene.document.Document; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.SegmentReader; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.util.LuceneTestCase.SuppressCodecs; +import org.opensearch.common.settings.IndexScopedSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.env.Environment; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.analysis.IndexAnalyzers; +import org.opensearch.index.codec.CodecService; +import org.opensearch.index.codec.CodecServiceConfig; +import org.opensearch.index.codec.CodecServiceFactory; +import org.opensearch.index.codec.CodecSettings; +import org.opensearch.index.engine.EngineConfig; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.similarity.SimilarityService; +import org.opensearch.indices.mapper.MapperRegistry; +import org.opensearch.plugins.MapperPlugin; +import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collections; +import java.util.Optional; + +import static org.opensearch.index.codec.customcodecs.CustomCodecService.ZSTD_CODEC; +import static org.opensearch.index.codec.customcodecs.CustomCodecService.ZSTD_NO_DICT_CODEC; +import static org.opensearch.index.engine.EngineConfig.INDEX_CODEC_COMPRESSION_LEVEL_SETTING; + +@SuppressCodecs("*") // we test against default codec so never get a random one here! 
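+// These tests construct the codec service directly through the plugin's CodecServiceFactory, covering codec registration without starting a cluster.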
+public class CustomCodecTests extends OpenSearchTestCase { + + private CustomCodecPlugin plugin; + + @Before + public void setup() { + plugin = new CustomCodecPlugin(); + } + + public void testZstd() throws Exception { + Codec codec = createCodecService(false).codec("zstd"); + assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD, codec); + Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); + assertEquals(Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL, storedFieldsFormat.getCompressionLevel()); + } + + public void testZstdNoDict() throws Exception { + Codec codec = createCodecService(false).codec("zstd_no_dict"); + assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD_NO_DICT, codec); + Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); + assertEquals(Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL, storedFieldsFormat.getCompressionLevel()); + } + + public void testZstdDeprecatedCodec() { + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> createCodecService(false).codec("ZSTD_DEPRECATED") + ); + assertTrue(e.getMessage().startsWith("failed to find codec")); + } + + public void testZstdWithCompressionLevel() throws Exception { + int randomCompressionLevel = randomIntBetween(1, 6); + Codec codec = createCodecService(randomCompressionLevel, "zstd").codec("zstd"); + assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD, codec); + Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); + assertEquals(randomCompressionLevel, storedFieldsFormat.getCompressionLevel()); + } + + public void testZstdNoDictWithCompressionLevel() throws Exception { + int randomCompressionLevel = randomIntBetween(1, 6); + Codec codec = createCodecService(randomCompressionLevel, "zstd_no_dict").codec("zstd_no_dict"); + assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD_NO_DICT, codec); + Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); + assertEquals(randomCompressionLevel, storedFieldsFormat.getCompressionLevel()); + } + + public void testBestCompressionWithCompressionLevel() { + final Settings zstdSettings = Settings.builder() + .put(INDEX_CODEC_COMPRESSION_LEVEL_SETTING.getKey(), randomIntBetween(1, 6)) + .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), randomFrom(ZSTD_CODEC, ZSTD_NO_DICT_CODEC)) + .build(); + + // able to validate zstd + final IndexScopedSettings zstdIndexScopedSettings = new IndexScopedSettings( + zstdSettings, + IndexScopedSettings.BUILT_IN_INDEX_SETTINGS + ); + zstdIndexScopedSettings.validate(zstdSettings, true); + } + + public void testLuceneCodecsWithCompressionLevel() { + final Settings customCodecSettings = Settings.builder() + .put(INDEX_CODEC_COMPRESSION_LEVEL_SETTING.getKey(), randomIntBetween(1, 6)) + .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), randomFrom("zstd", "zstd_no_dict")) + .build(); + + final IndexScopedSettings customCodecIndexScopedSettings = new IndexScopedSettings( + customCodecSettings, + IndexScopedSettings.BUILT_IN_INDEX_SETTINGS + ); + customCodecIndexScopedSettings.validate(customCodecSettings, true); + } + + public void testZstandardCompressionLevelSupport() throws Exception { + CodecService codecService = createCodecService(false); + CodecSettings zstdCodec = (CodecSettings) codecService.codec("zstd"); + CodecSettings 
zstdNoDictCodec = (CodecSettings) codecService.codec("zstd_no_dict"); + assertTrue(zstdCodec.supports(INDEX_CODEC_COMPRESSION_LEVEL_SETTING)); + assertTrue(zstdNoDictCodec.supports(INDEX_CODEC_COMPRESSION_LEVEL_SETTING)); + } + + public void testDefaultMapperServiceNull() throws Exception { + Codec codec = createCodecService(true).codec("default"); + assertStoredFieldsCompressionEquals(Lucene95Codec.Mode.BEST_SPEED, codec); + } + + public void testBestCompressionMapperServiceNull() throws Exception { + Codec codec = createCodecService(true).codec("best_compression"); + assertStoredFieldsCompressionEquals(Lucene95Codec.Mode.BEST_COMPRESSION, codec); + } + + public void testZstdMapperServiceNull() throws Exception { + Codec codec = createCodecService(true).codec("zstd"); + assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD, codec); + Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); + assertEquals(Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL, storedFieldsFormat.getCompressionLevel()); + } + + public void testZstdNoDictMapperServiceNull() throws Exception { + Codec codec = createCodecService(true).codec("zstd_no_dict"); + assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD_NO_DICT, codec); + Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); + assertEquals(Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL, storedFieldsFormat.getCompressionLevel()); + } + + // write some docs with it, inspect .si to see this was the used compression + private void assertStoredFieldsCompressionEquals(Lucene95Codec.Mode expected, Codec actual) throws Exception { + SegmentReader sr = getSegmentReader(actual); + String v = sr.getSegmentInfo().info.getAttribute(Lucene90StoredFieldsFormat.MODE_KEY); + assertNotNull(v); + assertEquals(expected, Lucene95Codec.Mode.valueOf(v)); + } + + private void assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode expected, Codec actual) throws Exception { + SegmentReader sr = getSegmentReader(actual); + String v = sr.getSegmentInfo().info.getAttribute(Lucene95CustomStoredFieldsFormat.MODE_KEY); + assertNotNull(v); + assertEquals(expected, Lucene95CustomCodec.Mode.valueOf(v)); + } + + private CodecService createCodecService(boolean isMapperServiceNull) throws IOException { + Settings nodeSettings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build(); + if (isMapperServiceNull) { + return new CustomCodecService(null, IndexSettingsModule.newIndexSettings("_na", nodeSettings), LogManager.getLogger("test")); + } + return buildCodecService(nodeSettings); + } + + private CodecService createCodecService(int randomCompressionLevel, String codec) throws IOException { + Settings nodeSettings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put("index.codec", codec) + .put("index.codec.compression_level", randomCompressionLevel) + .build(); + return buildCodecService(nodeSettings); + } + + private CodecService buildCodecService(Settings nodeSettings) throws IOException { + + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("_na", nodeSettings); + SimilarityService similarityService = new SimilarityService(indexSettings, null, Collections.emptyMap()); + IndexAnalyzers indexAnalyzers = createTestAnalysis(indexSettings, nodeSettings).indexAnalyzers; + MapperRegistry mapperRegistry = new MapperRegistry(Collections.emptyMap(), 
Collections.emptyMap(), MapperPlugin.NOOP_FIELD_FILTER); + MapperService service = new MapperService( + indexSettings, + indexAnalyzers, + xContentRegistry(), + similarityService, + mapperRegistry, + () -> null, + () -> false, + null + ); + + Optional customCodecServiceFactory = plugin.getCustomCodecServiceFactory(indexSettings); + if (customCodecServiceFactory.isPresent()) { + return customCodecServiceFactory.get().createCodecService(new CodecServiceConfig(indexSettings, service, logger)); + } + return new CustomCodecService(service, indexSettings, LogManager.getLogger("test")); + } + + private SegmentReader getSegmentReader(Codec codec) throws IOException { + Directory dir = newDirectory(); + IndexWriterConfig iwc = newIndexWriterConfig(null); + iwc.setCodec(codec); + IndexWriter iw = new IndexWriter(dir, iwc); + iw.addDocument(new Document()); + iw.commit(); + iw.close(); + DirectoryReader ir = DirectoryReader.open(dir); + SegmentReader sr = (SegmentReader) ir.leaves().get(0).reader(); + ir.close(); + dir.close(); + return sr; + } + +} diff --git a/server/src/test/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormatTests.java b/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormatTests.java similarity index 100% rename from server/src/test/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormatTests.java rename to plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormatTests.java diff --git a/server/src/test/java/org/opensearch/index/codec/customcodecs/ZstdCompressorTests.java b/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdCompressorTests.java similarity index 100% rename from server/src/test/java/org/opensearch/index/codec/customcodecs/ZstdCompressorTests.java rename to plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdCompressorTests.java diff --git a/server/src/test/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressorTests.java b/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressorTests.java similarity index 100% rename from server/src/test/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressorTests.java rename to plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressorTests.java diff --git a/libs/encryption-sdk/licenses/commons-lang3-3.13.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-lang3-3.13.0.jar.sha1 similarity index 100% rename from libs/encryption-sdk/licenses/commons-lang3-3.13.0.jar.sha1 rename to plugins/ingest-attachment/licenses/commons-lang3-3.13.0.jar.sha1 diff --git a/libs/encryption-sdk/licenses/commons-lang3-LICENSE.txt b/plugins/ingest-attachment/licenses/commons-lang3-LICENSE.txt similarity index 100% rename from libs/encryption-sdk/licenses/commons-lang3-LICENSE.txt rename to plugins/ingest-attachment/licenses/commons-lang3-LICENSE.txt diff --git a/libs/encryption-sdk/licenses/commons-lang3-NOTICE.txt b/plugins/ingest-attachment/licenses/commons-lang3-NOTICE.txt similarity index 100% rename from libs/encryption-sdk/licenses/commons-lang3-NOTICE.txt rename to plugins/ingest-attachment/licenses/commons-lang3-NOTICE.txt diff --git a/plugins/repository-azure/licenses/commons-lang3-3.13.0.jar.sha1 b/plugins/repository-azure/licenses/commons-lang3-3.13.0.jar.sha1 new file mode 100644 index 0000000000000..d0c2f2486ee1f --- /dev/null +++ 
b/plugins/repository-azure/licenses/commons-lang3-3.13.0.jar.sha1 @@ -0,0 +1 @@ +b7263237aa89c1f99b327197c41d0669707a462e \ No newline at end of file diff --git a/libs/encryption-sdk/licenses/aws-encryption-sdk-java-LICENSE.txt b/plugins/repository-azure/licenses/commons-lang3-LICENSE.txt similarity index 99% rename from libs/encryption-sdk/licenses/aws-encryption-sdk-java-LICENSE.txt rename to plugins/repository-azure/licenses/commons-lang3-LICENSE.txt index 8dada3edaf50d..d645695673349 100644 --- a/libs/encryption-sdk/licenses/aws-encryption-sdk-java-LICENSE.txt +++ b/plugins/repository-azure/licenses/commons-lang3-LICENSE.txt @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -178,7 +179,7 @@ APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" + boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -186,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {yyyy} {name of copyright owner} + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/plugins/repository-azure/licenses/commons-lang3-NOTICE.txt b/plugins/repository-azure/licenses/commons-lang3-NOTICE.txt new file mode 100644 index 0000000000000..13a3140897472 --- /dev/null +++ b/plugins/repository-azure/licenses/commons-lang3-NOTICE.txt @@ -0,0 +1,5 @@ +Apache Commons Lang +Copyright 2001-2019 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). diff --git a/plugins/repository-hdfs/licenses/commons-lang3-3.13.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-lang3-3.13.0.jar.sha1 new file mode 100644 index 0000000000000..d0c2f2486ee1f --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-lang3-3.13.0.jar.sha1 @@ -0,0 +1 @@ +b7263237aa89c1f99b327197c41d0669707a462e \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-lang3-LICENSE.txt b/plugins/repository-hdfs/licenses/commons-lang3-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-lang3-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
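Usage note for the migration above: once the custom-codecs plugin is installed, the ZSTD codecs are selected purely through index settings, just as the integration tests do. A minimal sketch in Java (the fixed compression level and standalone snippet form are illustrative placeholders, not part of this change):

import org.opensearch.cluster.metadata.IndexMetadata;
import org.opensearch.common.settings.Settings;

// Opt an index into ZSTD stored-field compression (custom-codecs plugin must be installed).
Settings zstdIndexSettings = Settings.builder()
    .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
    .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
    .put("index.codec", "zstd")              // or "zstd_no_dict"
    .put("index.codec.compression_level", 3) // levels 1-6 are exercised by the ITs
    .build();

Without the plugin, the same settings now fail index creation with an IllegalArgumentException, which ZstdNotEnabledIT below verifies.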
diff --git a/plugins/repository-hdfs/licenses/commons-lang3-NOTICE.txt b/plugins/repository-hdfs/licenses/commons-lang3-NOTICE.txt new file mode 100644 index 0000000000000..13a3140897472 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-lang3-NOTICE.txt @@ -0,0 +1,5 @@ +Apache Commons Lang +Copyright 2001-2019 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). diff --git a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java index 75083a929b491..686fc78dcec8a 100644 --- a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java +++ b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java @@ -114,7 +114,6 @@ private void printClusterRouting() throws IOException, ParseException { * This test verifies that segment replication does not break when primary shards are on lower OS version. It does this * by verifying replica shards contains same number of documents as primary's. */ - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9685") public void testIndexingWithPrimaryOnBwcNodes() throws Exception { if (UPGRADE_FROM_VERSION.before(Version.V_2_4_0)) { logger.info("--> Skip test for version {} where segment replication feature is not available", UPGRADE_FROM_VERSION); diff --git a/server/build.gradle b/server/build.gradle index af56032897ee1..f6db3d53a0dcc 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -106,7 +106,6 @@ dependencies { api project(':libs:opensearch-x-content') api project(":libs:opensearch-geo") api project(":libs:opensearch-telemetry") - api project(":libs:opensearch-encryption-sdk") compileOnly project(':libs:opensearch-plugin-classloader') diff --git a/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java b/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java index 2fbad9099c056..033ea75b68958 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java @@ -15,7 +15,6 @@ import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.rest.RestStatus; import org.opensearch.index.shard.IndexShard; @@ -31,7 +30,6 @@ import java.util.Collection; import java.util.Collections; import java.util.List; -import java.util.Objects; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; @@ -264,9 +262,9 @@ public void testFailStaleReplica() throws Exception { } public void testWithDocumentReplicationEnabledIndex() throws Exception { - assumeTrue( - "Can't create DocRep index with remote store enabled. Skipping.", - Objects.equals(featureFlagSettings().get(FeatureFlags.REMOTE_STORE, "false"), "false") + assumeFalse( + "Skipping the test as its not compatible with segment replication with remote store. 
Cannot create DocRep indices with Remote store enabled", + segmentReplicationWithRemoteEnabled() ); Settings settings = Settings.builder() .put(MAX_REPLICATION_TIME_BACKPRESSURE_SETTING.getKey(), TimeValue.timeValueMillis(500)) diff --git a/server/src/internalClusterTest/java/org/opensearch/index/codec/ZstdNotEnabledIT.java b/server/src/internalClusterTest/java/org/opensearch/index/codec/ZstdNotEnabledIT.java new file mode 100644 index 0000000000000..9b1fa77fc9a5a --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/index/codec/ZstdNotEnabledIT.java @@ -0,0 +1,41 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec; + +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.List; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) +public class ZstdNotEnabledIT extends OpenSearchIntegTestCase { + + public void testZStdCodecsWithoutPluginInstalled() { + + internalCluster().startNode(); + final String index = "test-index"; + + // creating index with zstd and zstd_no_dict should fail if custom-codecs plugin is not installed + for (String codec : List.of("zstd", "zstd_no_dict")) { + assertThrows( + IllegalArgumentException.class, + () -> createIndex( + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.codec", codec) + .build() + ) + ); + } + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexPrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexPrimaryRelocationIT.java index e9962706bcd39..c049c8ed2d4a6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexPrimaryRelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexPrimaryRelocationIT.java @@ -56,15 +56,12 @@ public class IndexPrimaryRelocationIT extends OpenSearchIntegTestCase { private static final int RELOCATION_COUNT = 15; - public void setup() {} - public Settings indexSettings() { return Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0).build(); } public void testPrimaryRelocationWhileIndexing() throws Exception { internalCluster().ensureAtLeastNumDataNodes(randomIntBetween(2, 3)); - setup(); client().admin().indices().prepareCreate("test").setSettings(indexSettings()).setMapping("field", "type=text").get(); ensureGreen("test"); AtomicInteger numAutoGenDocs = new AtomicInteger(); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java index a16167c8ad0c4..8e68a8bde39d5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java @@ -18,7 +18,6 @@ import org.opensearch.common.Nullable; import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.index.Index; 
import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexModule; @@ -204,8 +203,7 @@ protected IndexShard getIndexShard(String node, String indexName) { } protected boolean segmentReplicationWithRemoteEnabled() { - return IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexSettings()).booleanValue() - && "true".equalsIgnoreCase(featureFlagSettings().get(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL)); + return IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexSettings()).booleanValue(); } protected Releasable blockReplication(List nodes, CountDownLatch latch) { diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java index 3f432060e13fb..ee904dbcb6924 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java @@ -71,6 +71,7 @@ import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.remote.RemoteSegmentStats; import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.translog.RemoteTranslogStats; import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.IndicesRequestCache; @@ -1436,9 +1437,12 @@ public void testZeroRemoteStoreStatsOnNonRemoteStoreIndex() { .get() .status() ); - ShardStats shard = client().admin().indices().prepareStats(indexName).setSegments(true).get().getShards()[0]; + ShardStats shard = client().admin().indices().prepareStats(indexName).setSegments(true).setTranslog(true).get().getShards()[0]; RemoteSegmentStats remoteSegmentStatsFromIndexStats = shard.getStats().getSegments().getRemoteSegmentStats(); assertZeroRemoteSegmentStats(remoteSegmentStatsFromIndexStats); + RemoteTranslogStats remoteTranslogStatsFromIndexStats = shard.getStats().getTranslog().getRemoteTranslogStats(); + assertZeroRemoteTranslogStats(remoteTranslogStatsFromIndexStats); + NodesStatsResponse nodesStatsResponse = client().admin().cluster().prepareNodesStats(primaryNodeName(indexName)).get(); RemoteSegmentStats remoteSegmentStatsFromNodesStats = nodesStatsResponse.getNodes() .get(0) @@ -1446,20 +1450,22 @@ public void testZeroRemoteStoreStatsOnNonRemoteStoreIndex() { .getSegments() .getRemoteSegmentStats(); assertZeroRemoteSegmentStats(remoteSegmentStatsFromNodesStats); + RemoteTranslogStats remoteTranslogStatsFromNodesStats = nodesStatsResponse.getNodes() + .get(0) + .getIndices() + .getTranslog() + .getRemoteTranslogStats(); + assertZeroRemoteTranslogStats(remoteTranslogStatsFromNodesStats); } private void assertZeroRemoteSegmentStats(RemoteSegmentStats remoteSegmentStats) { - assertEquals(0, remoteSegmentStats.getUploadBytesStarted()); - assertEquals(0, remoteSegmentStats.getUploadBytesSucceeded()); - assertEquals(0, remoteSegmentStats.getUploadBytesFailed()); - assertEquals(0, remoteSegmentStats.getDownloadBytesStarted()); - assertEquals(0, remoteSegmentStats.getDownloadBytesSucceeded()); - assertEquals(0, remoteSegmentStats.getDownloadBytesFailed()); - assertEquals(0, remoteSegmentStats.getTotalRefreshBytesLag()); - assertEquals(0, remoteSegmentStats.getMaxRefreshBytesLag()); - assertEquals(0, remoteSegmentStats.getMaxRefreshTimeLag()); - assertEquals(0, remoteSegmentStats.getTotalUploadTime()); - assertEquals(0, remoteSegmentStats.getTotalDownloadTime()); + // Compare with fresh object because all values 
default to 0 in a freshly created object + assertEquals(new RemoteSegmentStats(), remoteSegmentStats); + } + + private void assertZeroRemoteTranslogStats(RemoteTranslogStats remoteTranslogStats) { + // Compare with fresh object because all values default to 0 in a freshly created object + assertEquals(new RemoteTranslogStats(), remoteTranslogStats); } /** diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java index 73f5278c175a2..bc55f6cc2cbcb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java @@ -12,15 +12,12 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.util.FileSystemUtils; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.snapshots.AbstractSnapshotIntegTestCase; -import org.opensearch.test.FeatureFlagSetter; -import org.junit.Before; import java.io.IOException; import java.nio.file.Path; @@ -31,6 +28,7 @@ import java.util.stream.Collectors; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; @@ -43,17 +41,6 @@ public abstract class AbstractRemoteStoreMockRepositoryIntegTestCase extends Abs protected static final String TRANSLOG_REPOSITORY_NAME = "my-translog-repo-1"; protected static final String INDEX_NAME = "remote-store-test-idx-1"; - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE, "true").build(); - } - - @Before - public void setup() { - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - FeatureFlagSetter.set(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL); - } - @Override public Settings indexSettings() { return remoteStoreIndexSettings(0); } @@ -91,6 +78,16 @@ public Settings buildRemoteStoreNodeAttributes(Path repoLocation, double ioFailu "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, TRANSLOG_REPOSITORY_NAME ); + String stateRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + REPOSITORY_NAME + ); + String stateRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + REPOSITORY_NAME + ); return Settings.builder() .put("node.attr."
+ REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, REPOSITORY_NAME) @@ -104,6 +101,9 @@ public Settings buildRemoteStoreNodeAttributes(Path repoLocation, double ioFailu .put("node.attr." + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, TRANSLOG_REPOSITORY_NAME) .put(translogRepoTypeAttributeKey, "mock") .put(translogRepoSettingsAttributeKeyPrefix + "location", repoLocation) + .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, REPOSITORY_NAME) + .put(stateRepoTypeAttributeKey, "mock") + .put(stateRepoSettingsAttributeKeyPrefix + "location", repoLocation) .build(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java new file mode 100644 index 0000000000000..ad3e99dd274ce --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java @@ -0,0 +1,107 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotestore; + +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; +import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.transport.MockTransportService; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; + +public class BaseRemoteStoreRestoreIT extends RemoteStoreBaseIntegTestCase { + static final String INDEX_NAME = "remote-store-test-idx-1"; + static final String INDEX_NAMES = "test-remote-store-1,test-remote-store-2,remote-store-test-index-1,remote-store-test-index-2"; + static final String INDEX_NAMES_WILDCARD = "test-remote-store-*,remote-store-test-index-*"; + static final String TOTAL_OPERATIONS = "total-operations"; + static final String REFRESHED_OR_FLUSHED_OPERATIONS = "refreshed-or-flushed-operations"; + static final String MAX_SEQ_NO_TOTAL = "max-seq-no-total"; + + @Override + public Settings indexSettings() { + return remoteStoreIndexSettings(0); + } + + public Settings indexSettings(int shards, int replicas) { + return remoteStoreIndexSettings(replicas, shards); + } + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(MockTransportService.TestPlugin.class); + } + + protected void restore(String... 
indices) { + boolean restoreAllShards = randomBoolean(); + if (restoreAllShards) { + assertAcked(client().admin().indices().prepareClose(indices)); + } + client().admin() + .cluster() + .restoreRemoteStore( + new RestoreRemoteStoreRequest().indices(indices).restoreAllShards(restoreAllShards), + PlainActionFuture.newFuture() + ); + } + + protected void verifyRestoredData(Map indexStats, String indexName) throws Exception { + ensureYellowAndNoInitializingShards(indexName); + ensureGreen(indexName); + // This is to ensure that shards that were already assigned will get the latest count + refresh(indexName); + assertBusy( + () -> assertHitCount(client().prepareSearch(indexName).setSize(0).get(), indexStats.get(TOTAL_OPERATIONS)), + 30, + TimeUnit.SECONDS + ); + IndexResponse response = indexSingleDoc(indexName); + if (indexStats.containsKey(MAX_SEQ_NO_TOTAL + "-shard-" + response.getShardId().id())) { + assertEquals(indexStats.get(MAX_SEQ_NO_TOTAL + "-shard-" + response.getShardId().id()) + 1, response.getSeqNo()); + } + refresh(indexName); + assertBusy( + () -> assertHitCount(client().prepareSearch(indexName).setSize(0).get(), indexStats.get(TOTAL_OPERATIONS) + 1), + 30, + TimeUnit.SECONDS + ); + } + + public void prepareCluster(int numClusterManagerNodes, int numDataOnlyNodes, String indices, int replicaCount, int shardCount) { + prepareCluster(numClusterManagerNodes, numDataOnlyNodes, indices, replicaCount, shardCount, Settings.EMPTY); + } + + public void prepareCluster( + int numClusterManagerNodes, + int numDataOnlyNodes, + String indices, + int replicaCount, + int shardCount, + Settings settings + ) { + prepareCluster(numClusterManagerNodes, numDataOnlyNodes, settings); + for (String index : indices.split(",")) { + createIndex(index, remoteStoreIndexSettings(replicaCount, shardCount)); + ensureYellowAndNoInitializingShards(index); + ensureGreen(index); + } + } + + public void prepareCluster(int numClusterManagerNodes, int numDataOnlyNodes, Settings settings) { + internalCluster().startClusterManagerOnlyNodes(numClusterManagerNodes, settings); + internalCluster().startDataOnlyNodes(numDataOnlyNodes, settings); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java index b45b19c43b3b3..d8b7718a55377 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java @@ -10,7 +10,6 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.indices.recovery.IndexPrimaryRelocationIT; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.OpenSearchIntegTestCase; @@ -19,18 +18,17 @@ import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteIndexPrimaryRelocationIT extends IndexPrimaryRelocationIT { protected static final String REPOSITORY_NAME = "test-remote-store-repo"; protected Path absolutePath; - public void setup() { - absolutePath = randomRepoPath().toAbsolutePath(); - } - protected Settings
nodeSettings(int nodeOrdinal) { + if (absolutePath == null) { + absolutePath = randomRepoPath().toAbsolutePath(); + } return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) .put(remoteStoreClusterSettings(REPOSITORY_NAME, absolutePath)) @@ -51,16 +49,8 @@ public Settings indexSettings() { .build(); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder() - .put(super.featureFlagSettings()) - .put(FeatureFlags.REMOTE_STORE, "true") - .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true") - .build(); - } - public void testPrimaryRelocationWhileIndexing() throws Exception { + internalCluster().startClusterManagerOnlyNode(); super.testPrimaryRelocationWhileIndexing(); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java index d11c09928a08f..4eb1cc7703735 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java @@ -10,7 +10,6 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.indices.recovery.IndexRecoveryIT; @@ -46,15 +45,6 @@ protected Settings nodeSettings(int nodeOrdinal) { .build(); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder() - .put(super.featureFlagSettings()) - .put(FeatureFlags.REMOTE_STORE, "true") - .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true") - .build(); - } - @Override public Settings indexSettings() { return Settings.builder() diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index 346e9d12410b7..4ebccb9b9e551 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -10,7 +10,6 @@ import org.opensearch.action.DocWriteResponse; import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; -import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.opensearch.action.admin.indices.get.GetIndexRequest; import org.opensearch.action.admin.indices.get.GetIndexResponse; @@ -21,11 +20,11 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.rest.RestStatus; import org.opensearch.index.IndexSettings; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.snapshots.AbstractSnapshotIntegTestCase; +import org.opensearch.snapshots.SnapshotInfo; import org.opensearch.snapshots.SnapshotState; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; @@ -34,7 +33,9 @@ import java.io.IOException; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; +import java.util.List; import 
java.util.concurrent.ExecutionException; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; @@ -63,7 +64,6 @@ public void teardown() { protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put(FeatureFlags.REMOTE_STORE, "true") .put(remoteStoreClusterSettings(BASE_REMOTE_REPO, remoteRepoPath)) .build(); } @@ -129,32 +129,21 @@ public void testRestoreOperationsShallowCopyEnabled() throws IOException, Execut internalCluster().startDataOnlyNode(); logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(true) - .setIndices(indexName1, indexName2) - .get(); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat( - createSnapshotResponse.getSnapshotInfo().successfulShards(), - equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) - ); - assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + + SnapshotInfo snapshotInfo = createSnapshot(snapshotRepoName, snapshotName1, new ArrayList<>(Arrays.asList(indexName1, indexName2))); + assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfo.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); updateRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, false)); - CreateSnapshotResponse createSnapshotResponse2 = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepoName, snapshotName2) - .setWaitForCompletion(true) - .setIndices(indexName1, indexName2) - .get(); - assertThat(createSnapshotResponse2.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat( - createSnapshotResponse2.getSnapshotInfo().successfulShards(), - equalTo(createSnapshotResponse2.getSnapshotInfo().totalShards()) + SnapshotInfo snapshotInfo2 = createSnapshot( + snapshotRepoName, + snapshotName2, + new ArrayList<>(Arrays.asList(indexName1, indexName2)) ); - assertThat(createSnapshotResponse2.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfo2.state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfo2.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo2.successfulShards(), equalTo(snapshotInfo2.totalShards())); DeleteResponse deleteResponse = client().prepareDelete(indexName1, "0").execute().actionGet(); assertEquals(deleteResponse.getResult(), DocWriteResponse.Result.DELETED); @@ -258,7 +247,6 @@ public void testRestoreOperationsShallowCopyEnabled() throws IOException, Execut assertDocsPresentInIndex(client, restoredIndexName1Doc, numDocsInIndex1 + 2); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9326") public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); String primary = internalCluster().startDataOnlyNode(); @@ -289,32 +277,24 @@ public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { internalCluster().startDataOnlyNode(); logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(true) - .setIndices(indexName1, indexName2) - .get(); - 
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat( - createSnapshotResponse.getSnapshotInfo().successfulShards(), - equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + SnapshotInfo snapshotInfo1 = createSnapshot( + snapshotRepoName, + snapshotName1, + new ArrayList<>(Arrays.asList(indexName1, indexName2)) ); - assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfo1.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo1.successfulShards(), equalTo(snapshotInfo1.totalShards())); + assertThat(snapshotInfo1.state(), equalTo(SnapshotState.SUCCESS)); updateRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, false)); - CreateSnapshotResponse createSnapshotResponse2 = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepoName, snapshotName2) - .setWaitForCompletion(true) - .setIndices(indexName1, indexName2) - .get(); - assertThat(createSnapshotResponse2.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat( - createSnapshotResponse2.getSnapshotInfo().successfulShards(), - equalTo(createSnapshotResponse2.getSnapshotInfo().totalShards()) + SnapshotInfo snapshotInfo2 = createSnapshot( + snapshotRepoName, + snapshotName2, + new ArrayList<>(Arrays.asList(indexName1, indexName2)) ); - assertThat(createSnapshotResponse2.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfo2.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo2.successfulShards(), equalTo(snapshotInfo2.totalShards())); + assertThat(snapshotInfo2.state(), equalTo(SnapshotState.SUCCESS)); DeleteResponse deleteResponse = client().prepareDelete(indexName1, "0").execute().actionGet(); assertEquals(deleteResponse.getResult(), DocWriteResponse.Result.DELETED); @@ -342,6 +322,10 @@ public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { ensureGreen(indexName1, restoredIndexName2); assertDocsPresentInIndex(client, indexName1, numDocsInIndex1); assertDocsPresentInIndex(client, restoredIndexName2, numDocsInIndex2); + // indexing some new docs and validating + indexDocuments(client, indexName1, numDocsInIndex1, numDocsInIndex1 + 2); + ensureGreen(indexName1); + assertDocsPresentInIndex(client, indexName1, numDocsInIndex1 + 2); // deleting data for restoredIndexName1 and restoring from remote store. 
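The restore triggered below goes through the cluster-level remote-store restore API rather than the snapshot restore API. A minimal sketch of that call, mirroring the restore() helper added in BaseRemoteStoreRestoreIT earlier in this diff; the index name is illustrative, and the close step is only required when restoring all shards:

    // Sketch only: remote-store restore of a closed index, using the same client API
    // as the restore() helper above. "my-index" is an illustrative index name.
    assertAcked(client().admin().indices().prepareClose("my-index"));
    client().admin()
        .cluster()
        .restoreRemoteStore(
            new RestoreRemoteStoreRequest().indices("my-index").restoreAllShards(true),
            PlainActionFuture.newFuture()
        );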
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); @@ -356,9 +340,9 @@ public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { ensureGreen(indexName1); assertDocsPresentInIndex(client(), indexName1, numDocsInIndex1); // indexing some new docs and validating - indexDocuments(client, indexName1, numDocsInIndex1, numDocsInIndex1 + 2); + indexDocuments(client, indexName1, numDocsInIndex1 + 2, numDocsInIndex1 + 4); ensureGreen(indexName1); - assertDocsPresentInIndex(client, indexName1, numDocsInIndex1 + 2); + assertDocsPresentInIndex(client, indexName1, numDocsInIndex1 + 4); } public void testRestoreShallowCopySnapshotWithDifferentRepo() throws IOException { @@ -392,18 +376,14 @@ public void testRestoreShallowCopySnapshotWithDifferentRepo() throws IOException internalCluster().startDataOnlyNode(); logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(true) - .setIndices(indexName1, indexName2) - .get(); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat( - createSnapshotResponse.getSnapshotInfo().successfulShards(), - equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + SnapshotInfo snapshotInfo1 = createSnapshot( + snapshotRepoName, + snapshotName1, + new ArrayList<>(Arrays.asList(indexName1, indexName2)) ); - assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfo1.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo1.successfulShards(), equalTo(snapshotInfo1.totalShards())); + assertThat(snapshotInfo1.state(), equalTo(SnapshotState.SUCCESS)); Settings remoteStoreIndexSettings = Settings.builder() .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, remoteStoreRepo2Name) @@ -479,18 +459,10 @@ public void testRestoreShallowSnapshotRepositoryOverriden() throws ExecutionExce ensureGreen(indexName1); logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(true) - .setIndices(indexName1) - .get(); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat( - createSnapshotResponse.getSnapshotInfo().successfulShards(), - equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) - ); - assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + SnapshotInfo snapshotInfo1 = createSnapshot(snapshotRepoName, snapshotName1, new ArrayList<>(List.of(indexName1))); + assertThat(snapshotInfo1.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo1.successfulShards(), equalTo(snapshotInfo1.totalShards())); + assertThat(snapshotInfo1.state(), equalTo(SnapshotState.SUCCESS)); createRepository(BASE_REMOTE_REPO, "fs", absolutePath2); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteSegmentStatsFromNodesStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteSegmentStatsFromNodesStatsIT.java deleted file mode 100644 index 32f3b1066aacd..0000000000000 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteSegmentStatsFromNodesStatsIT.java +++ /dev/null @@ -1,267 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file 
be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.remotestore; - -import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; -import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsResponse; -import org.opensearch.action.admin.indices.stats.CommonStatsFlags; -import org.opensearch.common.settings.Settings; -import org.opensearch.index.remote.RemoteSegmentStats; -import org.opensearch.test.OpenSearchIntegTestCase; -import org.junit.Before; - -import java.util.concurrent.TimeUnit; - -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) -public class RemoteSegmentStatsFromNodesStatsIT extends RemoteStoreBaseIntegTestCase { - private static final String INDEX_NAME = "remote-index-1"; - private static final int DATA_NODE_COUNT = 2; - private static final int CLUSTER_MANAGER_NODE_COUNT = 3; - - @Before - public void setup() { - setupCustomCluster(); - } - - private void setupCustomCluster() { - internalCluster().startClusterManagerOnlyNodes(CLUSTER_MANAGER_NODE_COUNT); - internalCluster().startDataOnlyNodes(DATA_NODE_COUNT); - ensureStableCluster(DATA_NODE_COUNT + CLUSTER_MANAGER_NODE_COUNT); - } - - /** - * - Creates two indices with single primary shard, pinned to a single node. - * - Index documents in both of them and forces a fresh for both - * - Polls the _remotestore/stats API for individual index level stats - * - Adds up requisite fields from the API output, repeats this for the 2nd index - * - Polls _nodes/stats and verifies that the total values at node level adds up - * to the values capture in the previous step - */ - public void testNodesStatsParityWithOnlyPrimaryShards() { - String[] dataNodes = internalCluster().getDataNodeNames().toArray(String[]::new); - String randomDataNode = dataNodes[randomIntBetween(0, dataNodes.length - 1)]; - String firstIndex = INDEX_NAME + "1"; - String secondIndex = INDEX_NAME + "2"; - - // Create first index - createIndex( - firstIndex, - Settings.builder().put(remoteStoreIndexSettings(0, 1)).put("index.routing.allocation.require._name", randomDataNode).build() - ); - ensureGreen(firstIndex); - indexSingleDoc(firstIndex, true); - - // Create second index - createIndex( - secondIndex, - Settings.builder().put(remoteStoreIndexSettings(0, 1)).put("index.routing.allocation.require._name", randomDataNode).build() - ); - ensureGreen(secondIndex); - indexSingleDoc(secondIndex, true); - - long cumulativeUploadsSucceeded = 0, cumulativeUploadsStarted = 0, cumulativeUploadsFailed = 0; - long totalBytesLag = 0, maxBytesLag = 0, maxTimeLag = 0; - long totalUploadTime = 0; - // Fetch upload stats - RemoteStoreStatsResponse remoteStoreStatsFirstIndex = client(randomDataNode).admin() - .cluster() - .prepareRemoteStoreStats(firstIndex, "0") - .setLocal(true) - .get(); - cumulativeUploadsSucceeded += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesSucceeded; - cumulativeUploadsStarted += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesStarted; - cumulativeUploadsFailed += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesFailed; - totalBytesLag += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag; - maxBytesLag = Math.max(maxBytesLag, remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag); - maxTimeLag = Math.max(maxTimeLag, 
remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().refreshTimeLagMs); - totalUploadTime += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().totalUploadTimeInMs; - - RemoteStoreStatsResponse remoteStoreStatsSecondIndex = client(randomDataNode).admin() - .cluster() - .prepareRemoteStoreStats(secondIndex, "0") - .setLocal(true) - .get(); - - cumulativeUploadsSucceeded += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesSucceeded; - cumulativeUploadsStarted += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesStarted; - cumulativeUploadsFailed += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesFailed; - totalBytesLag += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag; - maxBytesLag = Math.max(maxBytesLag, remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag); - maxTimeLag = Math.max(maxTimeLag, remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().refreshTimeLagMs); - totalUploadTime += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().totalUploadTimeInMs; - - // Fetch nodes stats - NodesStatsResponse nodesStatsResponse = client().admin() - .cluster() - .prepareNodesStats(randomDataNode) - .setIndices(new CommonStatsFlags().set(CommonStatsFlags.Flag.Segments, true)) - .get(); - RemoteSegmentStats remoteSegmentStats = nodesStatsResponse.getNodes().get(0).getIndices().getSegments().getRemoteSegmentStats(); - assertTrue(cumulativeUploadsSucceeded > 0 && cumulativeUploadsSucceeded == remoteSegmentStats.getUploadBytesSucceeded()); - assertTrue(cumulativeUploadsStarted > 0 && cumulativeUploadsStarted == remoteSegmentStats.getUploadBytesStarted()); - assertEquals(cumulativeUploadsFailed, remoteSegmentStats.getUploadBytesFailed()); - assertEquals(totalBytesLag, remoteSegmentStats.getTotalRefreshBytesLag()); - assertEquals(maxBytesLag, remoteSegmentStats.getMaxRefreshBytesLag()); - assertEquals(maxTimeLag, remoteSegmentStats.getMaxRefreshTimeLag()); - assertTrue(totalUploadTime > 0 && totalUploadTime == remoteSegmentStats.getTotalUploadTime()); - } - - /** - * - Creates two indices with single primary shard and single replica - * - Index documents in both of them and forces a fresh for both - * - Polls the _remotestore/stats API for individual index level stats - * - Adds up requisite fields from the API output for both indices - * - Polls _nodes/stats and verifies that the total values at node level adds up - * to the values capture in the previous step - * - Repeats the above 3 steps for the second node - */ - public void testNodesStatsParityWithReplicaShards() throws Exception { - String firstIndex = INDEX_NAME + "1"; - String secondIndex = INDEX_NAME + "2"; - - createIndex(firstIndex, Settings.builder().put(remoteStoreIndexSettings(1, 1)).build()); - ensureGreen(firstIndex); - indexSingleDoc(firstIndex, true); - - // Create second index - createIndex(secondIndex, Settings.builder().put(remoteStoreIndexSettings(1, 1)).build()); - ensureGreen(secondIndex); - indexSingleDoc(secondIndex, true); - - assertBusy(() -> assertNodeStatsParityAcrossNodes(firstIndex, secondIndex), 15, TimeUnit.SECONDS); - } - - /** - * Ensures that node stats shows 0 values for dedicated cluster manager nodes - * since cluster manager nodes does not participate in indexing - */ - public void testZeroRemoteStatsOnNodesStatsForClusterManager() { - createIndex(INDEX_NAME, 
remoteStoreIndexSettings(0)); - ensureGreen(INDEX_NAME); - indexSingleDoc(INDEX_NAME); - refresh(INDEX_NAME); - NodesStatsResponse nodesStatsResponseForClusterManager = client().admin() - .cluster() - .prepareNodesStats(internalCluster().getClusterManagerName()) - .setIndices(new CommonStatsFlags().set(CommonStatsFlags.Flag.Segments, true)) - .get(); - assertTrue( - nodesStatsResponseForClusterManager.getNodes().get(0).getNode().isClusterManagerNode() - && !nodesStatsResponseForClusterManager.getNodes().get(0).getNode().isDataNode() - ); - assertZeroRemoteSegmentStats( - nodesStatsResponseForClusterManager.getNodes().get(0).getIndices().getSegments().getRemoteSegmentStats() - ); - NodesStatsResponse nodesStatsResponseForDataNode = client().admin() - .cluster() - .prepareNodesStats(primaryNodeName(INDEX_NAME)) - .setIndices(new CommonStatsFlags().set(CommonStatsFlags.Flag.Segments, true)) - .get(); - assertTrue(nodesStatsResponseForDataNode.getNodes().get(0).getNode().isDataNode()); - RemoteSegmentStats remoteSegmentStats = nodesStatsResponseForDataNode.getNodes() - .get(0) - .getIndices() - .getSegments() - .getRemoteSegmentStats(); - assertTrue(remoteSegmentStats.getUploadBytesStarted() > 0); - assertTrue(remoteSegmentStats.getUploadBytesSucceeded() > 0); - } - - private void assertZeroRemoteSegmentStats(RemoteSegmentStats remoteSegmentStats) { - assertEquals(0, remoteSegmentStats.getUploadBytesStarted()); - assertEquals(0, remoteSegmentStats.getUploadBytesSucceeded()); - assertEquals(0, remoteSegmentStats.getUploadBytesFailed()); - assertEquals(0, remoteSegmentStats.getDownloadBytesStarted()); - assertEquals(0, remoteSegmentStats.getDownloadBytesSucceeded()); - assertEquals(0, remoteSegmentStats.getDownloadBytesFailed()); - assertEquals(0, remoteSegmentStats.getTotalRefreshBytesLag()); - assertEquals(0, remoteSegmentStats.getMaxRefreshBytesLag()); - assertEquals(0, remoteSegmentStats.getMaxRefreshTimeLag()); - assertEquals(0, remoteSegmentStats.getTotalUploadTime()); - assertEquals(0, remoteSegmentStats.getTotalDownloadTime()); - } - - private static void assertNodeStatsParityAcrossNodes(String firstIndex, String secondIndex) { - for (String dataNode : internalCluster().getDataNodeNames()) { - long cumulativeUploadsSucceeded = 0, cumulativeUploadsStarted = 0, cumulativeUploadsFailed = 0; - long cumulativeDownloadsSucceeded = 0, cumulativeDownloadsStarted = 0, cumulativeDownloadsFailed = 0; - long totalBytesLag = 0, maxBytesLag = 0, maxTimeLag = 0; - long totalUploadTime = 0, totalDownloadTime = 0; - // Fetch upload stats - RemoteStoreStatsResponse remoteStoreStatsFirstIndex = client(dataNode).admin() - .cluster() - .prepareRemoteStoreStats(firstIndex, "0") - .setLocal(true) - .get(); - cumulativeUploadsSucceeded += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesSucceeded; - cumulativeUploadsStarted += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesStarted; - cumulativeUploadsFailed += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesFailed; - cumulativeDownloadsSucceeded += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0] - .getSegmentStats().directoryFileTransferTrackerStats.transferredBytesSucceeded; - cumulativeDownloadsStarted += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0] - .getSegmentStats().directoryFileTransferTrackerStats.transferredBytesStarted; - cumulativeDownloadsFailed += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0] - 
.getSegmentStats().directoryFileTransferTrackerStats.transferredBytesFailed; - totalBytesLag += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag; - maxBytesLag = Math.max(maxBytesLag, remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag); - maxTimeLag = Math.max(maxTimeLag, remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().refreshTimeLagMs); - totalUploadTime += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().totalUploadTimeInMs; - totalDownloadTime += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0] - .getSegmentStats().directoryFileTransferTrackerStats.totalTransferTimeInMs; - - RemoteStoreStatsResponse remoteStoreStatsSecondIndex = client(dataNode).admin() - .cluster() - .prepareRemoteStoreStats(secondIndex, "0") - .setLocal(true) - .get(); - cumulativeUploadsSucceeded += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesSucceeded; - cumulativeUploadsStarted += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesStarted; - cumulativeUploadsFailed += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesFailed; - cumulativeDownloadsSucceeded += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0] - .getSegmentStats().directoryFileTransferTrackerStats.transferredBytesSucceeded; - cumulativeDownloadsStarted += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0] - .getSegmentStats().directoryFileTransferTrackerStats.transferredBytesStarted; - cumulativeDownloadsFailed += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0] - .getSegmentStats().directoryFileTransferTrackerStats.transferredBytesFailed; - totalBytesLag += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag; - maxBytesLag = Math.max(maxBytesLag, remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag); - maxTimeLag = Math.max(maxTimeLag, remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().refreshTimeLagMs); - totalUploadTime += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().totalUploadTimeInMs; - totalDownloadTime += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0] - .getSegmentStats().directoryFileTransferTrackerStats.totalTransferTimeInMs; - - // Fetch nodes stats - NodesStatsResponse nodesStatsResponse = client().admin() - .cluster() - .prepareNodesStats(dataNode) - .setIndices(new CommonStatsFlags().set(CommonStatsFlags.Flag.Segments, true)) - .get(); - RemoteSegmentStats remoteSegmentStats = nodesStatsResponse.getNodes().get(0).getIndices().getSegments().getRemoteSegmentStats(); - assertEquals(cumulativeUploadsSucceeded, remoteSegmentStats.getUploadBytesSucceeded()); - assertEquals(cumulativeUploadsStarted, remoteSegmentStats.getUploadBytesStarted()); - assertEquals(cumulativeUploadsFailed, remoteSegmentStats.getUploadBytesFailed()); - assertEquals(cumulativeDownloadsSucceeded, remoteSegmentStats.getDownloadBytesSucceeded()); - assertEquals(cumulativeDownloadsStarted, remoteSegmentStats.getDownloadBytesStarted()); - assertEquals(cumulativeDownloadsFailed, remoteSegmentStats.getDownloadBytesFailed()); - assertEquals(totalBytesLag, remoteSegmentStats.getTotalRefreshBytesLag()); - assertEquals(maxBytesLag, remoteSegmentStats.getMaxRefreshBytesLag()); - assertEquals(maxTimeLag, remoteSegmentStats.getMaxRefreshTimeLag()); - // Ensure that total upload time has non-zero value if there has been segments uploaded from the node - if 
(cumulativeUploadsStarted > 0) { - assertTrue(totalUploadTime > 0); - } - assertEquals(totalUploadTime, remoteSegmentStats.getTotalUploadTime()); - // Ensure that total download time has non-zero value if there has been segments downloaded to the node - if (cumulativeDownloadsStarted > 0) { - assertTrue(totalDownloadTime > 0); - } - assertEquals(totalDownloadTime, remoteSegmentStats.getTotalDownloadTime()); - } - } -} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index 7173fda89505c..9a684ce0a1482 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -22,7 +22,6 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; @@ -48,7 +47,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; -import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; @@ -135,15 +134,6 @@ protected Settings nodeSettings(int nodeOrdinal) { } } - @Override - protected Settings featureFlagSettings() { - return Settings.builder() - .put(super.featureFlagSettings()) - .put(FeatureFlags.REMOTE_STORE, "true") - .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true") - .build(); - } - public Settings indexSettings() { return defaultIndexSettings(); } @@ -185,13 +175,7 @@ public static Settings remoteStoreClusterSettings( Path translogRepoPath ) { Settings.Builder settingsBuilder = Settings.builder(); - - if (randomBoolean()) { - settingsBuilder.put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT); - } - settingsBuilder.put(buildRemoteStoreNodeAttributes(segmentRepoName, segmentRepoPath, translogRepoName, translogRepoPath, false)); - return settingsBuilder.build(); } @@ -222,6 +206,16 @@ public static Settings buildRemoteStoreNodeAttributes( "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, translogRepoName ); + String stateRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + segmentRepoName + ); + String stateRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + segmentRepoName + ); Settings.Builder settings = Settings.builder() .put("node.attr." 
+ REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName) @@ -229,7 +223,10 @@ public static Settings buildRemoteStoreNodeAttributes( .put(segmentRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath) .put("node.attr." + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, translogRepoName) .put(translogRepoTypeAttributeKey, FsRepository.TYPE) - .put(translogRepoSettingsAttributeKeyPrefix + "location", translogRepoPath); + .put(translogRepoSettingsAttributeKeyPrefix + "location", translogRepoPath) + .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName) + .put(stateRepoTypeAttributeKey, FsRepository.TYPE) + .put(stateRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath); if (withRateLimiterAttributes) { settings.put(segmentRepoSettingsAttributeKeyPrefix + "compress", randomBoolean()) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java new file mode 100644 index 0000000000000..5e92bb195680b --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java @@ -0,0 +1,288 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotestore; + +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreResponse; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.common.settings.Settings; +import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.io.IOException; +import java.nio.file.Files; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.ExecutionException; + +import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; +import static org.opensearch.indices.ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE; +import static org.opensearch.indices.ShardLimitValidator.SETTING_MAX_SHARDS_PER_CLUSTER_KEY; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteStoreClusterStateRestoreIT extends BaseRemoteStoreRestoreIT { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true).build(); + } + + private void addNewNodes(int dataNodeCount, int clusterManagerNodeCount) { + internalCluster().startNodes(dataNodeCount + clusterManagerNodeCount); + } + + private Map initialTestSetup(int shardCount, int replicaCount, int dataNodeCount, int clusterManagerNodeCount) { + prepareCluster(clusterManagerNodeCount, dataNodeCount, INDEX_NAME, replicaCount, shardCount); + Map indexStats = indexData(1, false, INDEX_NAME); + assertEquals(shardCount * (replicaCount + 1), getNumShards(INDEX_NAME).totalNumShards); + ensureGreen(INDEX_NAME); + return indexStats; + } + + private void resetCluster(int dataNodeCount, int clusterManagerNodeCount) { + internalCluster().stopAllNodes(); + addNewNodes(dataNodeCount, clusterManagerNodeCount); + } + + private void 
restoreAndValidate(String clusterUUID, Map indexStats) throws Exception { + restoreAndValidate(clusterUUID, indexStats, true); + } + + private void restoreAndValidate(String clusterUUID, Map indexStats, boolean validate) throws Exception { + // TODO once auto restore is merged, the remote cluster state will be restored + + if (validate) { + // Step - 4 Validate that the restore is successful. + ensureGreen(INDEX_NAME); + verifyRestoredData(indexStats, INDEX_NAME); + } + } + + private void restoreAndValidateFails( + String clusterUUID, + PlainActionFuture actionListener, + Class clazz, + String errorSubString + ) { + + try { + restoreAndValidate(clusterUUID, null, false); + } catch (Exception e) { + assertTrue( + String.format(Locale.ROOT, "%s %s", clazz, e), + clazz.isAssignableFrom(e.getClass()) + || clazz.isAssignableFrom(e.getCause().getClass()) + || (e.getCause().getCause() != null && clazz.isAssignableFrom(e.getCause().getCause().getClass())) + ); + assertTrue( + String.format(Locale.ROOT, "Error message mismatch. Expected: [%s]. Actual: [%s]", errorSubString, e.getMessage()), + e.getMessage().contains(errorSubString) + ); + } + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9834") + public void testFullClusterRestore() throws Exception { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 1; + + // Step - 1 index some data to generate files in remote directory + Map indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, 1); + String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + + // Step - 2 Replace all nodes in the cluster with new nodes. This ensures new cluster state doesn't have previous index metadata + resetCluster(dataNodeCount, clusterManagerNodeCount); + + String newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; + + // Step - 3 Trigger full cluster restore and validate + restoreAndValidate(prevClusterUUID, indexStats); + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9834") + public void testFullClusterRestoreMultipleIndices() throws Exception { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 1; + + // Step - 1 index some data to generate files in remote directory + Map indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, clusterManagerNodeCount); + + String secondIndexName = INDEX_NAME + "-2"; + createIndex(secondIndexName, remoteStoreIndexSettings(replicaCount, shardCount + 1)); + Map indexStats2 = indexData(1, false, secondIndexName); + assertEquals((shardCount + 1) * (replicaCount + 1), getNumShards(secondIndexName).totalNumShards); + ensureGreen(secondIndexName); + + String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + + // Step - 2 Replace all nodes in the cluster with new nodes. This ensures new cluster state doesn't have previous index metadata + resetCluster(dataNodeCount, clusterManagerNodeCount); + + String newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful.
cluster uuid is same"; + + // Step - 3 Trigger full cluster restore + restoreAndValidate(prevClusterUUID, indexStats); + ensureGreen(secondIndexName); + verifyRestoredData(indexStats2, secondIndexName); + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9834") + public void testFullClusterRestoreFailureValidationFailures() throws Exception { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 1; + + // index some data to generate files in remote directory + Map indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, clusterManagerNodeCount); + String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + + // Start of Test - 1 + // Test - 1 Trigger full cluster restore and validate it fails due to an incorrect cluster UUID + PlainActionFuture future = PlainActionFuture.newFuture(); + restoreAndValidateFails("randomUUID", future, IllegalStateException.class, "Remote Cluster State not found - randomUUID"); + // End of Test - 1 + + // Start of Test - 2 + // Test - 2 Trigger full cluster restore and validate it fails because the cluster UUID is the same as the current cluster UUID + future = PlainActionFuture.newFuture(); + restoreAndValidateFails( + clusterService().state().metadata().clusterUUID(), + future, + IllegalArgumentException.class, + "clusterUUID to restore from should be different from current cluster UUID" + ); + // End of Test - 2 + + // Start of Test - 3 + // Step - 2 Replace all nodes in the cluster with new nodes. This ensures new cluster state doesn't have previous index metadata + // Restarting cluster with just 1 data node helps with applying cluster settings + resetCluster(1, clusterManagerNodeCount); + String newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; + + reduceShardLimits(1, 1); + + // Step - 4 Trigger full cluster restore and validate it fails + future = PlainActionFuture.newFuture(); + restoreAndValidateFails( + prevClusterUUID, + future, + IllegalArgumentException.class, + "this action would add [2] total shards, but this cluster currently has [0]/[1] maximum shards open" + ); + resetShardLimits(); + // End of Test - 3 + + // Start of Test - 4 + // Test - 4 Reset cluster and trigger full restore with an index of the same name in the cluster + // Test - 4 Add required nodes for this test after the last reset. + addNewNodes(dataNodeCount - 1, 0); + + newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; + + // Test - 4 Step - 2 Create a new index with the same name + createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 1)); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureGreen(INDEX_NAME); + + future = PlainActionFuture.newFuture(); + + // Test - 4 Step - 3 Trigger full cluster restore and validate that it fails + restoreAndValidateFails( + prevClusterUUID, + future, + IllegalStateException.class, + "cannot restore index [remote-store-test-idx-1] because an open index with same name/uuid already exists in the cluster" + ); + + // Test - 4 Step - 4 Validate that the restore is successful.
+ ensureGreen(INDEX_NAME); + // End of Test - 4 + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9834") + public void testFullClusterRestoreManifestFilePointsToInvalidIndexMetadataPathThrowsException() throws Exception { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 1; + + // Step - 1 index some data to generate files in remote directory + initialTestSetup(shardCount, replicaCount, dataNodeCount, clusterManagerNodeCount); + + String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + + // Step - 2 Replace all nodes in the cluster with new nodes. This ensures new cluster state doesn't have previous index metadata + resetCluster(dataNodeCount, clusterManagerNodeCount); + + String newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; + + // Step - 4 Delete the index metadata file in the remote store + try { + Files.move( + segmentRepoPath.resolve( + RemoteClusterStateService.encodeString(clusterService().state().getClusterName().value()) + + "/cluster-state/" + + prevClusterUUID + + "/index" + ), + segmentRepoPath.resolve("cluster-state/") + ); + } catch (IOException e) { + throw new RuntimeException(e); + } + + // Step - 5 Trigger full cluster restore and validate fails + PlainActionFuture future = PlainActionFuture.newFuture(); + restoreAndValidateFails(prevClusterUUID, future, IllegalStateException.class, "asdsa"); + } + + private void reduceShardLimits(int maxShardsPerNode, int maxShardsPerCluster) { + // Step 3 - Reduce shard limits so that the shard limit is hit with fewer shards + try { + client().admin() + .cluster() + .updateSettings( + new ClusterUpdateSettingsRequest().transientSettings( + Settings.builder() + .put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), maxShardsPerNode) + .put(SETTING_MAX_SHARDS_PER_CLUSTER_KEY, maxShardsPerCluster) + ) + ) + .get(); + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException(e); + } + } + + private void resetShardLimits() { + // Step - 5 Reset the cluster settings + ClusterUpdateSettingsRequest resetRequest = new ClusterUpdateSettingsRequest(); + resetRequest.transientSettings( + Settings.builder().putNull(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey()).putNull(SETTING_MAX_SHARDS_PER_CLUSTER_KEY) + ); + + try { + client().admin().cluster().updateSettings(resetRequest).get(); + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException(e); + } + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java index b84b9e38c63a3..489f4c52d4298 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java @@ -10,20 +10,14 @@ import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreResponse; -import org.opensearch.action.index.IndexResponse; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.health.ClusterHealthStatus; -import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue;
-import org.opensearch.plugins.Plugin; import org.opensearch.repositories.RepositoriesService; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.transport.MockTransportService; import java.io.IOException; -import java.util.Arrays; -import java.util.Collection; import java.util.HashMap; import java.util.Map; import java.util.concurrent.ExecutionException; @@ -33,79 +27,8 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.greaterThan; -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) -public class RemoteStoreRestoreIT extends RemoteStoreBaseIntegTestCase { - private static final String INDEX_NAME = "remote-store-test-idx-1"; - private static final String INDEX_NAMES = "test-remote-store-1,test-remote-store-2,remote-store-test-index-1,remote-store-test-index-2"; - private static final String INDEX_NAMES_WILDCARD = "test-remote-store-*,remote-store-test-index-*"; - private static final String TOTAL_OPERATIONS = "total-operations"; - private static final String MAX_SEQ_NO_TOTAL = "max-seq-no-total"; - - @Override - public Settings indexSettings() { - return remoteStoreIndexSettings(0); - } - - @Override - protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class); - } - - private void restore(String... indices) { - boolean restoreAllShards = randomBoolean(); - if (restoreAllShards) { - assertAcked(client().admin().indices().prepareClose(indices)); - } - client().admin() - .cluster() - .restoreRemoteStore( - new RestoreRemoteStoreRequest().indices(indices).restoreAllShards(restoreAllShards), - PlainActionFuture.newFuture() - ); - } - - private void verifyRestoredData(Map indexStats, String indexName) throws Exception { - ensureYellowAndNoInitializingShards(indexName); - ensureGreen(indexName); - // This is to ensure that shards that were already assigned will get latest count - refresh(indexName); - assertBusy( - () -> assertHitCount(client().prepareSearch(indexName).setSize(0).get(), indexStats.get(TOTAL_OPERATIONS)), - 30, - TimeUnit.SECONDS - ); - IndexResponse response = indexSingleDoc(indexName); - if (indexStats.containsKey(MAX_SEQ_NO_TOTAL + "-shard-" + response.getShardId().id())) { - assertEquals(indexStats.get(MAX_SEQ_NO_TOTAL + "-shard-" + response.getShardId().id()) + 1, response.getSeqNo()); - } - refresh(indexName); - assertBusy( - () -> assertHitCount(client().prepareSearch(indexName).setSize(0).get(), indexStats.get(TOTAL_OPERATIONS) + 1), - 30, - TimeUnit.SECONDS - ); - } - - private void prepareCluster(int numClusterManagerNodes, int numDataOnlyNodes, String indices, int replicaCount, int shardCount) { - prepareCluster(numClusterManagerNodes, numDataOnlyNodes, indices, replicaCount, shardCount, Settings.EMPTY); - } - - private void prepareCluster( - int numClusterManagerNodes, - int numDataOnlyNodes, - String indices, - int replicaCount, - int shardCount, - Settings clusterSettings - ) { - internalCluster().startClusterManagerOnlyNodes(numClusterManagerNodes, clusterSettings); - internalCluster().startDataOnlyNodes(numDataOnlyNodes, clusterSettings); - for (String index : indices.split(",")) { - createIndex(index, remoteStoreIndexSettings(replicaCount, shardCount)); - ensureYellowAndNoInitializingShards(index); - ensureGreen(index); - } - } +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 0) 
+public class RemoteStoreRestoreIT extends BaseRemoteStoreRestoreIT { /** * Simulates all data restored using Remote Translog Store. diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsFromNodesStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsFromNodesStatsIT.java new file mode 100644 index 0000000000000..6e796bdae5a4a --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsFromNodesStatsIT.java @@ -0,0 +1,209 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotestore; + +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsResponse; +import org.opensearch.action.admin.indices.stats.CommonStatsFlags; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.remote.RemoteSegmentStats; +import org.opensearch.index.translog.RemoteTranslogStats; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Before; + +import java.util.concurrent.TimeUnit; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteStoreStatsFromNodesStatsIT extends RemoteStoreBaseIntegTestCase { + private static final String INDEX_NAME = "remote-index-1"; + private static final int DATA_NODE_COUNT = 2; + private static final int CLUSTER_MANAGER_NODE_COUNT = 3; + + @Before + public void setup() { + setupCustomCluster(); + } + + private void setupCustomCluster() { + internalCluster().startClusterManagerOnlyNodes(CLUSTER_MANAGER_NODE_COUNT); + internalCluster().startDataOnlyNodes(DATA_NODE_COUNT); + ensureStableCluster(DATA_NODE_COUNT + CLUSTER_MANAGER_NODE_COUNT); + } + + /** + * - Creates two indices with single primary shard, pinned to a single node. 
+ * - Indexes documents in both of them and forces a refresh for both + * - Polls the _remotestore/stats API for individual index level stats + * - Adds up requisite fields from the API output, repeats this for the 2nd index + * - Polls _nodes/stats and verifies that the total values at node level add up + * to the values captured in the previous step + */ + public void testNodesStatsParityWithOnlyPrimaryShards() { + String[] dataNodes = internalCluster().getDataNodeNames().toArray(String[]::new); + String randomDataNode = dataNodes[randomIntBetween(0, dataNodes.length - 1)]; + String firstIndex = INDEX_NAME + "1"; + String secondIndex = INDEX_NAME + "2"; + + // Create first index + createIndex( + firstIndex, + Settings.builder().put(remoteStoreIndexSettings(0, 1)).put("index.routing.allocation.require._name", randomDataNode).build() + ); + ensureGreen(firstIndex); + indexSingleDoc(firstIndex, true); + + // Create second index + createIndex( + secondIndex, + Settings.builder().put(remoteStoreIndexSettings(0, 1)).put("index.routing.allocation.require._name", randomDataNode).build() + ); + ensureGreen(secondIndex); + indexSingleDoc(secondIndex, true); + + assertNodeStatsParityOnNode(randomDataNode, firstIndex, secondIndex); + } + + /** + * - Creates two indices with single primary shard and single replica + * - Indexes documents in both of them and forces a refresh for both + * - Polls the _remotestore/stats API for individual index level stats + * - Adds up requisite fields from the API output for both indices + * - Polls _nodes/stats and verifies that the total values at node level add up + * to the values captured in the previous step + * - Repeats the above steps for the second node + */ + public void testNodesStatsParityWithReplicaShards() throws Exception { + String firstIndex = INDEX_NAME + "1"; + String secondIndex = INDEX_NAME + "2"; + + createIndex(firstIndex, Settings.builder().put(remoteStoreIndexSettings(1, 1)).build()); + ensureGreen(firstIndex); + indexSingleDoc(firstIndex, true); + + // Create second index + createIndex(secondIndex, Settings.builder().put(remoteStoreIndexSettings(1, 1)).build()); + ensureGreen(secondIndex); + indexSingleDoc(secondIndex, true); + + assertBusy(() -> assertNodeStatsParityAcrossNodes(firstIndex, secondIndex), 15, TimeUnit.SECONDS); + } + + /** + * Ensures that node stats show zero values for dedicated cluster manager nodes + * since cluster manager nodes do not participate in indexing + */ + public void testZeroRemoteStatsOnNodesStatsForClusterManager() { + createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); + ensureGreen(INDEX_NAME); + indexSingleDoc(INDEX_NAME); + refresh(INDEX_NAME); + + NodesStatsResponse nodesStatsResponseForClusterManager = client().admin() + .cluster() + .prepareNodesStats(internalCluster().getClusterManagerName()) + .setIndices(new CommonStatsFlags().set(CommonStatsFlags.Flag.Segments, true).set(CommonStatsFlags.Flag.Translog, true)) + .get(); + + assertTrue( + nodesStatsResponseForClusterManager.getNodes().get(0).getNode().isClusterManagerNode() + && !nodesStatsResponseForClusterManager.getNodes().get(0).getNode().isDataNode() + ); + assertZeroRemoteSegmentStats( + nodesStatsResponseForClusterManager.getNodes().get(0).getIndices().getSegments().getRemoteSegmentStats() + ); + assertZeroRemoteTranslogStats( + nodesStatsResponseForClusterManager.getNodes().get(0).getIndices().getTranslog().getRemoteTranslogStats() + ); + + NodesStatsResponse nodesStatsResponseForDataNode = client().admin() + .cluster() +
.prepareNodesStats(primaryNodeName(INDEX_NAME)) + .setIndices(new CommonStatsFlags().set(CommonStatsFlags.Flag.Segments, true).set(CommonStatsFlags.Flag.Translog, true)) + .get(); + + assertTrue(nodesStatsResponseForDataNode.getNodes().get(0).getNode().isDataNode()); + RemoteSegmentStats remoteSegmentStats = nodesStatsResponseForDataNode.getNodes() + .get(0) + .getIndices() + .getSegments() + .getRemoteSegmentStats(); + assertTrue(remoteSegmentStats.getUploadBytesStarted() > 0); + assertTrue(remoteSegmentStats.getUploadBytesSucceeded() > 0); + + RemoteTranslogStats remoteTranslogStats = nodesStatsResponseForDataNode.getNodes() + .get(0) + .getIndices() + .getTranslog() + .getRemoteTranslogStats(); + assertTrue(remoteTranslogStats.getUploadBytesStarted() > 0); + assertTrue(remoteTranslogStats.getUploadBytesSucceeded() > 0); + } + + private void assertZeroRemoteSegmentStats(RemoteSegmentStats remoteSegmentStats) { + // Compare with a fresh object because all values default to 0 in a newly constructed object + assertEquals(new RemoteSegmentStats(), remoteSegmentStats); + } + + private void assertZeroRemoteTranslogStats(RemoteTranslogStats remoteTranslogStats) { + // Compare with a fresh object because all values default to 0 in a newly constructed object + assertEquals(new RemoteTranslogStats(), remoteTranslogStats); + } + + private static void assertNodeStatsParityAcrossNodes(String... indices) { + for (String dataNode : internalCluster().getDataNodeNames()) { + assertNodeStatsParityOnNode(dataNode, indices); + } + } + + private static void assertNodeStatsParityOnNode(String dataNode, String... indices) { + RemoteSegmentStats remoteSegmentStatsCumulative = new RemoteSegmentStats(); + RemoteTranslogStats remoteTranslogStatsCumulative = new RemoteTranslogStats(); + for (String index : indices) { + // Fetch _remotestore/stats + RemoteStoreStatsResponse remoteStoreStats = client(dataNode).admin() + .cluster() + .prepareRemoteStoreStats(index, "0") + .setLocal(true) + .get(); + remoteSegmentStatsCumulative.add(new RemoteSegmentStats(remoteStoreStats.getRemoteStoreStats()[0].getSegmentStats())); + remoteTranslogStatsCumulative.add(new RemoteTranslogStats(remoteStoreStats.getRemoteStoreStats()[0].getTranslogStats())); + } + + // Fetch _nodes/stats + NodesStatsResponse nodesStatsResponse = client().admin() + .cluster() + .prepareNodesStats(dataNode) + .setIndices(new CommonStatsFlags().set(CommonStatsFlags.Flag.Segments, true).set(CommonStatsFlags.Flag.Translog, true)) + .get(); + + // assert segment stats + RemoteSegmentStats remoteSegmentStatsFromNodesStats = nodesStatsResponse.getNodes() + .get(0) + .getIndices() + .getSegments() + .getRemoteSegmentStats(); + assertEquals(remoteSegmentStatsCumulative, remoteSegmentStatsFromNodesStats); + // Ensure that total upload time has a non-zero value if any segments have been uploaded from the node + if (remoteSegmentStatsCumulative.getUploadBytesStarted() > 0) { + assertTrue(remoteSegmentStatsCumulative.getTotalUploadTime() > 0); + } + // Ensure that total download time has a non-zero value if any segments have been downloaded to the node + if (remoteSegmentStatsCumulative.getDownloadBytesStarted() > 0) { + assertTrue(remoteSegmentStatsCumulative.getTotalDownloadTime() > 0); + } + + // assert translog stats + RemoteTranslogStats remoteTranslogStatsFromNodesStats = nodesStatsResponse.getNodes() + .get(0) + .getIndices() + .getTranslog() + .getRemoteTranslogStats(); + assertEquals(remoteTranslogStatsCumulative, remoteTranslogStatsFromNodesStats); + } +} diff --git
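The primary-only parity test above works because each index's single shard is pinned to a known node through allocation filtering, making every remote upload attributable to that node. That pinning technique on its own, as a sketch to be read inside an OpenSearchIntegTestCase test method (index and node names are placeholders):

// Pin a single-shard, zero-replica index to one data node so that all
// remote store uploads for it originate from that node.
Settings pinned = Settings.builder()
    .put("index.number_of_shards", 1)
    .put("index.number_of_replicas", 0)
    .put("index.routing.allocation.require._name", "data-node-1") // placeholder node name
    .build();
createIndex("pinned-index", pinned); // createIndex(...) is the test-base helper used throughout this suite
ensureGreen("pinned-index");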
a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java index 1b817408596ab..45c3ef7f5bae5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java @@ -9,7 +9,6 @@ package org.opensearch.remotestore; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.indices.replication.SegmentReplicationIT; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.After; @@ -22,9 +21,6 @@ /** * This class runs Segment Replication Integ test suite with remote store enabled. - * Setup is similar to SegmentReplicationRemoteStoreIT but this also enables the segment replication using remote store which - * is behind SEGMENT_REPLICATION_EXPERIMENTAL flag. After this is moved out of experimental, we can combine and keep only one - * test suite for Segment and Remote store integration tests. */ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class SegmentReplicationUsingRemoteStoreIT extends SegmentReplicationIT { @@ -47,15 +43,6 @@ protected boolean segmentReplicationWithRemoteEnabled() { return true; } - @Override - protected Settings featureFlagSettings() { - return Settings.builder() - .put(super.featureFlagSettings()) - .put(FeatureFlags.REMOTE_STORE, "true") - .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true") - .build(); - } - @Before public void setup() { internalCluster().startClusterManagerOnlyNode(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java index fa0944e5bfee0..0da4d81a8871e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java @@ -9,7 +9,6 @@ package org.opensearch.remotestore; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.SegmentReplicationPressureIT; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.After; @@ -22,8 +21,6 @@ /** * This class executes the SegmentReplicationPressureIT suite with remote store integration enabled. - * Setup is similar to SegmentReplicationPressureIT but this also enables the segment replication using remote store which - * is behind SEGMENT_REPLICATION_EXPERIMENTAL flag. 
*/ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class SegmentReplicationWithRemoteStorePressureIT extends SegmentReplicationPressureIT { @@ -36,15 +33,6 @@ protected boolean segmentReplicationWithRemoteEnabled() { return true; } - @Override - protected Settings featureFlagSettings() { - return Settings.builder() - .put(super.featureFlagSettings()) - .put(FeatureFlags.REMOTE_STORE, "true") - .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true") - .build(); - } - @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java index 9ec8cee2685fe..066d82483ae91 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java @@ -44,7 +44,6 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; @@ -57,7 +56,6 @@ import org.opensearch.repositories.RepositoryShardId; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.snapshots.mockstore.MockRepository; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchIntegTestCase; import java.nio.file.Path; @@ -159,7 +157,6 @@ public void testCloneSnapshotIndex() throws Exception { public void testCloneShallowSnapshotIndex() throws Exception { disableRepoConsistencyCheck("This test uses remote store repository"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); final String remoteStoreRepoName = "remote-store-repo-name"; final Path remoteStoreRepoPath = randomRepoPath(); internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName, remoteStoreRepoPath)); @@ -204,7 +201,6 @@ public void testCloneShallowSnapshotIndex() throws Exception { public void testShallowCloneNameAvailability() throws Exception { disableRepoConsistencyCheck("This test uses remote store repository"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); final String remoteStoreRepoName = "remote-store-repo-name"; final Path remoteStorePath = randomRepoPath().toAbsolutePath(); internalCluster().startClusterManagerOnlyNode( @@ -245,7 +241,6 @@ public void testShallowCloneNameAvailability() throws Exception { public void testCloneAfterRepoShallowSettingEnabled() throws Exception { disableRepoConsistencyCheck("This test uses remote store repository"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); final String remoteStoreRepoName = "remote-store-repo-name"; final Path remoteStoreRepoPath = randomRepoPath(); internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName, remoteStoreRepoPath)); @@ -280,7 +275,6 @@ public void testCloneAfterRepoShallowSettingEnabled() throws Exception { public void testCloneAfterRepoShallowSettingDisabled() throws Exception { disableRepoConsistencyCheck("This test uses remote store repository"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); final String remoteStoreRepoName = "remote-store-repo-name"; final Path 
remoteStoreRepoPath = randomRepoPath(); internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName, remoteStoreRepoPath)); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java index 448b860683668..e79bf1c16b586 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java @@ -16,9 +16,7 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchIntegTestCase; import java.nio.file.Path; @@ -41,7 +39,6 @@ public class DeleteSnapshotIT extends AbstractSnapshotIntegTestCase { public void testDeleteSnapshot() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); final Path remoteStoreRepoPath = randomRepoPath(); internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); @@ -69,7 +66,6 @@ public void testDeleteSnapshot() throws Exception { public void testDeleteShallowCopySnapshot() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); final Path remoteStoreRepoPath = randomRepoPath(); internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); @@ -98,8 +94,6 @@ public void testDeleteShallowCopySnapshot() throws Exception { // Deleting multiple shallow copy snapshots as part of single delete call with repo having only shallow copy snapshots. 
public void testDeleteMultipleShallowCopySnapshotsCase1() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - final Path remoteStoreRepoPath = randomRepoPath(); internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); @@ -142,8 +136,6 @@ public void testDeleteMultipleShallowCopySnapshotsCase1() throws Exception { @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8610") public void testDeleteMultipleShallowCopySnapshotsCase2() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - final Path remoteStoreRepoPath = randomRepoPath(); internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); final String dataNode = internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); @@ -228,8 +220,6 @@ public void testDeleteMultipleShallowCopySnapshotsCase2() throws Exception { @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8610") public void testDeleteMultipleShallowCopySnapshotsCase3() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - final Path remoteStoreRepoPath = randomRepoPath(); internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); @@ -288,8 +278,6 @@ public void testDeleteMultipleShallowCopySnapshotsCase3() throws Exception { public void testRemoteStoreCleanupForDeletedIndex() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - final Path remoteStoreRepoPath = randomRepoPath(); internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java index 979df95547d06..8e2580aba1745 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java @@ -39,7 +39,6 @@ import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; import org.junit.Before; @@ -67,8 +66,6 @@ protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) .put(ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.getKey(), 0) // We have tests that check by-timestamp order - .put(FeatureFlags.REMOTE_STORE, "true") - .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true") 
.put(remoteStoreClusterSettings(remoteStoreRepoName, absolutePath)) .build(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java index 7327f1127eab2..7117818451e14 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java @@ -45,7 +45,6 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.rest.RestStatus; @@ -82,11 +81,6 @@ import static org.hamcrest.Matchers.nullValue; public class RestoreSnapshotIT extends AbstractSnapshotIntegTestCase { - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(FeatureFlags.REMOTE_STORE, "true").build(); - } - public void testParallelRestoreOperations() { String indexName1 = "testindex1"; String indexName2 = "testindex2"; diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java index 30394611ac48f..c574233d25051 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java @@ -47,7 +47,6 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.Strings; import org.opensearch.core.common.unit.ByteSizeUnit; @@ -77,7 +76,6 @@ protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) .put(ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.getKey(), 0) // We have tests that check by-timestamp order - .put(FeatureFlags.REMOTE_STORE, "true") .build(); } diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index e9fe10b15e817..46775466aa615 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -972,12 +972,8 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestCatAction(catActions)); registerHandler.accept(new RestDecommissionAction()); registerHandler.accept(new RestGetDecommissionStateAction()); - - // Remote Store APIs - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { - registerHandler.accept(new RestRemoteStoreStatsAction()); - registerHandler.accept(new RestRestoreRemoteStoreAction()); - } + registerHandler.accept(new RestRemoteStoreStatsAction()); + registerHandler.accept(new RestRestoreRemoteStoreAction()); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/crypto/CryptoSettings.java b/server/src/main/java/org/opensearch/action/admin/cluster/crypto/CryptoSettings.java index 0d289e3552cdf..bd783b349bed4 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/crypto/CryptoSettings.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/crypto/CryptoSettings.java @@ -31,7 +31,6 @@ * @opensearch.internal */ public class CryptoSettings implements Writeable, ToXContentObject { - private String keyProviderName; private String keyProviderType; private Settings settings = EMPTY_SETTINGS; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStats.java index 8f422bc11f876..f292fcec7ccac 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStats.java @@ -240,7 +240,7 @@ static final class RoutingFields { /** * Fields for remote store stats response */ - static final class UploadStatsFields { + public static final class UploadStatsFields { /** * Lag in terms of bytes b/w local and remote store */ @@ -294,12 +294,12 @@ static final class UploadStatsFields { /** * Count of files uploaded to remote store */ - static final String TOTAL_UPLOADS = "total_uploads"; + public static final String TOTAL_UPLOADS = "total_uploads"; /** * Represents the total uploads to remote store in bytes */ - static final String TOTAL_UPLOAD_SIZE = "total_upload_size"; + public static final String TOTAL_UPLOAD_SIZE = "total_upload_size"; /** * Total time spent on remote store uploads @@ -367,17 +367,17 @@ static final class DownloadStatsFields { /** * Reusable sub fields for {@link UploadStatsFields} and {@link DownloadStatsFields} */ - static final class SubFields { - static final String STARTED = "started"; - static final String SUCCEEDED = "succeeded"; - static final String FAILED = "failed"; + public static final class SubFields { + public static final String STARTED = "started"; + public static final String SUCCEEDED = "succeeded"; + public static final String FAILED = "failed"; - static final String STARTED_BYTES = "started_bytes"; - static final String SUCCEEDED_BYTES = "succeeded_bytes"; - static final String FAILED_BYTES = "failed_bytes"; + public static final String STARTED_BYTES = "started_bytes"; + public static final String SUCCEEDED_BYTES = "succeeded_bytes"; + public static final String FAILED_BYTES = "failed_bytes"; static final String DOWNLOAD = "download"; - static final String UPLOAD = "upload"; + public static final String UPLOAD = "upload"; /** * Moving avg over last N values stat diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index 6f58a532b48a1..256850da0d503 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -39,7 +39,6 @@ import org.opensearch.common.Nullable; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -152,7 +151,7 @@ public RestoreSnapshotRequest(StreamInput in) throws IOException { if 
(in.getVersion().onOrAfter(Version.V_2_7_0)) { storageType = in.readEnum(StorageType.class); } - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE) && in.getVersion().onOrAfter(Version.V_2_9_0)) { + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { sourceRemoteStoreRepository = in.readOptionalString(); } } @@ -176,7 +175,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_7_0)) { out.writeEnum(storageType); } - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE) && out.getVersion().onOrAfter(Version.V_2_9_0)) { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { out.writeOptionalString(sourceRemoteStoreRepository); } } @@ -616,11 +615,6 @@ public RestoreSnapshotRequest source(Map source) { } } else if (name.equals("source_remote_store_repository")) { - if (!FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { - throw new IllegalArgumentException( - "Unsupported parameter " + name + ". Please enable remote store feature flag for this experimental feature" - ); - } if (entry.getValue() instanceof String) { setSourceRemoteStoreRepository((String) entry.getValue()); } else { @@ -671,7 +665,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (storageType != null) { storageType.toXContent(builder); } - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE) && sourceRemoteStoreRepository != null) { + if (sourceRemoteStoreRepository != null) { builder.field("source_remote_store_repository", sourceRemoteStoreRepository); } builder.endObject(); @@ -701,48 +695,29 @@ public boolean equals(Object o) { && Objects.equals(indexSettings, that.indexSettings) && Arrays.equals(ignoreIndexSettings, that.ignoreIndexSettings) && Objects.equals(snapshotUuid, that.snapshotUuid) - && Objects.equals(storageType, that.storageType); - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { - equals = Objects.equals(sourceRemoteStoreRepository, that.sourceRemoteStoreRepository); - } + && Objects.equals(storageType, that.storageType) + && Objects.equals(sourceRemoteStoreRepository, that.sourceRemoteStoreRepository); return equals; } @Override public int hashCode() { int result; - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { - result = Objects.hash( - snapshot, - repository, - indicesOptions, - renamePattern, - renameReplacement, - waitForCompletion, - includeGlobalState, - partial, - includeAliases, - indexSettings, - snapshotUuid, - storageType, - sourceRemoteStoreRepository - ); - } else { - result = Objects.hash( - snapshot, - repository, - indicesOptions, - renamePattern, - renameReplacement, - waitForCompletion, - includeGlobalState, - partial, - includeAliases, - indexSettings, - snapshotUuid, - storageType - ); - } + result = Objects.hash( + snapshot, + repository, + indicesOptions, + renamePattern, + renameReplacement, + waitForCompletion, + includeGlobalState, + partial, + includeAliases, + indexSettings, + snapshotUuid, + storageType, + sourceRemoteStoreRepository + ); result = 31 * result + Arrays.hashCode(indices); result = 31 * result + Arrays.hashCode(ignoreIndexSettings); return result; diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java index efc21b2c03808..fddda0ef1f9a7 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java @@ -72,7 +72,6 @@ import 
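The hunks above swap a feature-flag guard for a pure wire-version gate and move it from V_2_9_0 to V_2_10_0. For context, the general shape of a version-gated optional field in this codebase's transport serialization, as a sketch (the class and field are illustrative, not part of the patch):

import org.opensearch.Version;
import org.opensearch.core.common.io.stream.StreamInput;
import org.opensearch.core.common.io.stream.StreamOutput;
import org.opensearch.core.common.io.stream.Writeable;

import java.io.IOException;

// Hypothetical request fragment showing the pattern used by RestoreSnapshotRequest.
class VersionGatedRequest implements Writeable {
    private String sourceRepository; // optional; may be null

    VersionGatedRequest(StreamInput in) throws IOException {
        // Only consume the field if the sender was new enough to have written it.
        if (in.getVersion().onOrAfter(Version.V_2_10_0)) {
            sourceRepository = in.readOptionalString();
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        // The write side must mirror the read gate exactly; an asymmetric gate
        // mis-frames the stream in mixed-version clusters.
        if (out.getVersion().onOrAfter(Version.V_2_10_0)) {
            out.writeOptionalString(sourceRepository);
        }
    }
}

With the flag gone, equals() and hashCode() can include the field unconditionally, which is exactly what the hunk above does.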
org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.action.ActionListener; @@ -539,7 +538,7 @@ protected Releasable checkPrimaryLimits(BulkShardRequest request, boolean rerout } // TODO - While removing remote store flag, this can be encapsulated to single class with common interface for backpressure // service - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE) && remoteStorePressureService.isSegmentsUploadBackpressureEnabled()) { + if (remoteStorePressureService.isSegmentsUploadBackpressureEnabled()) { remoteStorePressureService.validateSegmentsUploadLag(request.shardId()); } } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java index fa32618b96ef3..a339852e6ed8d 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java @@ -52,7 +52,7 @@ import java.util.Set; import static org.opensearch.cluster.coordination.Coordinator.ZEN1_BWC_TERM; -import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled; /** * The core class of the cluster state coordination algorithm, directly implementing the @@ -101,7 +101,7 @@ public CoordinationState( .getLastAcceptedState() .getLastAcceptedConfiguration(); this.publishVotes = new VoteCollection(); - this.isRemoteStateEnabled = REMOTE_CLUSTER_STATE_ENABLED_SETTING.get(settings); + this.isRemoteStateEnabled = isRemoteStoreClusterStateEnabled(settings); } public long getCurrentTerm() { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/CryptoMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/CryptoMetadata.java index bf55ef0837ea9..27803cb106005 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/CryptoMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/CryptoMetadata.java @@ -27,6 +27,10 @@ * @opensearch.internal */ public class CryptoMetadata implements Writeable { + static final public String CRYPTO_METADATA_KEY = "crypto_metadata"; + static final public String KEY_PROVIDER_NAME_KEY = "key_provider_name"; + static final public String KEY_PROVIDER_TYPE_KEY = "key_provider_type"; + static final public String SETTINGS_KEY = "settings"; private final String keyProviderName; private final String keyProviderType; private final Settings settings; @@ -104,17 +108,17 @@ public static CryptoMetadata fromXContent(XContentParser parser) throws IOExcept while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { String currentFieldName = parser.currentName(); - if ("key_provider_name".equals(currentFieldName)) { + if (KEY_PROVIDER_NAME_KEY.equals(currentFieldName)) { if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { throw new OpenSearchParseException("failed to parse crypto metadata [{}], unknown type"); } keyProviderName = parser.text(); - } else if ("key_provider_type".equals(currentFieldName)) { + } else if (KEY_PROVIDER_TYPE_KEY.equals(currentFieldName)) { if 
(parser.nextToken() != XContentParser.Token.VALUE_STRING) { throw new OpenSearchParseException("failed to parse crypto metadata [{}], unknown type"); } keyProviderType = parser.text(); - } else if ("settings".equals(currentFieldName)) { + } else if (SETTINGS_KEY.equals(currentFieldName)) { if (parser.nextToken() != XContentParser.Token.START_OBJECT) { throw new OpenSearchParseException("failed to parse crypto metadata [{}], unknown type"); } @@ -130,10 +134,10 @@ public static CryptoMetadata fromXContent(XContentParser parser) throws IOExcept } public void toXContent(CryptoMetadata cryptoMetadata, XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startObject("crypto_metadata"); - builder.field("key_provider_name", cryptoMetadata.keyProviderName()); - builder.field("key_provider_type", cryptoMetadata.keyProviderType()); - builder.startObject("settings"); + builder.startObject(CRYPTO_METADATA_KEY); + builder.field(KEY_PROVIDER_NAME_KEY, cryptoMetadata.keyProviderName()); + builder.field(KEY_PROVIDER_TYPE_KEY, cryptoMetadata.keyProviderType()); + builder.startObject(SETTINGS_KEY); cryptoMetadata.settings().toXContent(builder, params); builder.endObject(); builder.endObject(); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java index 3cde53c3ae74b..1ab402e1bde4e 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java @@ -232,7 +232,7 @@ public static RepositoriesMetadata fromXContent(XContentParser parser) throws IO throw new OpenSearchParseException("failed to parse repository [{}], unknown type", name); } pendingGeneration = parser.longValue(); - } else if ("crypto_metadata".equals(currentFieldName)) { + } else if (CryptoMetadata.CRYPTO_METADATA_KEY.equals(currentFieldName)) { if (parser.nextToken() != XContentParser.Token.START_OBJECT) { throw new OpenSearchParseException("failed to parse repository [{}], unknown type", name); } diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java index fd70d319780c8..b12698c8a320e 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java @@ -454,7 +454,7 @@ public Builder initializeAsRemoteStoreRestore( IndexMetadata indexMetadata, RemoteStoreRecoverySource recoverySource, Map indexShardRoutingTableMap, - boolean restoreAllShards + boolean forceRecoverAllPrimaries ) { final UnassignedInfo unassignedInfo = new UnassignedInfo( UnassignedInfo.Reason.EXISTING_INDEX_RESTORED, @@ -466,21 +466,20 @@ public Builder initializeAsRemoteStoreRestore( } for (int shardNumber = 0; shardNumber < indexMetadata.getNumberOfShards(); shardNumber++) { ShardId shardId = new ShardId(index, shardNumber); - if (indexShardRoutingTableMap.containsKey(shardId) == false) { + if (forceRecoverAllPrimaries == false && indexShardRoutingTableMap.containsKey(shardId) == false) { throw new IllegalStateException("IndexShardRoutingTable is not present for shardId: " + shardId); } IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId); IndexShardRoutingTable indexShardRoutingTable = indexShardRoutingTableMap.get(shardId); - if (restoreAllShards || 
indexShardRoutingTable.primaryShard().unassigned()) { + if (forceRecoverAllPrimaries || indexShardRoutingTable == null || indexShardRoutingTable.primaryShard().unassigned()) { // Primary shard to be recovered from remote store. indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(shardId, true, recoverySource, unassignedInfo)); // All the replica shards to be recovered from peer recovery. - indexShardRoutingTable.replicaShards() - .forEach( - shardRouting -> indexShardRoutingBuilder.addShard( - ShardRouting.newUnassigned(shardId, false, PeerRecoverySource.INSTANCE, unassignedInfo) - ) + for (int replicaNumber = 0; replicaNumber < indexMetadata.getNumberOfReplicas(); replicaNumber++) { + indexShardRoutingBuilder.addShard( + ShardRouting.newUnassigned(shardId, false, PeerRecoverySource.INSTANCE, unassignedInfo) ); + } } else { // Primary is either active or initializing. Do not trigger restore. indexShardRoutingBuilder.addShard(indexShardRoutingTable.primaryShard()); diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java index bcfc324b202b9..2b56163f852e8 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java @@ -576,10 +576,10 @@ public Builder addAsRemoteStoreRestore( IndexMetadata indexMetadata, RemoteStoreRecoverySource recoverySource, Map indexShardRoutingTableMap, - boolean restoreAllShards + boolean forceRecoveryPrimary ) { IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetadata.getIndex()) - .initializeAsRemoteStoreRestore(indexMetadata, recoverySource, indexShardRoutingTableMap, restoreAllShards); + .initializeAsRemoteStoreRestore(indexMetadata, recoverySource, indexShardRoutingTableMap, forceRecoveryPrimary); add(indexRoutingBuilder); return this; } diff --git a/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java index 5365b5a9d8bdf..4d2d69e473438 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java +++ b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java @@ -9,9 +9,9 @@ package org.opensearch.common.blobstore; import org.opensearch.cluster.metadata.CryptoMetadata; -import org.opensearch.crypto.CryptoManagerRegistry; +import org.opensearch.common.crypto.CryptoHandler; +import org.opensearch.crypto.CryptoHandlerRegistry; import org.opensearch.crypto.CryptoRegistryException; -import org.opensearch.encryption.CryptoManager; import java.io.IOException; import java.util.Map; @@ -25,7 +25,7 @@ public class EncryptedBlobStore implements BlobStore { private final BlobStore blobStore; - private final CryptoManager cryptoManager; + private final CryptoHandler cryptoHandler; /** * Constructs an EncryptedBlobStore that wraps the provided BlobStore with encryption capabilities based on the @@ -36,17 +36,16 @@ public class EncryptedBlobStore implements BlobStore { * @throws CryptoRegistryException If the CryptoManager is not found during encrypted BlobStore creation. 
*/ public EncryptedBlobStore(BlobStore blobStore, CryptoMetadata cryptoMetadata) { - CryptoManagerRegistry cryptoManagerRegistry = CryptoManagerRegistry.getInstance(); - assert cryptoManagerRegistry != null : "CryptoManagerRegistry is not initialized"; - this.cryptoManager = cryptoManagerRegistry.fetchCryptoManager(cryptoMetadata); - if (cryptoManager == null) { + CryptoHandlerRegistry cryptoHandlerRegistry = CryptoHandlerRegistry.getInstance(); + assert cryptoHandlerRegistry != null : "CryptoHandlerRegistry is not initialized"; + this.cryptoHandler = cryptoHandlerRegistry.fetchCryptoHandler(cryptoMetadata); + if (cryptoHandler == null) { throw new CryptoRegistryException( cryptoMetadata.keyProviderName(), cryptoMetadata.keyProviderType(), "Crypto manager not found during encrypted blob store creation." ); } - this.cryptoManager.incRef(); this.blobStore = blobStore; } @@ -61,12 +60,9 @@ public EncryptedBlobStore(BlobStore blobStore, CryptoMetadata cryptoMetadata) { public BlobContainer blobContainer(BlobPath path) { BlobContainer blobContainer = blobStore.blobContainer(path); if (blobContainer instanceof AsyncMultiStreamBlobContainer) { - return new AsyncMultiStreamEncryptedBlobContainer<>( - (AsyncMultiStreamBlobContainer) blobContainer, - cryptoManager.getCryptoProvider() - ); + return new AsyncMultiStreamEncryptedBlobContainer<>((AsyncMultiStreamBlobContainer) blobContainer, cryptoHandler); } - return new EncryptedBlobContainer<>(blobContainer, cryptoManager.getCryptoProvider()); + return new EncryptedBlobContainer<>(blobContainer, cryptoHandler); } /** @@ -87,7 +83,7 @@ public Map stats() { */ @Override public void close() throws IOException { - cryptoManager.decRef(); + cryptoHandler.close(); blobStore.close(); } diff --git a/server/src/main/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainer.java b/server/src/main/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainer.java index c7cfef5c5ce3d..5808f51f01efc 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainer.java @@ -10,7 +10,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.CorruptIndexException; +import org.apache.lucene.store.IndexInput; import org.opensearch.common.CheckedTriFunction; import org.opensearch.common.SetOnce; import org.opensearch.common.StreamContext; @@ -19,11 +21,13 @@ import org.opensearch.common.blobstore.transfer.stream.OffsetRangeInputStream; import org.opensearch.common.blobstore.transfer.stream.ResettableCheckedInputStream; import org.opensearch.common.io.InputStreamContainer; +import org.opensearch.common.util.ByteUtils; import java.io.Closeable; import java.io.IOException; import java.io.InputStream; import java.util.Objects; +import java.util.zip.CRC32; import com.jcraft.jzlib.JZlib; @@ -244,4 +248,17 @@ public void close() throws IOException { throw new IOException("Closure of some of the multi-part streams failed."); } } + + /** + * Compute the final checksum of an entire IndexInput, including the checksum footer added by {@link CodecUtil} + * @param indexInput IndexInput with checksum in footer + * @param checksumBytesLength length of checksum bytes + * @return final computed checksum of entire indexInput + */ + public static long checksumOfChecksum(IndexInput indexInput, int checksumBytesLength) throws IOException { + long
storedChecksum = CodecUtil.retrieveChecksum(indexInput); + CRC32 checksumOfChecksum = new CRC32(); + checksumOfChecksum.update(ByteUtils.toByteArrayBE(storedChecksum)); + return JZlib.crc32_combine(storedChecksum, checksumOfChecksum.getValue(), checksumBytesLength); + } } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index b8fe322234140..74224d66400da 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -670,8 +670,8 @@ public void apply(Settings value, Settings current, Settings previous) { // Remote cluster state settings RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING, - RemoteClusterStateService.REMOTE_CLUSTER_STATE_REPOSITORY_SETTING, - RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING + RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING, + IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING ) ) ); @@ -684,8 +684,6 @@ public void apply(Settings value, Settings current, Settings previous) { * setting should be moved to {@link #BUILT_IN_CLUSTER_SETTINGS}. */ public static final Map, List> FEATURE_FLAGGED_CLUSTER_SETTINGS = Map.of( - List.of(FeatureFlags.REMOTE_STORE), - List.of(IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING), List.of(FeatureFlags.CONCURRENT_SEGMENT_SEARCH), List.of( SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING, diff --git a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java index e109d2a871cef..90abc0a0765c1 100644 --- a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java @@ -36,7 +36,6 @@ protected FeatureFlagSettings( new HashSet<>( Arrays.asList( FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL_SETTING, - FeatureFlags.REMOTE_STORE_SETTING, FeatureFlags.EXTENSIONS_SETTING, FeatureFlags.IDENTITY_SETTING, FeatureFlags.CONCURRENT_SEGMENT_SEARCH_SETTING, diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 30a3550d62c8a..5b2afc44600bd 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -213,6 +213,11 @@ public final class IndexScopedSettings extends AbstractScopedSettings { // Settings for remote translog IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, + // Settings for remote store enablement + IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING, + IndexMetadata.INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING, + IndexMetadata.INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING, + // validate that built-in similarities don't get redefined Setting.groupSetting("index.similarity.", (s) -> { Map groups = s.getAsGroups(); @@ -236,12 +241,6 @@ public final class IndexScopedSettings extends AbstractScopedSettings { * setting should be moved to {@link #BUILT_IN_INDEX_SETTINGS}. 
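checksumOfChecksum above extends the CRC32 stored in the Lucene footer across the footer's own checksum bytes with JZlib.crc32_combine, producing the checksum of the whole file without rereading it. The combine identity it relies on, that crc(A || B) is computable from crc(A), crc(B), and B's length, can be checked in isolation; a small self-contained sketch:

import com.jcraft.jzlib.JZlib;

import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;

public class Crc32CombineDemo {
    public static void main(String[] args) {
        byte[] a = "hello ".getBytes(StandardCharsets.UTF_8);
        byte[] b = "world".getBytes(StandardCharsets.UTF_8);

        CRC32 crcA = new CRC32();
        crcA.update(a);
        CRC32 crcB = new CRC32();
        crcB.update(b);

        // CRC of the concatenation, computed directly over both parts...
        CRC32 crcAB = new CRC32();
        crcAB.update(a);
        crcAB.update(b);

        // ...equals the part CRCs combined with the length of the second part.
        long combined = JZlib.crc32_combine(crcA.getValue(), crcB.getValue(), b.length);
        if (combined != crcAB.getValue()) {
            throw new AssertionError("crc32_combine identity violated");
        }
        System.out.println("combined CRC = " + Long.toHexString(combined));
    }
}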
*/ public static final Map> FEATURE_FLAGGED_INDEX_SETTINGS = Map.of( - FeatureFlags.REMOTE_STORE, - List.of( - IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING, - IndexMetadata.INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING, - IndexMetadata.INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING - ), FeatureFlags.CONCURRENT_SEGMENT_SEARCH, List.of(IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING) ); diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index e2663b56c5cca..b89d2d0549823 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -20,19 +20,12 @@ * @opensearch.internal */ public class FeatureFlags { - /** * Gates the visibility of the segment replication experimental features that allows users to test unreleased beta features. */ public static final String SEGMENT_REPLICATION_EXPERIMENTAL = "opensearch.experimental.feature.segment_replication_experimental.enabled"; - /** - * Gates the visibility of the index setting that allows persisting data to remote store along with local disk. - * Once the feature is ready for production release, this feature flag can be removed. - */ - public static final String REMOTE_STORE = "opensearch.experimental.feature.remote_store.enabled"; - /** * Gates the ability for Searchable Snapshots to read snapshots that are older than the * guaranteed backward compatibility for OpenSearch (one prior major version) on a best effort basis. @@ -96,8 +89,6 @@ public static boolean isEnabled(String featureFlagName) { Property.NodeScope ); - public static final Setting REMOTE_STORE_SETTING = Setting.boolSetting(REMOTE_STORE, false, Property.NodeScope); - public static final Setting EXTENSIONS_SETTING = Setting.boolSetting(EXTENSIONS, false, Property.NodeScope); public static final Setting IDENTITY_SETTING = Setting.boolSetting(IDENTITY, false, Property.NodeScope); diff --git a/server/src/main/java/org/opensearch/crypto/CryptoManagerRegistry.java b/server/src/main/java/org/opensearch/crypto/CryptoHandlerRegistry.java similarity index 58% rename from server/src/main/java/org/opensearch/crypto/CryptoManagerRegistry.java rename to server/src/main/java/org/opensearch/crypto/CryptoHandlerRegistry.java index c6a8b56a8d8c8..0a14331be35f7 100644 --- a/server/src/main/java/org/opensearch/crypto/CryptoManagerRegistry.java +++ b/server/src/main/java/org/opensearch/crypto/CryptoHandlerRegistry.java @@ -13,12 +13,11 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.cluster.metadata.CryptoMetadata; import org.opensearch.common.SetOnce; +import org.opensearch.common.crypto.CryptoHandler; import org.opensearch.common.crypto.MasterKeyProvider; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.encryption.CryptoManager; -import org.opensearch.encryption.CryptoManagerFactory; import org.opensearch.plugins.CryptoKeyProviderPlugin; +import org.opensearch.plugins.CryptoPlugin; import java.util.HashMap; import java.util.List; @@ -27,19 +26,19 @@ /** * During node bootstrap, installed key provider extensions responsible for generating data keys are loaded. - * Crypto factories against the respective extensions are cached. 
A crypto factory is used to register crypto - manager against an {@link org.opensearch.common.blobstore.EncryptedBlobStore} + * Crypto factories against the respective key provider plugins are cached. A crypto factory is used to register crypto + handler against an {@link org.opensearch.common.blobstore.EncryptedBlobStore} */ -public class CryptoManagerRegistry { - private static final Logger logger = LogManager.getLogger(CryptoManagerRegistry.class); +public class CryptoHandlerRegistry { + private static final Logger logger = LogManager.getLogger(CryptoHandlerRegistry.class); // Package private for tests SetOnce> registry = new SetOnce<>(); // Package private for tests - SetOnce cryptoManagerFactory = new SetOnce(); - private final Map registeredCryptoManagers = new HashMap<>(); + SetOnce cryptoHandlerPlugin = new SetOnce<>(); + private final Map registeredCryptoHandlers = new HashMap<>(); - private static volatile CryptoManagerRegistry instance; + private static volatile CryptoHandlerRegistry instance; private static final Object lock = new Object(); /** @@ -48,22 +47,38 @@ public class CryptoManagerRegistry { * @param cryptoPlugins The list of installed crypto key provider plugins. * @param settings Crypto settings. */ - protected CryptoManagerRegistry(List cryptoPlugins, Settings settings) { - cryptoManagerFactory.set(new CryptoManagerFactory("ALG_AES_256_GCM_HKDF_SHA512_COMMIT_KEY", TimeValue.timeValueDays(2), 500)); - registry.set(loadCryptoFactories(cryptoPlugins)); + protected CryptoHandlerRegistry( + List cryptoPlugins, + List cryptoKeyProviderPlugins, + Settings settings + ) { + if (cryptoPlugins == null || cryptoPlugins.size() == 0) { + return; + } + if (cryptoPlugins.size() > 1) { + // We can remove this to support multiple implementations in the future if needed.
+ throw new IllegalStateException("More than 1 implementation of crypto plugin found."); + } + + cryptoHandlerPlugin.set(cryptoPlugins.get(0)); + registry.set(loadCryptoFactories(cryptoKeyProviderPlugins)); } - public static CryptoManagerRegistry getInstance() { + public static CryptoHandlerRegistry getInstance() { return instance; } - public static CryptoManagerRegistry initRegistry(List cryptoPlugins, Settings settings) { - CryptoManagerRegistry curInstance = instance; + public static CryptoHandlerRegistry initRegistry( + List cryptoPlugins, + List cryptoKeyProviderPlugins, + Settings settings + ) { + CryptoHandlerRegistry curInstance = instance; if (curInstance == null) { synchronized (lock) { curInstance = instance; if (curInstance == null) { - instance = curInstance = new CryptoManagerRegistry(cryptoPlugins, settings); + instance = curInstance = new CryptoHandlerRegistry(cryptoPlugins, cryptoKeyProviderPlugins, settings); } } } @@ -71,23 +86,23 @@ public static CryptoManagerRegistry initRegistry(List c } // For tests - protected Map loadCryptoFactories(List cryptoPlugins) { + protected Map loadCryptoFactories(List cryptoKPPlugins) { Map cryptoFactories = new HashMap<>(); - for (CryptoKeyProviderPlugin cryptoPlugin : cryptoPlugins) { - if (cryptoFactories.containsKey(cryptoPlugin.type())) { - throw new IllegalArgumentException("Crypto plugin key provider type [" + cryptoPlugin.type() + "] is already registered"); + for (CryptoKeyProviderPlugin cryptoKPPlugin : cryptoKPPlugins) { + if (cryptoFactories.containsKey(cryptoKPPlugin.type())) { + throw new IllegalArgumentException("Crypto plugin key provider type [" + cryptoKPPlugin.type() + "] is already registered"); } - cryptoFactories.put(cryptoPlugin.type(), cryptoPlugin); + cryptoFactories.put(cryptoKPPlugin.type(), cryptoKPPlugin); } return Map.copyOf(cryptoFactories); } /** - * Retrieves the crypto factory associated with the given key provider type (extension id). + * Retrieves the crypto factory associated with the given key provider type . * - * @param keyProviderType The unique extension type for which the factory is to be fetched. - * @return The crypto factory used to create {@link CryptoManager} + * @param keyProviderType The unique provider type for which the factory is to be fetched. + * @return The crypto factory used to create {@link CryptoHandler} * instances in a {@link org.opensearch.common.blobstore.EncryptedBlobStore}. * @throws IllegalStateException If the crypto registry is not yet loaded. */ @@ -106,26 +121,26 @@ public CryptoKeyProviderPlugin getCryptoKeyProviderPlugin(String keyProviderType * @return The crypto manager for performing encrypt/decrypt operations. * @throws CryptoRegistryException If the key provider is not installed or there is an error during crypto manager creation. 
*/ - public CryptoManager fetchCryptoManager(CryptoMetadata cryptoMetadata) { - CryptoManager cryptoManager = registeredCryptoManagers.get(cryptoMetadata); - if (cryptoManager == null) { - synchronized (registeredCryptoManagers) { - cryptoManager = registeredCryptoManagers.get(cryptoMetadata); - if (cryptoManager == null) { + public CryptoHandler fetchCryptoHandler(CryptoMetadata cryptoMetadata) { + CryptoHandler cryptoHandler = registeredCryptoHandlers.get(cryptoMetadata); + if (cryptoHandler == null) { + synchronized (registeredCryptoHandlers) { + cryptoHandler = registeredCryptoHandlers.get(cryptoMetadata); + if (cryptoHandler == null) { Runnable onClose = () -> { - synchronized (registeredCryptoManagers) { - registeredCryptoManagers.remove(cryptoMetadata); + synchronized (registeredCryptoHandlers) { + registeredCryptoHandlers.remove(cryptoMetadata); } }; - cryptoManager = createCryptoManager(cryptoMetadata, onClose); - registeredCryptoManagers.put(cryptoMetadata, cryptoManager); + cryptoHandler = createCryptoHandler(cryptoMetadata, onClose); + registeredCryptoHandlers.put(cryptoMetadata, cryptoHandler); } } } - return cryptoManager; + return cryptoHandler; } - private CryptoManager createCryptoManager(CryptoMetadata cryptoMetadata, Runnable onClose) { + private CryptoHandler createCryptoHandler(CryptoMetadata cryptoMetadata, Runnable onClose) { logger.debug("creating crypto client [{}][{}]", cryptoMetadata.keyProviderType(), cryptoMetadata.keyProviderName()); CryptoKeyProviderPlugin keyProviderPlugin = getCryptoKeyProviderPlugin(cryptoMetadata.keyProviderType()); if (keyProviderPlugin == null) { @@ -134,8 +149,8 @@ private CryptoManager createCryptoManager(CryptoMetadata cryptoMetadata, Runnabl try { MasterKeyProvider masterKeyProvider = keyProviderPlugin.createKeyProvider(cryptoMetadata); - return Objects.requireNonNull(cryptoManagerFactory.get()) - .getOrCreateCryptoManager(masterKeyProvider, cryptoMetadata.keyProviderName(), cryptoMetadata.keyProviderType(), onClose); + return Objects.requireNonNull(cryptoHandlerPlugin.get()) + .getOrCreateCryptoHandler(masterKeyProvider, cryptoMetadata.keyProviderName(), cryptoMetadata.keyProviderType(), onClose); } catch (Exception e) { logger.warn( diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java index 4df18e4014c3d..e42ac8daa3b1c 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java @@ -63,6 +63,8 @@ import org.opensearch.env.NodeMetadata; import org.opensearch.gateway.remote.ClusterMetadataManifest; import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.index.recovery.RemoteStoreRestoreService; +import org.opensearch.index.recovery.RemoteStoreRestoreService.RemoteRestoreResult; import org.opensearch.node.Node; import org.opensearch.plugins.MetadataUpgrader; import org.opensearch.repositories.RepositoryMissingException; @@ -84,7 +86,7 @@ import java.util.function.UnaryOperator; import static org.opensearch.common.util.concurrent.OpenSearchExecutors.daemonThreadFactory; -import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled; /** * Loads (and maybe upgrades) cluster metadata at startup, and persistently stores cluster metadata for future restarts. 
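A note on the caching in fetchCryptoHandler above: the registry keeps one CryptoHandler per CryptoMetadata, uses the same check-then-lock-then-recheck idiom as initRegistry, and hands each handler an onClose hook that evicts the cached entry when the handler is closed. A minimal standalone sketch of that get-or-create pattern; the K and H type parameters and the Factory interface are illustrative stand-ins, not OpenSearch types:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Get-or-create cache with a close hook that evicts the entry, mirroring
    // CryptoHandlerRegistry.fetchCryptoHandler. A ConcurrentHashMap makes the
    // unsynchronized fast-path read safe; the lock only guards creation.
    class HandlerCache<K, H> {
        interface Factory<K, H> {
            H create(K key, Runnable onClose);
        }

        private final Map<K, H> handlers = new ConcurrentHashMap<>();

        H getOrCreate(K key, Factory<K, H> factory) {
            H handler = handlers.get(key);
            if (handler == null) {
                synchronized (handlers) {
                    handler = handlers.get(key);
                    if (handler == null) {
                        // When the handler is closed it removes itself, so the
                        // next fetch builds a fresh one for the same metadata.
                        handler = factory.create(key, () -> handlers.remove(key));
                        handlers.put(key, handler);
                    }
                }
            }
            return handler;
        }
    }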
@@ -126,7 +128,8 @@ public void start( MetadataUpgrader metadataUpgrader, PersistedClusterStateService persistedClusterStateService, RemoteClusterStateService remoteClusterStateService, - PersistedStateRegistry persistedStateRegistry + PersistedStateRegistry persistedStateRegistry, + RemoteStoreRestoreService remoteStoreRestoreService ) { assert this.persistedStateRegistry == null : "Persisted state registry should only be set once"; this.persistedStateRegistry = persistedStateRegistry; @@ -154,7 +157,7 @@ public void start( PersistedState remotePersistedState = null; boolean success = false; try { - final ClusterState clusterState = prepareInitialClusterState( + ClusterState clusterState = prepareInitialClusterState( transportService, clusterService, ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings)) @@ -164,10 +167,28 @@ public void start( ); if (DiscoveryNode.isClusterManagerNode(settings)) { - persistedState = new LucenePersistedState(persistedClusterStateService, currentTerm, clusterState); - if (REMOTE_CLUSTER_STATE_ENABLED_SETTING.get(settings) == true) { + if (isRemoteStoreClusterStateEnabled(settings)) { + // If the cluster UUID loaded from local is unknown (_na_) then fetch the best state from remote + // If there is no valid state on remote, continue with initial empty state + // If there is a valid state, then restore index metadata using this state + if (ClusterState.UNKNOWN_UUID.equals(clusterState.metadata().clusterUUID())) { + String lastKnownClusterUUID = remoteClusterStateService.getLastKnownUUIDFromRemote( + clusterState.getClusterName().value() + ); + if (!ClusterState.UNKNOWN_UUID.equals(lastKnownClusterUUID)) { + // Load state from remote + final RemoteRestoreResult remoteRestoreResult = remoteStoreRestoreService.restore( + clusterState, + lastKnownClusterUUID, + false, + new String[] {} + ); + clusterState = remoteRestoreResult.getClusterState(); + } + } remotePersistedState = new RemotePersistedState(remoteClusterStateService); } + persistedState = new LucenePersistedState(persistedClusterStateService, currentTerm, clusterState); } else { persistedState = new AsyncLucenePersistedState( settings, @@ -651,12 +672,6 @@ public void setCurrentTerm(long currentTerm) { @Override public void setLastAcceptedState(ClusterState clusterState) { try { - if (lastAcceptedState == null || lastAcceptedState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { - // On the initial bootstrap, repository will not be available. So we do not persist the cluster state and bail out. - logger.info("Cluster is not yet ready to publish state to remote store"); - lastAcceptedState = clusterState; - return; - } final ClusterMetadataManifest manifest; if (shouldWriteFullClusterState(clusterState)) { manifest = remoteClusterStateService.writeFullMetadata(clusterState); @@ -706,13 +721,8 @@ private boolean shouldWriteFullClusterState(ClusterState clusterState) { @Override public void markLastAcceptedStateAsCommitted() { try { - if (lastAcceptedState == null - || lastAcceptedManifest == null - || lastAcceptedState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { - // On the initial bootstrap, repository will not be available. So we do not persist the cluster state and bail out. 
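For orientation, the start() hunk above reduces to one decision on cluster-manager nodes: if the locally loaded state still carries ClusterState.UNKNOWN_UUID, look up the last known UUID in the remote store and, when one exists, restore index metadata from it before building the Lucene-persisted state. A condensed sketch of that flow, reusing the call signatures from this diff rather than offering a drop-in method:

    // Condensed from GatewayMetaState.start above; the types and signatures
    // are the ones introduced in this diff.
    ClusterState seedFromRemoteIfUnknown(
        ClusterState clusterState,
        RemoteClusterStateService remoteClusterStateService,
        RemoteStoreRestoreService remoteStoreRestoreService
    ) {
        if (ClusterState.UNKNOWN_UUID.equals(clusterState.metadata().clusterUUID()) == false) {
            return clusterState; // local state already belongs to a known cluster UUID
        }
        String lastKnownClusterUUID = remoteClusterStateService.getLastKnownUUIDFromRemote(
            clusterState.getClusterName().value()
        );
        if (ClusterState.UNKNOWN_UUID.equals(lastKnownClusterUUID)) {
            return clusterState; // no valid state on remote; continue with the empty state
        }
        // Restore index metadata from the last known cluster state on remote.
        return remoteStoreRestoreService.restore(clusterState, lastKnownClusterUUID, false, new String[] {}).getClusterState();
    }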
- logger.trace("Cluster is not yet ready to publish state to remote store"); - return; - } + assert lastAcceptedState != null : "Last accepted state is not present"; + assert lastAcceptedManifest != null : "Last accepted manifest is not present"; final ClusterMetadataManifest committedManifest = remoteClusterStateService.markLastStateAsCommitted( lastAcceptedState, lastAcceptedManifest diff --git a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java index cac77f9996438..0ebbdc81661ad 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java +++ b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java @@ -41,6 +41,7 @@ public class ClusterMetadataManifest implements Writeable, ToXContentFragment { private static final ParseField NODE_ID_FIELD = new ParseField("node_id"); private static final ParseField COMMITTED_FIELD = new ParseField("committed"); private static final ParseField INDICES_FIELD = new ParseField("indices"); + private static final ParseField PREVIOUS_CLUSTER_UUID = new ParseField("previous_cluster_uuid"); private static long term(Object[] fields) { return (long) fields[0]; @@ -74,6 +75,10 @@ private static List indices(Object[] fields) { return (List) fields[7]; } + private static String previousClusterUUID(Object[] fields) { + return (String) fields[8]; + } + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "cluster_metadata_manifest", fields -> new ClusterMetadataManifest( @@ -84,7 +89,8 @@ private static List indices(Object[] fields) { opensearchVersion(fields), nodeId(fields), committed(fields), - indices(fields) + indices(fields), + previousClusterUUID(fields) ) ); @@ -101,6 +107,7 @@ private static List indices(Object[] fields) { (p, c) -> UploadedIndexMetadata.fromXContent(p), INDICES_FIELD ); + PARSER.declareString(ConstructingObjectParser.constructorArg(), PREVIOUS_CLUSTER_UUID); } private final List indices; @@ -111,6 +118,7 @@ private static List indices(Object[] fields) { private final Version opensearchVersion; private final String nodeId; private final boolean committed; + private final String previousClusterUUID; public List getIndices() { return indices; @@ -144,6 +152,10 @@ public boolean isCommitted() { return committed; } + public String getPreviousClusterUUID() { + return previousClusterUUID; + } + public ClusterMetadataManifest( long clusterTerm, long version, @@ -152,7 +164,8 @@ public ClusterMetadataManifest( Version opensearchVersion, String nodeId, boolean committed, - List indices + List indices, + String previousClusterUUID ) { this.clusterTerm = clusterTerm; this.stateVersion = version; @@ -162,6 +175,7 @@ public ClusterMetadataManifest( this.nodeId = nodeId; this.committed = committed; this.indices = Collections.unmodifiableList(indices); + this.previousClusterUUID = previousClusterUUID; } public ClusterMetadataManifest(StreamInput in) throws IOException { @@ -173,6 +187,7 @@ public ClusterMetadataManifest(StreamInput in) throws IOException { this.nodeId = in.readString(); this.committed = in.readBoolean(); this.indices = Collections.unmodifiableList(in.readList(UploadedIndexMetadata::new)); + this.previousClusterUUID = in.readString(); } public static Builder builder() { @@ -199,6 +214,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } builder.endArray(); + 
builder.field(PREVIOUS_CLUSTER_UUID.getPreferredName(), getPreviousClusterUUID()); return builder; } @@ -212,6 +228,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(nodeId); out.writeBoolean(committed); out.writeCollection(indices); + out.writeString(previousClusterUUID); } @Override @@ -230,12 +247,23 @@ public boolean equals(Object o) { && Objects.equals(stateUUID, that.stateUUID) && Objects.equals(opensearchVersion, that.opensearchVersion) && Objects.equals(nodeId, that.nodeId) - && Objects.equals(committed, that.committed); + && Objects.equals(committed, that.committed) + && Objects.equals(previousClusterUUID, that.previousClusterUUID); } @Override public int hashCode() { - return Objects.hash(indices, clusterTerm, stateVersion, clusterUUID, stateUUID, opensearchVersion, nodeId, committed); + return Objects.hash( + indices, + clusterTerm, + stateVersion, + clusterUUID, + stateUUID, + opensearchVersion, + nodeId, + committed, + previousClusterUUID + ); } @Override @@ -261,6 +289,7 @@ public static class Builder { private String stateUUID; private Version opensearchVersion; private String nodeId; + private String previousClusterUUID; private boolean committed; public Builder indices(List indices) { @@ -307,6 +336,11 @@ public List getIndices() { return indices; } + public Builder previousClusterUUID(String previousClusterUUID) { + this.previousClusterUUID = previousClusterUUID; + return this; + } + public Builder() { indices = new ArrayList<>(); } @@ -320,6 +354,7 @@ public Builder(ClusterMetadataManifest manifest) { this.nodeId = manifest.nodeId; this.committed = manifest.committed; this.indices = new ArrayList<>(manifest.indices); + this.previousClusterUUID = manifest.previousClusterUUID; } public ClusterMetadataManifest build() { @@ -331,7 +366,8 @@ public ClusterMetadataManifest build() { opensearchVersion, nodeId, committed, - indices + indices, + previousClusterUUID ); } diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index 980c2f9cf3ce4..2aa3384b0f33a 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -10,7 +10,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.Version; +import org.opensearch.action.LatchedActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Nullable; @@ -22,8 +24,12 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.Index; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.node.Node; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.repositories.blobstore.BlobStoreRepository; @@ -34,17 +40,23 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Base64; +import java.util.Collections; import java.util.HashMap; 
import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.function.LongSupplier; import java.util.function.Supplier; import java.util.stream.Collectors; import static org.opensearch.gateway.PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled; /** * A Service which provides APIs to upload and download cluster metadata from remote store. @@ -57,6 +69,8 @@ public class RemoteClusterStateService implements Closeable { public static final String METADATA_MANIFEST_NAME_FORMAT = "%s"; + public static final int INDEX_METADATA_UPLOAD_WAIT_MILLIS = 20000; + public static final ChecksumBlobStoreFormat INDEX_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( "index-metadata", METADATA_NAME_FORMAT, @@ -71,26 +85,21 @@ public class RemoteClusterStateService implements Closeable { /** * Used to specify if cluster state metadata should be published to remote store */ - // TODO The remote state enabled and repository settings should be read from node attributes. - // Dependent on https://github.com/opensearch-project/OpenSearch/pull/9105/ public static final Setting REMOTE_CLUSTER_STATE_ENABLED_SETTING = Setting.boolSetting( "cluster.remote_store.state.enabled", false, Property.NodeScope, Property.Final ); - /** - * Used to specify default repo to use for cluster state metadata upload - */ - public static final Setting REMOTE_CLUSTER_STATE_REPOSITORY_SETTING = Setting.simpleString( - "cluster.remote_store.state.repository", - "", - Property.NodeScope, - Property.Final - ); + private static final Logger logger = LogManager.getLogger(RemoteClusterStateService.class); public static final String DELIMITER = "__"; + private static final String CLUSTER_STATE_PATH_TOKEN = "cluster-state"; + private static final String INDEX_PATH_TOKEN = "index"; + private static final String MANIFEST_PATH_TOKEN = "manifest"; + private static final String MANIFEST_FILE_PREFIX = "manifest"; + private static final String INDEX_METADATA_FILE_PREFIX = "metadata"; private final String nodeId; private final Supplier repositoriesService; @@ -106,7 +115,7 @@ public RemoteClusterStateService( ClusterSettings clusterSettings, LongSupplier relativeTimeNanosSupplier ) { - assert REMOTE_CLUSTER_STATE_ENABLED_SETTING.get(settings) == true : "Remote cluster state is not enabled"; + assert isRemoteStoreClusterStateEnabled(settings) : "Remote cluster state is not enabled"; this.nodeId = nodeId; this.repositoriesService = repositoriesService; this.settings = settings; @@ -128,27 +137,20 @@ public ClusterMetadataManifest writeFullMetadata(ClusterState clusterState) thro logger.error("Local node is not elected cluster manager. Exiting"); return null; } - ensureRepositorySet(); - final List allUploadedIndexMetadata = new ArrayList<>(); - // todo parallel upload + // We should fetch the previous cluster UUID before writing the full cluster state. + // Whenever a new election happens, a new leader will be elected, and it might have a stale previous UUID. + final String previousClusterUUID = fetchPreviousClusterUUID( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ); + // Any validations before/after upload?
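The new path-token constants above compose into the blob layout shown by the inline example comments in this file: the repository base path, then the URL-safe Base64 encoding of the cluster name, then cluster-state, the cluster UUID, index, and the index UUID. A small self-contained illustration with made-up IDs; encodeString mirrors the helper added near the end of this class, and the file name joins its parts with the double-underscore DELIMITER:

    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    // Prints the index-metadata blob path layout with made-up IDs; the real
    // path is additionally prefixed by the repository base path.
    public class RemoteStatePathDemo {
        static String encodeString(String content) {
            return Base64.getUrlEncoder().withoutPadding().encodeToString(content.getBytes(StandardCharsets.UTF_8));
        }

        public static void main(String[] args) {
            String path = String.join(
                "/",
                encodeString("test-cluster"), // encoded cluster name
                "cluster-state",              // CLUSTER_STATE_PATH_TOKEN
                "dsgYj10Nkso7",               // cluster UUID
                "index",                      // INDEX_PATH_TOKEN
                "ftqsCnn9TgOX"                // index UUID
            );
            // metadata__<index metadata version>__<upload millis>
            String fileName = String.join("__", "metadata", "4", "1690947200");
            System.out.println(path + "/" + fileName);
        }
    }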
- for (IndexMetadata indexMetadata : clusterState.metadata().indices().values()) { - // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/index/ftqsCnn9TgOX/metadata_4_1690947200 - final String indexMetadataKey = writeIndexMetadata( - clusterState.getClusterName().value(), - clusterState.getMetadata().clusterUUID(), - indexMetadata, - indexMetadataFileName(indexMetadata) - ); - final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata( - indexMetadata.getIndex().getName(), - indexMetadata.getIndexUUID(), - indexMetadataKey - ); - allUploadedIndexMetadata.add(uploadedIndexMetadata); - } - final ClusterMetadataManifest manifest = uploadManifest(clusterState, allUploadedIndexMetadata, false); + final List allUploadedIndexMetadata = writeIndexMetadataParallel( + clusterState, + new ArrayList<>(clusterState.metadata().indices().values()) + ); + final ClusterMetadataManifest manifest = uploadManifest(clusterState, allUploadedIndexMetadata, previousClusterUUID, false); final long durationMillis = TimeValue.nsecToMSec(relativeTimeNanosSupplier.getAsLong() - startTimeNanos); if (durationMillis >= slowWriteLoggingThreshold.getMillis()) { logger.warn( @@ -170,8 +172,8 @@ public ClusterMetadataManifest writeFullMetadata(ClusterState clusterState) thro /** * This method uploads the diff between the previous cluster state and the current cluster state. The previous manifest file is needed to create the new - * manifest. The new manifest file is created by using the unchanged metadata from the previous manifest and the new metadata changes from the current cluster - * state. + * manifest. The new manifest file is created by using the unchanged metadata from the previous manifest and the new metadata changes from the current + * cluster state. 
* * @return The uploaded ClusterMetadataManifest file */ @@ -197,6 +199,9 @@ public ClusterMetadataManifest writeIncrementalMetadata( final Map allUploadedIndexMetadata = previousManifest.getIndices() .stream() .collect(Collectors.toMap(UploadedIndexMetadata::getIndexName, Function.identity())); + + List toUpload = new ArrayList<>(); + for (final IndexMetadata indexMetadata : clusterState.metadata().indices().values()) { final Long previousVersion = previousStateIndexMetadataVersionByName.get(indexMetadata.getIndex().getName()); if (previousVersion == null || indexMetadata.getVersion() != previousVersion) { @@ -207,30 +212,25 @@ public ClusterMetadataManifest writeIncrementalMetadata( indexMetadata.getVersion() ); numIndicesUpdated++; - final String indexMetadataKey = writeIndexMetadata( - clusterState.getClusterName().value(), - clusterState.getMetadata().clusterUUID(), - indexMetadata, - indexMetadataFileName(indexMetadata) - ); - final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata( - indexMetadata.getIndex().getName(), - indexMetadata.getIndexUUID(), - indexMetadataKey - ); - allUploadedIndexMetadata.put(indexMetadata.getIndex().getName(), uploadedIndexMetadata); + toUpload.add(indexMetadata); } else { numIndicesUnchanged++; } previousStateIndexMetadataVersionByName.remove(indexMetadata.getIndex().getName()); } + List uploadedIndexMetadataList = writeIndexMetadataParallel(clusterState, toUpload); + uploadedIndexMetadataList.forEach( + uploadedIndexMetadata -> allUploadedIndexMetadata.put(uploadedIndexMetadata.getIndexName(), uploadedIndexMetadata) + ); + for (String removedIndexName : previousStateIndexMetadataVersionByName.keySet()) { allUploadedIndexMetadata.remove(removedIndexName); } final ClusterMetadataManifest manifest = uploadManifest( clusterState, - allUploadedIndexMetadata.values().stream().collect(Collectors.toList()), + new ArrayList<>(allUploadedIndexMetadata.values()), + previousManifest.getPreviousClusterUUID(), false ); final long durationMillis = TimeValue.nsecToMSec(relativeTimeNanosSupplier.getAsLong() - startTimeNanos); @@ -254,6 +254,118 @@ public ClusterMetadataManifest writeIncrementalMetadata( return manifest; }
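writeIndexMetadataParallel, defined next, fans the uploads out asynchronously and then blocks on a CountDownLatch until every upload has reported back, attaching failures as suppressed exceptions. A stripped-down sketch of that fan-out-and-await pattern; AsyncUpload is an assumed stand-in for the INDEX_METADATA_FORMAT.writeAsync call, and the callback plays the role of the LatchedActionListener:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;
    import java.util.function.BiConsumer;

    class ParallelUploadSketch {
        // Callback is invoked with (result, null) on success or (null, error) on failure.
        interface AsyncUpload<T, R> {
            void upload(T item, BiConsumer<R, Exception> callback);
        }

        static <T, R> List<R> uploadAll(List<T> items, AsyncUpload<T, R> uploadAsync, long timeoutMillis) throws Exception {
            CountDownLatch latch = new CountDownLatch(items.size());
            List<R> results = Collections.synchronizedList(new ArrayList<>());
            List<Exception> failures = Collections.synchronizedList(new ArrayList<>());
            for (T item : items) {
                uploadAsync.upload(item, (result, error) -> {
                    if (error == null) {
                        results.add(result);
                    } else {
                        failures.add(error);
                    }
                    latch.countDown(); // the LatchedActionListener does this in the PR
                });
            }
            if (latch.await(timeoutMillis, TimeUnit.MILLISECONDS) == false) {
                throw new Exception("timed out waiting for " + latch.getCount() + " uploads");
            }
            if (failures.isEmpty() == false) {
                Exception root = new Exception(failures.size() + " uploads failed");
                failures.forEach(root::addSuppressed);
                throw root;
            }
            return results;
        }
    }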
+ /** + * Uploads the provided IndexMetadata objects to the remote store in parallel. The call is blocking, so the method waits for all uploads to finish before returning. + * + * @param clusterState current ClusterState + * @param toUpload list of IndexMetadata to upload + * @return {@code List} list of IndexMetadata uploaded to remote + */ + private List writeIndexMetadataParallel(ClusterState clusterState, List toUpload) + throws IOException { + List exceptionList = Collections.synchronizedList(new ArrayList<>(toUpload.size())); + final CountDownLatch latch = new CountDownLatch(toUpload.size()); + List result = new ArrayList<>(toUpload.size()); + + LatchedActionListener latchedActionListener = new LatchedActionListener<>( + ActionListener.wrap((UploadedIndexMetadata uploadedIndexMetadata) -> { + logger.trace( + String.format(Locale.ROOT, "IndexMetadata uploaded successfully for %s", uploadedIndexMetadata.getIndexName()) + ); + result.add(uploadedIndexMetadata); + }, ex -> { + assert ex instanceof IndexMetadataTransferException; + logger.error( + () -> new ParameterizedMessage("Exception during transfer of IndexMetadata to Remote {}", ex.getMessage()), + ex + ); + exceptionList.add(ex); + }), + latch + ); + + for (IndexMetadata indexMetadata : toUpload) { + // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/index/ftqsCnn9TgOX/metadata_4_1690947200 + writeIndexMetadataAsync(clusterState, indexMetadata, latchedActionListener); + } + + try { + if (latch.await(INDEX_METADATA_UPLOAD_WAIT_MILLIS, TimeUnit.MILLISECONDS) == false) { + IndexMetadataTransferException ex = new IndexMetadataTransferException( + String.format( + Locale.ROOT, + "Timed out waiting for transfer of index metadata to complete - %s", + toUpload.stream().map(IndexMetadata::getIndex).map(Index::toString).collect(Collectors.joining("")) + ) + ); + exceptionList.forEach(ex::addSuppressed); + throw ex; + } + } catch (InterruptedException ex) { + exceptionList.forEach(ex::addSuppressed); + IndexMetadataTransferException exception = new IndexMetadataTransferException( + String.format( + Locale.ROOT, + "Interrupted while waiting for transfer of index metadata to complete - %s", + toUpload.stream().map(IndexMetadata::getIndex).map(Index::toString).collect(Collectors.joining("")) + ), + ex + ); + Thread.currentThread().interrupt(); + throw exception; + } + if (exceptionList.size() > 0) { + IndexMetadataTransferException exception = new IndexMetadataTransferException( + String.format( + Locale.ROOT, + "Exception during transfer of IndexMetadata to Remote %s", + toUpload.stream().map(IndexMetadata::getIndex).map(Index::toString).collect(Collectors.joining("")) + ) + ); + exceptionList.forEach(exception::addSuppressed); + throw exception; + } + return result; + } + + /** + * Allows async upload of IndexMetadata to remote + * + * @param clusterState current ClusterState + * @param indexMetadata {@link IndexMetadata} to upload + * @param latchedActionListener listener to respond back on after upload finishes + */ + private void writeIndexMetadataAsync( + ClusterState clusterState, + IndexMetadata indexMetadata, + LatchedActionListener latchedActionListener + ) throws IOException { + final BlobContainer indexMetadataContainer = indexMetadataContainer( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID(), + indexMetadata.getIndexUUID() + ); + final String indexMetadataFilename = indexMetadataFileName(indexMetadata); + ActionListener completionListener = ActionListener.wrap( + resp -> latchedActionListener.onResponse( + new UploadedIndexMetadata( + indexMetadata.getIndex().getName(), + indexMetadata.getIndexUUID(), + indexMetadataContainer.path().buildAsString() +
indexMetadataFilename + ) + ), + ex -> latchedActionListener.onFailure(new IndexMetadataTransferException(indexMetadata.getIndex().toString(), ex)) + ); + + INDEX_METADATA_FORMAT.writeAsync( + indexMetadata, + indexMetadataContainer, + indexMetadataFilename, + blobStoreRepository.getCompressor(), + completionListener + ); + } + @Nullable public ClusterMetadataManifest markLastStateAsCommitted(ClusterState clusterState, ClusterMetadataManifest previousManifest) throws IOException { @@ -263,12 +375,7 @@ public ClusterMetadataManifest markLastStateAsCommitted(ClusterState clusterStat } assert clusterState != null : "Last accepted cluster state is not set"; assert previousManifest != null : "Last cluster metadata manifest is not set"; - return uploadManifest(clusterState, previousManifest.getIndices(), true); - } - - public ClusterState getLatestClusterState(String clusterUUID) { - // todo - return null; + return uploadManifest(clusterState, previousManifest.getIndices(), previousManifest.getPreviousClusterUUID(), true); } @Override @@ -278,13 +385,11 @@ public void close() throws IOException { } } - // Visible for testing - void ensureRepositorySet() { - if (blobStoreRepository != null) { - return; - } - assert REMOTE_CLUSTER_STATE_ENABLED_SETTING.get(settings) == true : "Remote cluster state is not enabled"; - final String remoteStoreRepo = REMOTE_CLUSTER_STATE_REPOSITORY_SETTING.get(settings); + public void start() { + assert isRemoteStoreClusterStateEnabled(settings) == true : "Remote cluster state is not enabled"; + final String remoteStoreRepo = settings.get( + Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY + ); assert remoteStoreRepo != null : "Remote Cluster State repository is not configured"; final Repository repository = repositoriesService.get().repository(remoteStoreRepo); assert repository instanceof BlobStoreRepository : "Repository should be instance of BlobStoreRepository"; @@ -294,6 +399,7 @@ void ensureRepositorySet() { private ClusterMetadataManifest uploadManifest( ClusterState clusterState, List uploadedIndexMetadata, + String previousClusterUUID, boolean committed ) throws IOException { synchronized (this) { @@ -306,36 +412,39 @@ private ClusterMetadataManifest uploadManifest( Version.CURRENT, nodeId, committed, - uploadedIndexMetadata + uploadedIndexMetadata, + previousClusterUUID ); writeMetadataManifest(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID(), manifest, manifestFileName); return manifest; } } - private String writeIndexMetadata(String clusterName, String clusterUUID, IndexMetadata uploadIndexMetadata, String fileName) - throws IOException { - final BlobContainer indexMetadataContainer = indexMetadataContainer(clusterName, clusterUUID, uploadIndexMetadata.getIndexUUID()); - INDEX_METADATA_FORMAT.write(uploadIndexMetadata, indexMetadataContainer, fileName, blobStoreRepository.getCompressor()); - // returning full path - return indexMetadataContainer.path().buildAsString() + fileName; - } - private void writeMetadataManifest(String clusterName, String clusterUUID, ClusterMetadataManifest uploadManifest, String fileName) throws IOException { final BlobContainer metadataManifestContainer = manifestContainer(clusterName, clusterUUID); CLUSTER_METADATA_MANIFEST_FORMAT.write(uploadManifest, metadataManifestContainer, fileName, blobStoreRepository.getCompressor()); } + private String fetchPreviousClusterUUID(String clusterName, String clusterUUID) { + final Optional 
latestManifest = getLatestClusterMetadataManifest(clusterName, clusterUUID); + if (!latestManifest.isPresent()) { + final String previousClusterUUID = getLastKnownUUIDFromRemote(clusterName); + assert !clusterUUID.equals(previousClusterUUID) : "Last cluster UUID is the same as the current cluster UUID"; + return previousClusterUUID; + } + return latestManifest.get().getPreviousClusterUUID(); + } + private BlobContainer indexMetadataContainer(String clusterName, String clusterUUID, String indexUUID) { // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/index/ftqsCnn9TgOX return blobStoreRepository.blobStore() .blobContainer( blobStoreRepository.basePath() - .add(Base64.getUrlEncoder().withoutPadding().encodeToString(clusterName.getBytes(StandardCharsets.UTF_8))) - .add("cluster-state") + .add(encodeString(clusterName)) + .add(CLUSTER_STATE_PATH_TOKEN) .add(clusterUUID) - .add("index") + .add(INDEX_PATH_TOKEN) .add(indexUUID) ); } @@ -345,10 +454,19 @@ private BlobContainer manifestContainer(String clusterName, String clusterUUID) return blobStoreRepository.blobStore() .blobContainer( blobStoreRepository.basePath() - .add(Base64.getUrlEncoder().withoutPadding().encodeToString(clusterName.getBytes(StandardCharsets.UTF_8))) - .add("cluster-state") + .add(encodeString(clusterName)) + .add(CLUSTER_STATE_PATH_TOKEN) .add(clusterUUID) - .add("manifest") + .add(MANIFEST_PATH_TOKEN) + ); + } + + private BlobContainer clusterUUIDContainer(String clusterName) { + return blobStoreRepository.blobStore() + .blobContainer( + blobStoreRepository.basePath() + .add(Base64.getUrlEncoder().withoutPadding().encodeToString(clusterName.getBytes(StandardCharsets.UTF_8))) + .add(CLUSTER_STATE_PATH_TOKEN) ); } @@ -360,7 +478,7 @@ private static String getManifestFileName(long term, long version) { // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/manifest/manifest_2147483642_2147483637_456536447 return String.join( DELIMITER, - "manifest", + MANIFEST_FILE_PREFIX, RemoteStoreUtils.invertLong(term), RemoteStoreUtils.invertLong(version), RemoteStoreUtils.invertLong(System.currentTimeMillis()) @@ -368,21 +486,31 @@ private static String getManifestFileName(long term, long version) { } private static String indexMetadataFileName(IndexMetadata indexMetadata) { - return String.join(DELIMITER, "metadata", String.valueOf(indexMetadata.getVersion()), String.valueOf(System.currentTimeMillis())); + return String.join( + DELIMITER, + INDEX_METADATA_FILE_PREFIX, + String.valueOf(indexMetadata.getVersion()), + String.valueOf(System.currentTimeMillis()) + ); } /** * Fetch latest index metadata from remote cluster state + * * @param clusterUUID uuid of cluster state to refer to in remote * @param clusterName name of the cluster * @return {@code Map} latest IndexUUID to IndexMetadata map */ public Map getLatestIndexMetadata(String clusterName, String clusterUUID) throws IOException { + start(); Map remoteIndexMetadata = new HashMap<>(); - ClusterMetadataManifest clusterMetadataManifest = getLatestClusterMetadataManifest(clusterName, clusterUUID); - assert Objects.equals(clusterUUID, clusterMetadataManifest.getClusterUUID()) + Optional clusterMetadataManifest = getLatestClusterMetadataManifest(clusterName, clusterUUID); + if (!clusterMetadataManifest.isPresent()) { + throw new IllegalStateException("Latest index metadata is not present for the provided clusterUUID"); + } + assert Objects.equals(clusterUUID, clusterMetadataManifest.get().getClusterUUID()) : "Corrupt ClusterMetadataManifest found.
Cluster UUID mismatch."; - for (UploadedIndexMetadata uploadedIndexMetadata : clusterMetadataManifest.getIndices()) { + for (UploadedIndexMetadata uploadedIndexMetadata : clusterMetadataManifest.get().getIndices()) { IndexMetadata indexMetadata = getIndexMetadata(clusterName, clusterUUID, uploadedIndexMetadata); remoteIndexMetadata.put(uploadedIndexMetadata.getIndexUUID(), indexMetadata); } @@ -391,6 +519,7 @@ public Map getLatestIndexMetadata(String clusterName, Str /** * Fetch index metadata from remote cluster state + * * @param clusterUUID uuid of cluster state to refer to in remote * @param clusterName name of the cluster * @param uploadedIndexMetadata {@link UploadedIndexMetadata} contains details about remote location of index metadata @@ -398,9 +527,10 @@ public Map getLatestIndexMetadata(String clusterName, Str */ private IndexMetadata getIndexMetadata(String clusterName, String clusterUUID, UploadedIndexMetadata uploadedIndexMetadata) { try { + String[] splitPath = uploadedIndexMetadata.getUploadedFilename().split("/"); return INDEX_METADATA_FORMAT.read( indexMetadataContainer(clusterName, clusterUUID, uploadedIndexMetadata.getIndexUUID()), - uploadedIndexMetadata.getUploadedFilename(), + splitPath[splitPath.length - 1], blobStoreRepository.getNamedXContentRegistry() ); } catch (IOException e) { @@ -413,22 +543,115 @@ private IndexMetadata getIndexMetadata(String clusterName, String clusterUUID, U /** * Fetch latest ClusterMetadataManifest from remote state store + * * @param clusterUUID uuid of cluster state to refer to in remote * @param clusterName name of the cluster * @return ClusterMetadataManifest */ - public ClusterMetadataManifest getLatestClusterMetadataManifest(String clusterName, String clusterUUID) { - String latestManifestFileName = getLatestManifestFileName(clusterName, clusterUUID); - return fetchRemoteClusterMetadataManifest(clusterName, clusterUUID, latestManifestFileName); + public Optional getLatestClusterMetadataManifest(String clusterName, String clusterUUID) { + Optional latestManifestFileName = getLatestManifestFileName(clusterName, clusterUUID); + if (latestManifestFileName.isPresent()) { + return Optional.of(fetchRemoteClusterMetadataManifest(clusterName, clusterUUID, latestManifestFileName.get())); + } + return Optional.empty(); + } + + /** + * Fetch the previous cluster UUIDs from remote state store and return the most recent valid cluster UUID + * + * @param clusterName The cluster name for which previous cluster UUID is to be fetched + * @return Last valid cluster UUID + */ + public String getLastKnownUUIDFromRemote(String clusterName) { + try { + Set clusterUUIDs = getAllClusterUUIDs(clusterName); + Map latestManifests = getLatestManifestForAllClusterUUIDs(clusterName, clusterUUIDs); + List validChain = createClusterChain(latestManifests); + if (validChain.isEmpty()) { + return ClusterState.UNKNOWN_UUID; + } + return validChain.get(0); + } catch (IOException e) { + throw new IllegalStateException( + String.format(Locale.ROOT, "Error while fetching previous UUIDs from remote store for cluster name: %s", clusterName) + ); + } + } + + private Set getAllClusterUUIDs(String clusterName) throws IOException { + Map clusterUUIDMetadata = clusterUUIDContainer(clusterName).children(); + if (clusterUUIDMetadata == null) { + return Collections.emptySet(); + } + return Collections.unmodifiableSet(clusterUUIDMetadata.keySet()); + } + + private Map getLatestManifestForAllClusterUUIDs(String clusterName, Set clusterUUIDs) { + Map manifestsByClusterUUID = new 
HashMap<>(); + for (String clusterUUID : clusterUUIDs) { + try { + Optional manifest = getLatestClusterMetadataManifest(clusterName, clusterUUID); + manifestsByClusterUUID.put(clusterUUID, manifest.get()); + } catch (Exception e) { + throw new IllegalStateException( + String.format(Locale.ROOT, "Exception in fetching manifest for clusterUUID: %s", clusterUUID) + ); + } + } + return manifestsByClusterUUID; + } + + /** + * This method creates a valid cluster UUID chain. + * + * @param manifestsByClusterUUID Map of latest ClusterMetadataManifest for every cluster UUID + * @return List of cluster UUIDs. The first element is the most recent cluster UUID in the chain + */ + private List createClusterChain(final Map manifestsByClusterUUID) { + final Map clusterUUIDGraph = manifestsByClusterUUID.values() + .stream() + .collect(Collectors.toMap(ClusterMetadataManifest::getClusterUUID, ClusterMetadataManifest::getPreviousClusterUUID)); + final List validClusterUUIDs = manifestsByClusterUUID.values() + .stream() + .filter(m -> !isInvalidClusterUUID(m) && !clusterUUIDGraph.containsValue(m.getClusterUUID())) + .map(ClusterMetadataManifest::getClusterUUID) + .collect(Collectors.toList()); + if (validClusterUUIDs.isEmpty()) { + logger.info("There is no valid previous cluster UUID"); + return Collections.emptyList(); + } + if (validClusterUUIDs.size() > 1) { + throw new IllegalStateException( + String.format( + Locale.ROOT, + "The system has ended into multiple valid cluster states in the remote store. " + + "Please check their latest manifest to decide which one you want to keep. Valid Cluster UUIDs: - %s", + validClusterUUIDs + ) + ); + } + final List validChain = new ArrayList<>(); + String currentUUID = validClusterUUIDs.get(0); + while (!ClusterState.UNKNOWN_UUID.equals(currentUUID)) { + validChain.add(currentUUID); + // Getting the previous cluster UUID of a cluster UUID from the clusterUUID Graph + currentUUID = clusterUUIDGraph.get(currentUUID); + } + return validChain; + } + + private boolean isInvalidClusterUUID(ClusterMetadataManifest manifest) { + return !manifest.isCommitted() && manifest.getIndices().isEmpty(); } /** * Fetch latest ClusterMetadataManifest file from remote state store + * * @param clusterUUID uuid of cluster state to refer to in remote * @param clusterName name of the cluster * @return latest ClusterMetadataManifest filename */ - private String getLatestManifestFileName(String clusterName, String clusterUUID) throws IllegalStateException { + private Optional getLatestManifestFileName(String clusterName, String clusterUUID) throws IllegalStateException { try { /** * {@link BlobContainer#listBlobsByPrefixInSortedOrder} will get the latest manifest file @@ -436,22 +659,23 @@ private String getLatestManifestFileName(String clusterName, String clusterUUID) * when sorted in LEXICOGRAPHIC order the latest uploaded manifest file comes on top. 
/** * Fetch latest ClusterMetadataManifest file from remote state store + * * @param clusterUUID uuid of cluster state to refer to in remote * @param clusterName name of the cluster * @return latest ClusterMetadataManifest filename */ - private String getLatestManifestFileName(String clusterName, String clusterUUID) throws IllegalStateException { + private Optional getLatestManifestFileName(String clusterName, String clusterUUID) throws IllegalStateException { try { /** * {@link BlobContainer#listBlobsByPrefixInSortedOrder} will get the latest manifest file * when sorted in LEXICOGRAPHIC order the latest uploaded manifest file comes on top. */ List manifestFilesMetadata = manifestContainer(clusterName, clusterUUID).listBlobsByPrefixInSortedOrder( - "manifest" + DELIMITER, + MANIFEST_FILE_PREFIX + DELIMITER, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC ); if (manifestFilesMetadata != null && !manifestFilesMetadata.isEmpty()) { - return manifestFilesMetadata.get(0).name(); + return Optional.of(manifestFilesMetadata.get(0).name()); } } catch (IOException e) { throw new IllegalStateException("Error while fetching latest manifest file for remote cluster state", e); } - - throw new IllegalStateException(String.format(Locale.ROOT, "Remote Cluster State not found - %s", clusterUUID)); + logger.info("No manifest file present in remote store for cluster name: {}, cluster UUID: {}", clusterName, clusterUUID); + return Optional.empty(); } /** * Fetch ClusterMetadataManifest from remote state store + * * @param clusterUUID uuid of cluster state to refer to in remote * @param clusterName name of the cluster * @return ClusterMetadataManifest @@ -468,4 +692,22 @@ private ClusterMetadataManifest fetchRemoteClusterMetadataManifest(String cluste throw new IllegalStateException(String.format(Locale.ROOT, "Error while downloading cluster metadata - %s", filename), e); } } + + public static String encodeString(String content) { + return Base64.getUrlEncoder().withoutPadding().encodeToString(content.getBytes(StandardCharsets.UTF_8)); + } + + /** + * Exception for IndexMetadata transfer failures to remote + */ + static class IndexMetadataTransferException extends RuntimeException { + + public IndexMetadataTransferException(String errorDesc) { + super(errorDesc); + } + + public IndexMetadataTransferException(String errorDesc, Throwable cause) { + super(errorDesc, cause); + } + } } diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index ff2a67e99a94d..03c71351294d5 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -1063,11 +1063,11 @@ public boolean isSegRepEnabled() { } public boolean isSegRepLocalEnabled() { - return isSegRepEnabled() && !isSegRepWithRemoteEnabled(); + return isSegRepEnabled() && !isRemoteStoreEnabled(); } public boolean isSegRepWithRemoteEnabled() { - return isSegRepEnabled() && isRemoteStoreEnabled() && FeatureFlags.isEnabled(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL); + return isSegRepEnabled() && isRemoteStoreEnabled(); } /** diff --git a/server/src/main/java/org/opensearch/index/codec/CodecAliases.java b/server/src/main/java/org/opensearch/index/codec/CodecAliases.java new file mode 100644 index 0000000000000..066c092e86db8 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/CodecAliases.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec; + +import org.apache.lucene.codecs.Codec; +import org.opensearch.common.annotation.ExperimentalApi; + +import java.util.Set; + +/** + * This {@link CodecAliases} interface provides aliases for the {@link Codec}. + * + * @opensearch.internal + */ +@ExperimentalApi +public interface CodecAliases { + + /** + * Retrieves a set of aliases for a codec. + * + * @return A non-null set of alias strings. If no aliases are available, an empty set should be returned.
+ */ + default Set aliases() { + return Set.of(); + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/CodecService.java b/server/src/main/java/org/opensearch/index/codec/CodecService.java index a1913d5fa2061..775fc88b504f5 100644 --- a/server/src/main/java/org/opensearch/index/codec/CodecService.java +++ b/server/src/main/java/org/opensearch/index/codec/CodecService.java @@ -39,14 +39,10 @@ import org.opensearch.common.Nullable; import org.opensearch.common.collect.MapBuilder; import org.opensearch.index.IndexSettings; -import org.opensearch.index.codec.customcodecs.ZstdCodec; -import org.opensearch.index.codec.customcodecs.ZstdNoDictCodec; import org.opensearch.index.mapper.MapperService; import java.util.Map; -import static org.opensearch.index.engine.EngineConfig.INDEX_CODEC_COMPRESSION_LEVEL_SETTING; - /** * Since Lucene 4.0 low level index segments are read and written through a * codec layer that allows to use use-case specific file formats & @@ -67,27 +63,20 @@ public class CodecService { * the raw unfiltered lucene default. useful for testing */ public static final String LUCENE_DEFAULT_CODEC = "lucene_default"; - public static final String ZSTD_CODEC = "zstd"; - public static final String ZSTD_NO_DICT_CODEC = "zstd_no_dict"; public CodecService(@Nullable MapperService mapperService, IndexSettings indexSettings, Logger logger) { final MapBuilder codecs = MapBuilder.newMapBuilder(); assert null != indexSettings; - int compressionLevel = indexSettings.getValue(INDEX_CODEC_COMPRESSION_LEVEL_SETTING); if (mapperService == null) { codecs.put(DEFAULT_CODEC, new Lucene95Codec()); codecs.put(LZ4, new Lucene95Codec()); codecs.put(BEST_COMPRESSION_CODEC, new Lucene95Codec(Mode.BEST_COMPRESSION)); codecs.put(ZLIB, new Lucene95Codec(Mode.BEST_COMPRESSION)); - codecs.put(ZSTD_CODEC, new ZstdCodec(compressionLevel)); - codecs.put(ZSTD_NO_DICT_CODEC, new ZstdNoDictCodec(compressionLevel)); } else { codecs.put(DEFAULT_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger)); codecs.put(LZ4, new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger)); codecs.put(BEST_COMPRESSION_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_COMPRESSION, mapperService, logger)); codecs.put(ZLIB, new PerFieldMappingPostingFormatCodec(Mode.BEST_COMPRESSION, mapperService, logger)); - codecs.put(ZSTD_CODEC, new ZstdCodec(mapperService, logger, compressionLevel)); - codecs.put(ZSTD_NO_DICT_CODEC, new ZstdNoDictCodec(mapperService, logger, compressionLevel)); } codecs.put(LUCENE_DEFAULT_CODEC, Codec.getDefault()); for (String codec : Codec.availableCodecs()) { diff --git a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java index 3351931a6b068..00f8ff6d3cd40 100644 --- a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java +++ b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java @@ -49,6 +49,7 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.index.IndexSettings; +import org.opensearch.index.codec.CodecAliases; import org.opensearch.index.codec.CodecService; import org.opensearch.index.codec.CodecSettings; import org.opensearch.index.mapper.ParsedDocument; @@ -133,18 +134,26 @@ public Supplier retentionLeasesSupplier() { case "lz4": case "best_compression": case "zlib": - case "zstd": - case "zstd_no_dict": case "lucene_default": return s; default: - 
if (Codec.availableCodecs().contains(s) == false) { // we don't error message the not officially supported ones - throw new IllegalArgumentException( - "unknown value for [index.codec] must be one of [default, lz4, best_compression, zlib, zstd, zstd_no_dict] but was: " - + s - ); + if (Codec.availableCodecs().contains(s)) { + return s; } - return s; + + for (String codecName : Codec.availableCodecs()) { + Codec codec = Codec.forName(codecName); + if (codec instanceof CodecAliases) { + CodecAliases codecWithAlias = (CodecAliases) codec; + if (codecWithAlias.aliases().contains(s)) { + return s; + } + } + } + + throw new IllegalArgumentException( + "unknown value for [index.codec] must be one of [default, lz4, best_compression, zlib] but was: " + s + ); } }, Property.IndexScope, Property.NodeScope); @@ -181,9 +190,6 @@ public void validate(String key, Object value, Object dependency) { private static void doValidateCodecSettings(final String codec) { switch (codec) { - case "zstd": - case "zstd_no_dict": - return; case "best_compression": case "zlib": case "lucene_default": @@ -198,6 +204,18 @@ private static void doValidateCodecSettings(final String codec) { return; } } + for (String codecName : Codec.availableCodecs()) { + Codec availableCodec = Codec.forName(codecName); + if (availableCodec instanceof CodecAliases) { + CodecAliases availableCodecWithAlias = (CodecAliases) availableCodec; + if (availableCodecWithAlias.aliases().contains(codec)) { + if (availableCodec instanceof CodecSettings + && ((CodecSettings) availableCodec).supports(INDEX_CODEC_COMPRESSION_LEVEL_SETTING)) { + return; + } + } + } + } } throw new IllegalArgumentException("Compression level cannot be set for the " + codec + " codec."); } @@ -238,6 +256,7 @@ private EngineConfig(Builder builder) { this.codecService = builder.codecService; this.eventListener = builder.eventListener; codecName = builder.indexSettings.getValue(INDEX_CODEC_SETTING); + // We need to make the indexing buffer for this shard at least as large // as the amount of memory that is available for all engines on the // local node so that decisions to flush segments to disk are made by diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java index b529dfbe13bf4..570a2b186841a 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ -24,6 +24,7 @@ import org.opensearch.index.seqno.SeqNoStats; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.translog.Translog; +import org.opensearch.index.translog.TranslogCorruptedException; import org.opensearch.index.translog.TranslogDeletionPolicy; import org.opensearch.index.translog.TranslogException; import org.opensearch.index.translog.TranslogManager; @@ -71,6 +72,7 @@ public NRTReplicationEngine(EngineConfig engineConfig) { store.incRef(); NRTReplicationReaderManager readerManager = null; WriteOnlyTranslogManager translogManagerRef = null; + boolean success = false; try { this.replicaFileTracker = new ReplicaFileTracker(store::deleteQuiet); this.lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); @@ -124,9 +126,17 @@ public void onAfterTranslogSync() { engineConfig.getPrimaryModeSupplier() ); this.translogManager = translogManagerRef; - } catch (IOException e) { - IOUtils.closeWhileHandlingException(store::decRef, readerManager, translogManagerRef); + 
success = true; + } catch (IOException | TranslogCorruptedException e) { throw new EngineCreationFailureException(shardId, "failed to create engine", e); + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(readerManager, translogManagerRef); + if (isClosed.get() == false) { + // failure, we need to dec the store reference + store.decRef(); + } + } } } diff --git a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java index 9fdd2ff9f759d..5cdff14cae360 100644 --- a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java +++ b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java @@ -10,28 +10,40 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.opensearch.Version; import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.MetadataCreateIndexService; +import org.opensearch.cluster.metadata.MetadataIndexUpgradeService; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Nullable; import org.opensearch.common.UUIDs; +import org.opensearch.common.collect.Tuple; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.indices.ShardLimitValidator; import org.opensearch.repositories.IndexId; import org.opensearch.snapshots.RestoreInfo; import org.opensearch.snapshots.RestoreService; +import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Map; +import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; @@ -49,87 +61,47 @@ public class RemoteStoreRestoreService { private final AllocationService allocationService; - public RemoteStoreRestoreService(ClusterService clusterService, AllocationService allocationService) { + private final MetadataCreateIndexService createIndexService; + + private final MetadataIndexUpgradeService metadataIndexUpgradeService; + + private final ShardLimitValidator shardLimitValidator; + + private final RemoteClusterStateService remoteClusterStateService; + + public RemoteStoreRestoreService( + ClusterService clusterService, + AllocationService allocationService, + MetadataCreateIndexService createIndexService, + MetadataIndexUpgradeService metadataIndexUpgradeService, + ShardLimitValidator shardLimitValidator, + RemoteClusterStateService remoteClusterStateService + ) { this.clusterService = clusterService; this.allocationService = allocationService; + this.createIndexService = createIndexService; + this.metadataIndexUpgradeService = metadataIndexUpgradeService; + this.shardLimitValidator = shardLimitValidator; + 
this.remoteClusterStateService = remoteClusterStateService; } + /** + * Restores data from remote store for indices specified in the restore request. + * + * @param request restore request + * @param listener restore listener + */ public void restore(RestoreRemoteStoreRequest request, final ActionListener listener) { clusterService.submitStateUpdateTask("restore[remote_store]", new ClusterStateUpdateTask() { - final String restoreUUID = UUIDs.randomBase64UUID(); + String restoreUUID; RestoreInfo restoreInfo = null; @Override public ClusterState execute(ClusterState currentState) { - // Updating cluster state - ClusterState.Builder builder = ClusterState.builder(currentState); - Metadata.Builder mdBuilder = Metadata.builder(currentState.metadata()); - ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); - RoutingTable.Builder rtBuilder = RoutingTable.builder(currentState.routingTable()); - - List indicesToBeRestored = new ArrayList<>(); - int totalShards = 0; - for (String index : request.indices()) { - IndexMetadata currentIndexMetadata = currentState.metadata().index(index); - if (currentIndexMetadata == null) { - // ToDo: Handle index metadata does not exist case. (GitHub #3457) - logger.warn("Remote store restore is not supported for non-existent index. Skipping: {}", index); - continue; - } - if (currentIndexMetadata.getSettings().getAsBoolean(SETTING_REMOTE_STORE_ENABLED, false)) { - IndexMetadata updatedIndexMetadata = currentIndexMetadata; - if (request.restoreAllShards()) { - if (currentIndexMetadata.getState() != IndexMetadata.State.CLOSE) { - throw new IllegalStateException( - "cannot restore index [" - + index - + "] because an open index " - + "with same name already exists in the cluster. Close the existing index" - ); - } - updatedIndexMetadata = IndexMetadata.builder(currentIndexMetadata) - .state(IndexMetadata.State.OPEN) - .version(1 + currentIndexMetadata.getVersion()) - .mappingVersion(1 + currentIndexMetadata.getMappingVersion()) - .settingsVersion(1 + currentIndexMetadata.getSettingsVersion()) - .aliasesVersion(1 + currentIndexMetadata.getAliasesVersion()) - .build(); - } - - Map indexShardRoutingTableMap = currentState.routingTable() - .index(index) - .shards() - .values() - .stream() - .collect(Collectors.toMap(IndexShardRoutingTable::shardId, Function.identity())); - - IndexId indexId = new IndexId(index, updatedIndexMetadata.getIndexUUID()); - - RecoverySource.RemoteStoreRecoverySource recoverySource = new RecoverySource.RemoteStoreRecoverySource( - restoreUUID, - updatedIndexMetadata.getCreationVersion(), - indexId - ); - rtBuilder.addAsRemoteStoreRestore( - updatedIndexMetadata, - recoverySource, - indexShardRoutingTableMap, - request.restoreAllShards() - ); - blocks.updateBlocks(updatedIndexMetadata); - mdBuilder.put(updatedIndexMetadata, true); - indicesToBeRestored.add(index); - totalShards += updatedIndexMetadata.getNumberOfShards(); - } else { - logger.warn("Remote store is not enabled for index: {}", index); - } - } - - restoreInfo = new RestoreInfo("remote_store", indicesToBeRestored, totalShards, totalShards); - - RoutingTable rt = rtBuilder.build(); - ClusterState updatedState = builder.metadata(mdBuilder).blocks(blocks).routingTable(rt).build(); - return allocationService.reroute(updatedState, "restored from remote store"); + RemoteRestoreResult remoteRestoreResult = restore(currentState, null, request.restoreAllShards(), request.indices()); + restoreUUID = remoteRestoreResult.getRestoreUUID(); + restoreInfo = 
+ return remoteRestoreResult.getClusterState();
 }

 @Override @@ -140,7 +112,7 @@ public void onFailure(String source, Exception e) {
 @Override
 public TimeValue timeout() {
- return request.masterNodeTimeout();
+ return request.clusterManagerNodeTimeout();
 }

 @Override @@ -148,6 +120,227 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS
 listener.onResponse(new RestoreService.RestoreCompletionResponse(restoreUUID, null, restoreInfo));
 }
 });
+ }
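For illustration only, a minimal sketch of how a caller might drive the entry point above, assuming the RestoreRemoteStoreRequest shape used by the _remotestore/_restore API; the service reference and index names below are hypothetical:

    // Hedged usage sketch (not part of this change): restore two remote-store-backed
    // indices; restoreAllShards requires the indices to be closed first (validated below).
    RestoreRemoteStoreRequest request = new RestoreRemoteStoreRequest();
    request.indices("my-index-1", "my-index-2"); // hypothetical index names
    request.restoreAllShards(true);
    remoteStoreRestoreService.restore(
        request,
        ActionListener.wrap(
            response -> logger.info("remote store restore accepted [{}]", response.getRestoreInfo()),
            e -> logger.error("remote store restore failed", e)
        )
    );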
Skipping: {}", indexName); + } else { + indexMetadataMap.put(indexName, new Tuple<>(false, indexMetadata)); + } + } + } + validate(currentState, indexMetadataMap, restoreClusterUUID, restoreAllShards); + return executeRestore(currentState, indexMetadataMap, restoreAllShards); + } + + /** + * Executes remote restore + * @param currentState current cluster state + * @param indexMetadataMap map of index metadata to restore + * @param restoreAllShards indicates if all shards of the index needs to be restored + * @return remote restore result + */ + private RemoteRestoreResult executeRestore( + ClusterState currentState, + Map> indexMetadataMap, + boolean restoreAllShards + ) { + final String restoreUUID = UUIDs.randomBase64UUID(); + List indicesToBeRestored = new ArrayList<>(); + int totalShards = 0; + ClusterState.Builder builder = ClusterState.builder(currentState); + Metadata.Builder mdBuilder = Metadata.builder(currentState.metadata()); + ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); + RoutingTable.Builder rtBuilder = RoutingTable.builder(currentState.routingTable()); + for (Map.Entry> indexMetadataEntry : indexMetadataMap.entrySet()) { + String indexName = indexMetadataEntry.getKey(); + IndexMetadata indexMetadata = indexMetadataEntry.getValue().v2(); + boolean metadataFromRemoteStore = indexMetadataEntry.getValue().v1(); + IndexMetadata updatedIndexMetadata = indexMetadata; + if (restoreAllShards || metadataFromRemoteStore) { + updatedIndexMetadata = IndexMetadata.builder(indexMetadata) + .state(IndexMetadata.State.OPEN) + .version(1 + indexMetadata.getVersion()) + .mappingVersion(1 + indexMetadata.getMappingVersion()) + .settingsVersion(1 + indexMetadata.getSettingsVersion()) + .aliasesVersion(1 + indexMetadata.getAliasesVersion()) + .build(); + } + + IndexId indexId = new IndexId(indexName, updatedIndexMetadata.getIndexUUID()); + + Map indexShardRoutingTableMap = new HashMap<>(); + if (metadataFromRemoteStore == false) { + indexShardRoutingTableMap = currentState.routingTable() + .index(indexName) + .shards() + .values() + .stream() + .collect(Collectors.toMap(IndexShardRoutingTable::shardId, Function.identity())); + } + + RecoverySource.RemoteStoreRecoverySource recoverySource = new RecoverySource.RemoteStoreRecoverySource( + restoreUUID, + updatedIndexMetadata.getCreationVersion(), + indexId + ); + rtBuilder.addAsRemoteStoreRestore( + updatedIndexMetadata, + recoverySource, + indexShardRoutingTableMap, + restoreAllShards || metadataFromRemoteStore + ); + blocks.updateBlocks(updatedIndexMetadata); + mdBuilder.put(updatedIndexMetadata, true); + indicesToBeRestored.add(indexName); + totalShards += updatedIndexMetadata.getNumberOfShards(); + } + + RestoreInfo restoreInfo = new RestoreInfo("remote_store", indicesToBeRestored, totalShards, totalShards); + + RoutingTable rt = rtBuilder.build(); + ClusterState updatedState = builder.metadata(mdBuilder).blocks(blocks).routingTable(rt).build(); + return RemoteRestoreResult.build(restoreUUID, restoreInfo, allocationService.reroute(updatedState, "restored from remote store")); + } + + /** + * Performs various validations needed before executing restore + * @param currentState current cluster state + * @param indexMetadataMap map of index metadata to restore + * @param restoreClusterUUID cluster UUID used to restore IndexMetadata + * @param restoreAllShards indicates if all shards of the index needs to be restored. 
+
+ /**
+ * Performs various validations needed before executing restore
+ * @param currentState current cluster state
+ * @param indexMetadataMap map of index metadata to restore
+ * @param restoreClusterUUID cluster UUID used to restore IndexMetadata
+ * @param restoreAllShards indicates if all shards of the index need to be restored. This flag is ignored if restoreClusterUUID is provided
+ */
+ private void validate(
+ ClusterState currentState,
+ Map<String, Tuple<Boolean, IndexMetadata>> indexMetadataMap,
+ @Nullable String restoreClusterUUID,
+ boolean restoreAllShards
+ ) throws IllegalStateException, IllegalArgumentException {
+ String errorMsg = "cannot restore index [%s] because an open index with same name/uuid already exists in the cluster.";
+
+ // Restore with current cluster UUID will fail as same indices would be present in the cluster which we are trying to
+ // restore
+ if (currentState.metadata().clusterUUID().equals(restoreClusterUUID)) {
+ throw new IllegalArgumentException("clusterUUID to restore from should be different from current cluster UUID");
+ }
+ for (Map.Entry<String, Tuple<Boolean, IndexMetadata>> indexMetadataEntry : indexMetadataMap.entrySet()) {
+ String indexName = indexMetadataEntry.getKey();
+ IndexMetadata indexMetadata = indexMetadataEntry.getValue().v2();
+ String indexUUID = indexMetadata.getIndexUUID();
+ boolean metadataFromRemoteStore = indexMetadataEntry.getValue().v1();
+ if (indexMetadata.getSettings().getAsBoolean(SETTING_REMOTE_STORE_ENABLED, false)) {
+ if (metadataFromRemoteStore) {
+ Set<String> graveyardIndexNames = new HashSet<>();
+ Set<String> graveyardIndexUUID = new HashSet<>();
+ Set<String> liveClusterIndexUUIDs = currentState.metadata()
+ .indices()
+ .values()
+ .stream()
+ .map(IndexMetadata::getIndexUUID)
+ .collect(Collectors.toSet());
+
+ currentState.metadata().indexGraveyard().getTombstones().forEach(tombstone -> {
+ graveyardIndexNames.add(tombstone.getIndex().getName());
+ graveyardIndexUUID.add(tombstone.getIndex().getUUID());
+ });
+
+ // Since updates to the graveyard are synced to remote, we should never land in a situation where the remote
+ // contains index metadata for a graveyard index.
+ assert graveyardIndexNames.contains(indexName) == false : String.format(
+ Locale.ROOT,
+ "Index name [%s] exists in graveyard!",
+ indexName
+ );
+ assert graveyardIndexUUID.contains(indexUUID) == false : String.format(
+ Locale.ROOT,
+ "Index UUID [%s] exists in graveyard!",
+ indexUUID
+ );
+
+ // Any indices being restored from remote cluster state should not already be part of the cluster as this causes
+ // conflict
+ boolean sameNameIndexExists = currentState.metadata().hasIndex(indexName);
+ boolean sameUUIDIndexExists = liveClusterIndexUUIDs.contains(indexUUID);
+ if (sameNameIndexExists || sameUUIDIndexExists) {
+ String finalErrorMsg = String.format(Locale.ROOT, errorMsg, indexName);
+ logger.info(finalErrorMsg);
+ throw new IllegalStateException(finalErrorMsg);
+ }
+
+ Version minIndexCompatibilityVersion = currentState.getNodes().getMaxNodeVersion().minimumIndexCompatibilityVersion();
+ metadataIndexUpgradeService.upgradeIndexMetadata(indexMetadata, minIndexCompatibilityVersion);
+ boolean isHidden = IndexMetadata.INDEX_HIDDEN_SETTING.get(indexMetadata.getSettings());
+ createIndexService.validateIndexName(indexName, currentState);
+ createIndexService.validateDotIndex(indexName, isHidden);
+ shardLimitValidator.validateShardLimit(indexName, indexMetadata.getSettings(), currentState);
+ } else if (restoreAllShards && IndexMetadata.State.CLOSE.equals(indexMetadata.getState()) == false) {
+ throw new IllegalStateException(String.format(Locale.ROOT, errorMsg, indexName) + " Close the existing index.");
+ }
+ } else {
+ logger.warn("Remote store is not enabled for index: {}", indexName);
+ }
+ }
+ }
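When a cluster UUID is supplied instead of index names, the same overload restores every index whose metadata is found in the remote cluster state for that UUID; a sketch, under the assumption that previousClusterUUID holds the last-known UUID read back from the remote store:

    // Hedged sketch: UUID-driven mode; indexNames and restoreAllShards are ignored here.
    RemoteStoreRestoreService.RemoteRestoreResult result = remoteStoreRestoreService.restore(
        currentState,
        previousClusterUUID,
        false,
        Strings.EMPTY_ARRAY
    );
    ClusterState restored = result.getClusterState(); // already rerouted by the AllocationService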
+
+ /**
+ * Result of a remote restore operation.
+ */
+ public static class RemoteRestoreResult {
+ private final ClusterState clusterState;
+ private final RestoreInfo restoreInfo;
+ private final String restoreUUID;
+
+ private RemoteRestoreResult(String restoreUUID, RestoreInfo restoreInfo, ClusterState clusterState) {
+ this.clusterState = clusterState;
+ this.restoreInfo = restoreInfo;
+ this.restoreUUID = restoreUUID;
+ }
+
+ public static RemoteRestoreResult build(String restoreUUID, RestoreInfo restoreInfo, ClusterState clusterState) {
+ return new RemoteRestoreResult(restoreUUID, restoreInfo, clusterState);
+ }
+
+ public ClusterState getClusterState() {
+ return clusterState;
+ }
+
+ public RestoreInfo getRestoreInfo() {
+ return restoreInfo;
+ }
+
+ public String getRestoreUUID() {
+ return restoreUUID;
+ }
 }
 }
diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java index b79ae7765c45f..c7863536adf20 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java @@ -19,6 +19,7 @@ import org.opensearch.index.shard.IndexShard;

 import java.io.IOException;
+import java.util.Objects;

 /**
 * Tracks remote store segment download and upload stats @@ -300,4 +301,40 @@ static final class Fields {
 static final String TOTAL_TIME_SPENT = "total_time_spent";
 static final String TOTAL_TIME_SPENT_IN_MILLIS = "total_time_spent_in_millis";
 }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ RemoteSegmentStats that = (RemoteSegmentStats) o;
+ return uploadBytesStarted == that.uploadBytesStarted
+ && uploadBytesFailed == that.uploadBytesFailed
+ && uploadBytesSucceeded == that.uploadBytesSucceeded
+ && downloadBytesStarted == that.downloadBytesStarted
+ && downloadBytesFailed == that.downloadBytesFailed
+ && downloadBytesSucceeded == that.downloadBytesSucceeded
+ && maxRefreshTimeLag == that.maxRefreshTimeLag
+ && maxRefreshBytesLag == that.maxRefreshBytesLag
+ && totalRefreshBytesLag == that.totalRefreshBytesLag
+ && totalUploadTime == that.totalUploadTime
+ && totalDownloadTime == that.totalDownloadTime;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(
+ uploadBytesStarted,
+ uploadBytesFailed,
+ uploadBytesSucceeded,
+ downloadBytesStarted,
+ downloadBytesFailed,
+ downloadBytesSucceeded,
+ maxRefreshTimeLag,
+ maxRefreshBytesLag,
+ totalRefreshBytesLag,
+ totalUploadTime,
+ totalDownloadTime
+ );
+ }
 }
diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 250061c53bda9..8ed75330f938e 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -168,6 +168,7 @@ import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata;
 import org.opensearch.index.translog.RemoteBlobStoreInternalTranslogFactory;
 import org.opensearch.index.translog.RemoteFsTranslog;
+import org.opensearch.index.translog.RemoteTranslogStats;
 import org.opensearch.index.translog.Translog;
 import org.opensearch.index.translog.TranslogConfig;
 import org.opensearch.index.translog.TranslogFactory;
@@ -1406,7 +1407,15 @@ public FieldDataStats fieldDataStats(String...
fields) { } public TranslogStats translogStats() { - return getEngine().translogManager().getTranslogStats(); + TranslogStats translogStats = getEngine().translogManager().getTranslogStats(); + // Populate remote_store stats only if the index is remote store backed + if (indexSettings.isRemoteStoreEnabled()) { + translogStats.addRemoteTranslogStats( + new RemoteTranslogStats(remoteStoreStatsTrackerFactory.getRemoteTranslogTransferTracker(shardId).stats()) + ); + } + + return translogStats; } public CompletionStats completionStats(String... fields) { @@ -2327,6 +2336,7 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier) t } private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier, boolean syncFromRemote) throws IOException { + syncFromRemote = syncFromRemote && indexSettings.isRemoteSnapshot() == false; assert Thread.holdsLock(mutex) == false : "opening engine under mutex"; if (state != IndexShardState.RECOVERING) { throw new IndexShardNotRecoveringException(shardId, state); @@ -3719,8 +3729,7 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro internalRefreshListener.add( new RemoteStoreRefreshListener( this, - // Add the checkpoint publisher if the Segment Replciation via remote store is enabled. - indexSettings.isSegRepWithRemoteEnabled() ? this.checkpointPublisher : SegmentReplicationCheckpointPublisher.EMPTY, + this.checkpointPublisher, remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(shardId()) ) ); diff --git a/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java index 5250ce6230ffa..594b7f99cd85a 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; @@ -29,7 +28,6 @@ import org.opensearch.common.blobstore.transfer.RemoteTransferContainer; import org.opensearch.common.blobstore.transfer.stream.OffsetRangeIndexInputStream; import org.opensearch.common.blobstore.transfer.stream.OffsetRangeInputStream; -import org.opensearch.common.util.ByteUtils; import org.opensearch.core.action.ActionListener; import org.opensearch.index.store.exception.ChecksumCombinationException; @@ -47,9 +45,8 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.UnaryOperator; import java.util.stream.Collectors; -import java.util.zip.CRC32; -import com.jcraft.jzlib.JZlib; +import static org.opensearch.common.blobstore.transfer.RemoteTransferContainer.checksumOfChecksum; /** * A {@code RemoteDirectory} provides an abstraction layer for storing a list of files to a remote store. 
@@ -401,11 +398,8 @@ private void uploadBlob( private long calculateChecksumOfChecksum(Directory directory, String file) throws IOException { try (IndexInput indexInput = directory.openInput(file, IOContext.DEFAULT)) { - long storedChecksum = CodecUtil.retrieveChecksum(indexInput); - CRC32 checksumOfChecksum = new CRC32(); - checksumOfChecksum.update(ByteUtils.toByteArrayBE(storedChecksum)); try { - return JZlib.crc32_combine(storedChecksum, checksumOfChecksum.getValue(), SEGMENT_CHECKSUM_BYTES); + return checksumOfChecksum(indexInput, SEGMENT_CHECKSUM_BYTES); } catch (Exception e) { throw new ChecksumCombinationException( "Potentially corrupted file: Checksum combination failed while combining stored checksum " diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteTranslogStats.java b/server/src/main/java/org/opensearch/index/translog/RemoteTranslogStats.java new file mode 100644 index 0000000000000..966f8ebc2875a --- /dev/null +++ b/server/src/main/java/org/opensearch/index/translog/RemoteTranslogStats.java @@ -0,0 +1,192 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.translog; + +import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStats; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; + +import java.io.IOException; +import java.util.Objects; + +/** + * Encapsulates the stats related to Remote Translog Store operations + * + * @opensearch.internal + */ +public class RemoteTranslogStats implements ToXContentFragment, Writeable { + /** + * Total number of Remote Translog Store uploads that have been started + */ + private long totalUploadsStarted; + + /** + * Total number of Remote Translog Store uploads that have failed. + */ + private long totalUploadsFailed; + + /** + * Total number of Remote Translog Store uploads that have been successful. + */ + private long totalUploadsSucceeded; + + /** + * Total number of byte uploads to Remote Translog Store that have been started. + */ + private long uploadBytesStarted; + + /** + * Total number of byte uploads to Remote Translog Store that have failed. + */ + private long uploadBytesFailed; + + /** + * Total number of byte uploads to Remote Translog Store that have been successful. 
+ */ + private long uploadBytesSucceeded; + + static final String REMOTE_STORE = "remote_store"; + + public RemoteTranslogStats() {} + + public RemoteTranslogStats(StreamInput in) throws IOException { + this.totalUploadsStarted = in.readVLong(); + this.totalUploadsFailed = in.readVLong(); + this.totalUploadsSucceeded = in.readVLong(); + this.uploadBytesStarted = in.readVLong(); + this.uploadBytesFailed = in.readVLong(); + this.uploadBytesSucceeded = in.readVLong(); + } + + public RemoteTranslogStats(RemoteTranslogTransferTracker.Stats transferTrackerStats) { + this.totalUploadsStarted = transferTrackerStats.totalUploadsStarted; + this.totalUploadsFailed = transferTrackerStats.totalUploadsFailed; + this.totalUploadsSucceeded = transferTrackerStats.totalUploadsSucceeded; + this.uploadBytesStarted = transferTrackerStats.uploadBytesStarted; + this.uploadBytesFailed = transferTrackerStats.uploadBytesFailed; + this.uploadBytesSucceeded = transferTrackerStats.uploadBytesSucceeded; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(totalUploadsStarted); + out.writeVLong(totalUploadsFailed); + out.writeVLong(totalUploadsSucceeded); + out.writeVLong(uploadBytesStarted); + out.writeVLong(uploadBytesFailed); + out.writeVLong(uploadBytesSucceeded); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + RemoteTranslogStats other = (RemoteTranslogStats) obj; + + return this.totalUploadsStarted == other.totalUploadsStarted + && this.totalUploadsFailed == other.totalUploadsFailed + && this.totalUploadsSucceeded == other.totalUploadsSucceeded + && this.uploadBytesStarted == other.uploadBytesStarted + && this.uploadBytesFailed == other.uploadBytesFailed + && this.uploadBytesSucceeded == other.uploadBytesSucceeded; + } + + @Override + public int hashCode() { + return Objects.hash( + totalUploadsStarted, + totalUploadsFailed, + totalUploadsSucceeded, + uploadBytesStarted, + uploadBytesFailed, + uploadBytesSucceeded + ); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(REMOTE_STORE); + + builder.startObject(RemoteStoreStats.SubFields.UPLOAD); + addRemoteTranslogUploadStatsXContent(builder); + builder.endObject(); // translog.remote_store.upload + + builder.endObject(); // translog.remote_store + + return builder; + } + + public long getTotalUploadsStarted() { + return totalUploadsStarted; + } + + public long getTotalUploadsFailed() { + return totalUploadsFailed; + } + + public long getTotalUploadsSucceeded() { + return totalUploadsSucceeded; + } + + public long getUploadBytesStarted() { + return uploadBytesStarted; + } + + public long getUploadBytesFailed() { + return uploadBytesFailed; + } + + public long getUploadBytesSucceeded() { + return uploadBytesSucceeded; + } + + public void add(RemoteTranslogStats other) { + if (other == null) { + return; + } + + this.totalUploadsStarted += other.totalUploadsStarted; + this.totalUploadsFailed += other.totalUploadsFailed; + this.totalUploadsSucceeded += other.totalUploadsSucceeded; + this.uploadBytesStarted += other.uploadBytesStarted; + this.uploadBytesFailed += other.uploadBytesFailed; + this.uploadBytesSucceeded += other.uploadBytesSucceeded; + } + + void addRemoteTranslogUploadStatsXContent(XContentBuilder builder) throws IOException { + builder.startObject(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOADS); + 
builder.field(RemoteStoreStats.SubFields.STARTED, totalUploadsStarted) + .field(RemoteStoreStats.SubFields.FAILED, totalUploadsFailed) + .field(RemoteStoreStats.SubFields.SUCCEEDED, totalUploadsSucceeded); + builder.endObject(); + + builder.startObject(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOAD_SIZE); + builder.humanReadableField( + RemoteStoreStats.SubFields.STARTED_BYTES, + RemoteStoreStats.SubFields.STARTED, + new ByteSizeValue(uploadBytesStarted) + ); + builder.humanReadableField( + RemoteStoreStats.SubFields.FAILED_BYTES, + RemoteStoreStats.SubFields.FAILED, + new ByteSizeValue(uploadBytesFailed) + ); + builder.humanReadableField( + RemoteStoreStats.SubFields.SUCCEEDED_BYTES, + RemoteStoreStats.SubFields.SUCCEEDED, + new ByteSizeValue(uploadBytesSucceeded) + ); + builder.endObject(); + } +} diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogStats.java b/server/src/main/java/org/opensearch/index/translog/TranslogStats.java index cf279334c7557..a4699cea671a0 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogStats.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogStats.java @@ -31,6 +31,7 @@ package org.opensearch.index.translog; +import org.opensearch.Version; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -48,14 +49,21 @@ * @opensearch.internal */ public class TranslogStats implements Writeable, ToXContentFragment { - + private static final String TRANSLOG = "translog"; private long translogSizeInBytes; private int numberOfOperations; private long uncommittedSizeInBytes; private int uncommittedOperations; private long earliestLastModifiedAge; - public TranslogStats() {} + /** + * Stats related to the Remote Translog Store operations + */ + private final RemoteTranslogStats remoteTranslogStats; + + public TranslogStats() { + remoteTranslogStats = new RemoteTranslogStats(); + } public TranslogStats(StreamInput in) throws IOException { numberOfOperations = in.readVInt(); @@ -63,6 +71,9 @@ public TranslogStats(StreamInput in) throws IOException { uncommittedOperations = in.readVInt(); uncommittedSizeInBytes = in.readVLong(); earliestLastModifiedAge = in.readVLong(); + remoteTranslogStats = in.getVersion().onOrAfter(Version.V_2_10_0) + ? 
in.readOptionalWriteable(RemoteTranslogStats::new) + : new RemoteTranslogStats(); } public TranslogStats( @@ -87,27 +98,37 @@ public TranslogStats( if (earliestLastModifiedAge < 0) { throw new IllegalArgumentException("earliestLastModifiedAge must be >= 0"); } + this.numberOfOperations = numberOfOperations; this.translogSizeInBytes = translogSizeInBytes; this.uncommittedSizeInBytes = uncommittedSizeInBytes; this.uncommittedOperations = uncommittedOperations; this.earliestLastModifiedAge = earliestLastModifiedAge; + this.remoteTranslogStats = new RemoteTranslogStats(); + } + + public void addRemoteTranslogStats(RemoteTranslogStats remoteTranslogStats) { + if (this.remoteTranslogStats != null) { + this.remoteTranslogStats.add(remoteTranslogStats); + } } - public void add(TranslogStats translogStats) { - if (translogStats == null) { + public void add(TranslogStats other) { + if (other == null) { return; } - this.numberOfOperations += translogStats.numberOfOperations; - this.translogSizeInBytes += translogStats.translogSizeInBytes; - this.uncommittedOperations += translogStats.uncommittedOperations; - this.uncommittedSizeInBytes += translogStats.uncommittedSizeInBytes; + this.numberOfOperations += other.numberOfOperations; + this.translogSizeInBytes += other.translogSizeInBytes; + this.uncommittedOperations += other.uncommittedOperations; + this.uncommittedSizeInBytes += other.uncommittedSizeInBytes; if (this.earliestLastModifiedAge == 0) { - this.earliestLastModifiedAge = translogStats.earliestLastModifiedAge; + this.earliestLastModifiedAge = other.earliestLastModifiedAge; } else { - this.earliestLastModifiedAge = Math.min(this.earliestLastModifiedAge, translogStats.earliestLastModifiedAge); + this.earliestLastModifiedAge = Math.min(this.earliestLastModifiedAge, other.earliestLastModifiedAge); } + + addRemoteTranslogStats(other.remoteTranslogStats); } public long getTranslogSizeInBytes() { @@ -132,15 +153,20 @@ public long getEarliestLastModifiedAge() { return earliestLastModifiedAge; } + public RemoteTranslogStats getRemoteTranslogStats() { + return remoteTranslogStats; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject("translog"); - builder.field("operations", numberOfOperations); - builder.humanReadableField("size_in_bytes", "size", new ByteSizeValue(translogSizeInBytes)); - builder.field("uncommitted_operations", uncommittedOperations); - builder.humanReadableField("uncommitted_size_in_bytes", "uncommitted_size", new ByteSizeValue(uncommittedSizeInBytes)); - builder.field("earliest_last_modified_age", earliestLastModifiedAge); + builder.startObject(TRANSLOG); + addLocalTranslogStatsXContent(builder); + if (remoteTranslogStats != null) { + builder = remoteTranslogStats.toXContent(builder, params); + } + builder.endObject(); + return builder; } @@ -156,5 +182,16 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(uncommittedOperations); out.writeVLong(uncommittedSizeInBytes); out.writeVLong(earliestLastModifiedAge); + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { + out.writeOptionalWriteable(remoteTranslogStats); + } + } + + private void addLocalTranslogStatsXContent(XContentBuilder builder) throws IOException { + builder.field("operations", numberOfOperations); + builder.humanReadableField("size_in_bytes", "size", new ByteSizeValue(translogSizeInBytes)); + builder.field("uncommitted_operations", uncommittedOperations); + builder.humanReadableField("uncommitted_size_in_bytes", 
"uncommitted_size", new ByteSizeValue(uncommittedSizeInBytes)); + builder.field("earliest_last_modified_age", earliestLastModifiedAge); } } diff --git a/server/src/main/java/org/opensearch/indices/IndicesModule.java b/server/src/main/java/org/opensearch/indices/IndicesModule.java index c6ae8b988aed0..5c2137ec742a4 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesModule.java +++ b/server/src/main/java/org/opensearch/indices/IndicesModule.java @@ -38,7 +38,6 @@ import org.opensearch.action.admin.indices.rollover.MaxSizeCondition; import org.opensearch.action.resync.TransportResyncReplicationAction; import org.opensearch.common.inject.AbstractModule; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.NamedWriteableRegistry.Entry; @@ -288,9 +287,7 @@ protected void configure() { bind(RetentionLeaseSyncer.class).asEagerSingleton(); bind(SegmentReplicationCheckpointPublisher.class).asEagerSingleton(); bind(SegmentReplicationPressureService.class).asEagerSingleton(); - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { - bind(RemoteStorePressureService.class).asEagerSingleton(); - } + bind(RemoteStorePressureService.class).asEagerSingleton(); } /** diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 2e2a0762ea489..2ed3cb8d9e8ea 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -69,7 +69,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.AbstractRefCounted; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.OpenSearchExecutors; @@ -457,12 +456,9 @@ protected void closeInternal() { this.clusterDefaultRefreshInterval = CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.get(clusterService.getSettings()); clusterService.getClusterSettings() .addSettingsUpdateConsumer(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING, this::onRefreshIntervalUpdate); - - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { - this.clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(clusterService.getSettings()); - clusterService.getClusterSettings() - .addSettingsUpdateConsumer(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, this::setClusterRemoteTranslogBufferInterval); - } + this.clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(clusterService.getSettings()); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, this::setClusterRemoteTranslogBufferInterval); } /** diff --git a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java index 0904dc5fa18a4..dc538a03de595 100644 --- a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java @@ -55,7 +55,6 @@ import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import 
org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.core.action.ActionListener; @@ -223,10 +222,7 @@ public IndicesClusterStateService( ); indexEventListeners.add(segmentReplicationTargetService); indexEventListeners.add(segmentReplicationSourceService); - // if remote store feature is not enabled, do not wire the remote upload pressure service as an IndexEventListener. - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { - indexEventListeners.add(remoteStoreStatsTrackerFactory); - } + indexEventListeners.add(remoteStoreStatsTrackerFactory); this.segmentReplicationTargetService = segmentReplicationTargetService; this.builtInIndexListener = Collections.unmodifiableList(indexEventListeners); this.indicesService = indicesService; diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index b9c5c3352adc9..e06207a0c7bff 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -115,7 +115,7 @@ import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.crypto.CryptoManagerRegistry; +import org.opensearch.crypto.CryptoHandlerRegistry; import org.opensearch.discovery.Discovery; import org.opensearch.discovery.DiscoveryModule; import org.opensearch.env.Environment; @@ -175,6 +175,7 @@ import org.opensearch.plugins.CircuitBreakerPlugin; import org.opensearch.plugins.ClusterPlugin; import org.opensearch.plugins.CryptoKeyProviderPlugin; +import org.opensearch.plugins.CryptoPlugin; import org.opensearch.plugins.DiscoveryPlugin; import org.opensearch.plugins.EnginePlugin; import org.opensearch.plugins.ExtensionAwarePlugin; @@ -271,6 +272,7 @@ import static org.opensearch.env.NodeEnvironment.collectFileCacheDataPath; import static org.opensearch.index.ShardIndexingPressureSettings.SHARD_INDEXING_PRESSURE_ENABLED_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreAttributePresent; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled; /** * A node represent a node within a cluster ({@code cluster.name}). 
The {@link #client()} can be used @@ -680,7 +682,7 @@ protected Node( threadPool::relativeTimeInMillis ); final RemoteClusterStateService remoteClusterStateService; - if (RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.get(settings) == true) { + if (isRemoteStoreClusterStateEnabled(settings)) { remoteClusterStateService = new RemoteClusterStateService( nodeEnvironment.nodeId(), repositoriesServiceReference::get, @@ -939,7 +941,11 @@ protected Node( xContentRegistry, recoverySettings ); - CryptoManagerRegistry.initRegistry(pluginsService.filterPlugins(CryptoKeyProviderPlugin.class), settings); + CryptoHandlerRegistry.initRegistry( + pluginsService.filterPlugins(CryptoPlugin.class), + pluginsService.filterPlugins(CryptoKeyProviderPlugin.class), + settings + ); RepositoriesService repositoryService = repositoriesModule.getRepositoryService(); repositoriesServiceReference.set(repositoryService); SnapshotsService snapshotsService = new SnapshotsService( @@ -974,9 +980,14 @@ protected Node( indicesService, clusterInfoService::getClusterInfo ); + RemoteStoreRestoreService remoteStoreRestoreService = new RemoteStoreRestoreService( clusterService, - clusterModule.getAllocationService() + clusterModule.getAllocationService(), + metadataCreateIndexService, + metadataIndexUpgradeService, + shardLimitValidator, + remoteClusterStateService ); final DiskThresholdMonitor diskThresholdMonitor = new DiskThresholdMonitor( @@ -1324,6 +1335,10 @@ public Node start() throws NodeValidationException { injector.getInstance(PeerRecoverySourceService.class).start(); injector.getInstance(SegmentReplicationSourceService.class).start(); + final RemoteClusterStateService remoteClusterStateService = injector.getInstance(RemoteClusterStateService.class); + if (remoteClusterStateService != null) { + remoteClusterStateService.start(); + } // Load (and maybe upgrade) the metadata stored on disk final GatewayMetaState gatewayMetaState = injector.getInstance(GatewayMetaState.class); gatewayMetaState.start( @@ -1335,7 +1350,8 @@ public Node start() throws NodeValidationException { injector.getInstance(MetadataUpgrader.class), injector.getInstance(PersistedClusterStateService.class), injector.getInstance(RemoteClusterStateService.class), - injector.getInstance(PersistedStateRegistry.class) + injector.getInstance(PersistedStateRegistry.class), + injector.getInstance(RemoteStoreRestoreService.class) ); if (Assertions.ENABLED) { try { diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java index b514089966484..7b2a6c34d3db6 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java @@ -8,10 +8,12 @@ package org.opensearch.node.remotestore; +import org.opensearch.cluster.metadata.CryptoMetadata; import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; +import org.opensearch.gateway.remote.RemoteClusterStateService; import org.opensearch.node.Node; import org.opensearch.repositories.blobstore.BlobStoreRepository; @@ -35,7 +37,13 @@ public class RemoteStoreNodeAttribute { public static final String REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX = "remote_store"; public static final String 
REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY = "remote_store.segment.repository";
 public static final String REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY = "remote_store.translog.repository";
+ public static final String REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY = "remote_store.state.repository";
 public static final String REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT = "remote_store.repository.%s.type";
+ public static final String REMOTE_STORE_REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT = "remote_store.repository.%s."
+ + CryptoMetadata.CRYPTO_METADATA_KEY;
+ public static final String REMOTE_STORE_REPOSITORY_CRYPTO_SETTINGS_PREFIX = REMOTE_STORE_REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT
+ + "."
+ + CryptoMetadata.SETTINGS_KEY;
 public static final String REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX = "remote_store.repository.%s.settings.";

 private final RepositoriesMetadata repositoriesMetadata;
@@ -55,6 +63,34 @@ private String validateAttributeNonNull(DiscoveryNode node, String attributeKey)
 return attributeValue;
 }

+ private CryptoMetadata buildCryptoMetadata(DiscoveryNode node, String repositoryName) {
+ String metadataKey = String.format(Locale.getDefault(), REMOTE_STORE_REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT, repositoryName);
+ boolean isRepoEncrypted = node.getAttributes().keySet().stream().anyMatch(key -> key.startsWith(metadataKey));
+ if (isRepoEncrypted == false) {
+ return null;
+ }
+
+ String keyProviderName = validateAttributeNonNull(node, metadataKey + "." + CryptoMetadata.KEY_PROVIDER_NAME_KEY);
+ String keyProviderType = validateAttributeNonNull(node, metadataKey + "." + CryptoMetadata.KEY_PROVIDER_TYPE_KEY);
+
+ String settingsAttributeKeyPrefix = String.format(
+ Locale.getDefault(),
+ REMOTE_STORE_REPOSITORY_CRYPTO_SETTINGS_PREFIX,
+ repositoryName
+ );
+
+ Map<String, String> settingsMap = node.getAttributes()
+ .keySet()
+ .stream()
+ .filter(key -> key.startsWith(settingsAttributeKeyPrefix))
+ .collect(Collectors.toMap(key -> key.replace(settingsAttributeKeyPrefix + ".", ""), key -> node.getAttributes().get(key)));
+
+ Settings.Builder settings = Settings.builder();
+ settingsMap.forEach(settings::put);
+
+ return new CryptoMetadata(keyProviderName, keyProviderType, settings.build());
+ }
+
 private Map<String, String> validateSettingsAttributesNonNull(DiscoveryNode node, String repositoryName) {
 String settingsAttributeKeyPrefix = String.format(
 Locale.getDefault(),
@@ -86,10 +122,12 @@ private RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String na
 Settings.Builder settings = Settings.builder();
 settingsMap.forEach(settings::put);

+ CryptoMetadata cryptoMetadata = buildCryptoMetadata(node, name);
+
 // Repository metadata built here will always be for a system repository.
 settings.put(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING.getKey(), true);

- return new RepositoryMetadata(name, type, settings.build());
+ return new RepositoryMetadata(name, type, settings.build(), cryptoMetadata);
 }

 private RepositoriesMetadata buildRepositoriesMetadata(DiscoveryNode node) {
@@ -98,6 +136,7 @@ private RepositoriesMetadata buildRepositoriesMetadata(DiscoveryNode node) {
 repositoryNames.add(validateAttributeNonNull(node, REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY));
 repositoryNames.add(validateAttributeNonNull(node, REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY));
+ repositoryNames.add(validateAttributeNonNull(node, REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY));

 for (String repositoryName : repositoryNames) {
 repositoryMetadataList.add(buildRepositoryMetadata(node, repositoryName));
@@ -110,6 +149,10 @@ public static boolean isRemoteStoreAttributePresent(Settings settings) {
 return settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX).isEmpty() == false;
 }

+ public static boolean isRemoteStoreClusterStateEnabled(Settings settings) {
+ return RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.get(settings) && isRemoteStoreAttributePresent(settings);
+ }
+
 public RepositoriesMetadata getRepositoriesMetadata() {
 return this.repositoriesMetadata;
 }
 }
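For context, these attribute keys arrive as node settings; a minimal sketch of what a node might carry to register the three repositories plus optional crypto metadata. The repository names, bucket, and key provider values are invented for the example, and the crypto_metadata segment assumes CryptoMetadata.CRYPTO_METADATA_KEY resolves to "crypto_metadata":

    // Hedged sketch of node attributes consumed by buildRepositoriesMetadata(...) above.
    Settings nodeSettings = Settings.builder()
        .put("node.attr.remote_store.segment.repository", "my-segment-repo")
        .put("node.attr.remote_store.translog.repository", "my-translog-repo")
        .put("node.attr.remote_store.state.repository", "my-state-repo")
        .put("node.attr.remote_store.repository.my-segment-repo.type", "s3")
        .put("node.attr.remote_store.repository.my-segment-repo.settings.bucket", "my-bucket")
        // optional: marks the repository as encrypted, picked up by buildCryptoMetadata(...)
        .put("node.attr.remote_store.repository.my-segment-repo.crypto_metadata.key_provider_name", "kp-1")
        .put("node.attr.remote_store.repository.my-segment-repo.crypto_metadata.key_provider_type", "my-kms")
        .build();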
diff --git a/server/src/main/java/org/opensearch/plugins/CryptoPlugin.java b/server/src/main/java/org/opensearch/plugins/CryptoPlugin.java new file mode 100644 index 0000000000000..ad348d07e23d3 --- /dev/null +++ b/server/src/main/java/org/opensearch/plugins/CryptoPlugin.java @@ -0,0 +1,37 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugins;
+
+import org.opensearch.common.annotation.ExperimentalApi;
+import org.opensearch.common.crypto.CryptoHandler;
+import org.opensearch.common.crypto.MasterKeyProvider;
+
+/**
+ * Crypto plugin to provide encryption and decryption support.
+ * @opensearch.api
+ */
+@ExperimentalApi
+public interface CryptoPlugin<T, U> {
+
+ /**
+ * To create a crypto handler for handling encryption and decryption ops.
+ * @param keyProvider key provider instance to provide keys used in encrypting data.
+ * @param keyProviderName Name of key provider to distinguish between multiple instances created with different
+ * configurations of same keyProviderType.
+ * @param keyProviderType Unique type of key provider to distinguish between different key provider implementations.
+ * @param onClose Callback invoked when the handler is closed, used to release the key provider or perform other cleanup.
+ * @return crypto handler instance.
+ */
+ CryptoHandler<T, U> getOrCreateCryptoHandler(
+ MasterKeyProvider keyProvider,
+ String keyProviderName,
+ String keyProviderType,
+ Runnable onClose
+ );
+}
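A hedged sketch of one way a plugin might implement this interface, caching one handler per provider; FrameCryptoHandler is an invented CryptoHandler implementation, not part of this change:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import org.opensearch.common.crypto.CryptoHandler;
    import org.opensearch.common.crypto.MasterKeyProvider;
    import org.opensearch.plugins.CryptoPlugin;

    // Illustrative only; assumes a CryptoHandler implementation named FrameCryptoHandler.
    public class MyCryptoPlugin implements CryptoPlugin<Object, Object> {
        private final Map<String, CryptoHandler<Object, Object>> handlers = new ConcurrentHashMap<>();

        @Override
        public CryptoHandler<Object, Object> getOrCreateCryptoHandler(
            MasterKeyProvider keyProvider,
            String keyProviderName,
            String keyProviderType,
            Runnable onClose
        ) {
            // One handler per name:type pair; onClose is propagated so the caller's
            // cleanup runs when the handler is eventually closed.
            return handlers.computeIfAbsent(
                keyProviderName + ":" + keyProviderType,
                key -> new FrameCryptoHandler(keyProvider, onClose)
            );
        }
    }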
diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java index ab343aabd3a82..3d6679b3ef80e 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java @@ -64,7 +64,6 @@ import org.opensearch.common.settings.Setting;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.TimeValue;
-import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.common.util.concurrent.ConcurrentCollections;
 import org.opensearch.common.util.io.IOUtils;
 import org.opensearch.core.action.ActionListener;
@@ -683,12 +682,6 @@ public static void validateRepositoryMetadataSettings(
 + minVersionInCluster
 );
 }
- if (REMOTE_STORE_INDEX_SHALLOW_COPY.get(repositoryMetadataSettings) && !FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) {
- throw new RepositoryException(
- repositoryName,
- "setting " + REMOTE_STORE_INDEX_SHALLOW_COPY.getKey() + " cannot be enabled, as remote store feature is not enabled."
- );
- }
 }

 private static void ensureRepositoryNotInUse(ClusterState clusterState, String repository) {
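The next file adds an asynchronous write path to ChecksumBlobStoreFormat; a hedged sketch of how a caller might use the writeAsync method introduced below, where the format instance, container, blob name, and compressor are placeholders:

    // Illustrative only: writeAsync falls back to the blocking write(...) when the
    // container does not implement AsyncMultiStreamBlobContainer.
    checksumFormat.writeAsync(
        metadataObject,
        blobContainer,
        "metadata__" + term + "__" + version, // hypothetical blob name
        compressor,
        ActionListener.wrap(
            v -> logger.trace("blob upload completed"),
            e -> logger.error("blob upload failed", e)
        )
    );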
diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java index 9048757405108..744e2fbd1bfc7 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -42,7 +42,11 @@ import org.apache.lucene.util.BytesRef;
 import org.opensearch.cluster.metadata.Metadata;
 import org.opensearch.common.CheckedFunction;
+import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer;
 import org.opensearch.common.blobstore.BlobContainer;
+import org.opensearch.common.blobstore.stream.write.WritePriority;
+import org.opensearch.common.blobstore.transfer.RemoteTransferContainer;
+import org.opensearch.common.blobstore.transfer.stream.OffsetRangeIndexInputStream;
 import org.opensearch.common.io.Streams;
 import org.opensearch.common.io.stream.BytesStreamOutput;
 import org.opensearch.common.lucene.store.ByteArrayIndexInput;
@@ -50,6 +54,7 @@ import org.opensearch.common.xcontent.LoggingDeprecationHandler;
 import org.opensearch.common.xcontent.XContentHelper;
 import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.action.ActionListener;
 import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.compress.Compressor;
 import org.opensearch.core.xcontent.MediaTypeRegistry;
@@ -58,6 +63,7 @@ import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.core.xcontent.XContentParser;
 import org.opensearch.gateway.CorruptStateException;
+import org.opensearch.index.store.exception.ChecksumCombinationException;
 import org.opensearch.snapshots.SnapshotInfo;

 import java.io.IOException;
@@ -67,6 +73,8 @@ import java.util.Locale;
 import java.util.Map;

+import static org.opensearch.common.blobstore.transfer.RemoteTransferContainer.checksumOfChecksum;
+
 /**
 * Snapshot metadata file format used in v2.0 and above
 *
@@ -167,6 +175,61 @@ public void write(final T obj, final BlobContainer blobContainer, final String n
 blobContainer.writeBlob(blobName, bytes.streamInput(), bytes.length(), false);
 }

+ /**
+ * Writes blob with resolving the blob name using {@link #blobName} method.
+ * Leverages the multipart upload if supported by the blobContainer.
+ *
+ * @param obj object to be serialized
+ * @param blobContainer blob container
+ * @param name blob name
+ * @param compressor compressor used while serializing the blob
+ * @param listener listener to listen to write result
+ */
+ public void writeAsync(
+ final T obj,
+ final BlobContainer blobContainer,
+ final String name,
+ final Compressor compressor,
+ ActionListener<Void> listener
+ ) throws IOException {
+ if (blobContainer instanceof AsyncMultiStreamBlobContainer == false) {
+ write(obj, blobContainer, name, compressor);
+ listener.onResponse(null);
+ return;
+ }
+ final String blobName = blobName(name);
+ final BytesReference bytes = serialize(obj, blobName, compressor);
+ final String resourceDescription = "ChecksumBlobStoreFormat.writeAsync(blob=\"" + blobName + "\")";
+ try (IndexInput input = new ByteArrayIndexInput(resourceDescription, BytesReference.toBytes(bytes))) {
+ long expectedChecksum;
+ try {
+ expectedChecksum = checksumOfChecksum(input.clone(), 8);
+ } catch (Exception e) {
+ throw new ChecksumCombinationException(
+ "Potentially corrupted file: Checksum combination failed while combining stored checksum "
+ + "and calculated checksum of stored checksum",
+ resourceDescription,
+ e
+ );
+ }
+
+ try (
+ RemoteTransferContainer remoteTransferContainer = new RemoteTransferContainer(
+ blobName,
+ blobName,
+ bytes.length(),
+ true,
+ WritePriority.HIGH,
+ (size, position) -> new OffsetRangeIndexInputStream(input, size, position),
+ expectedChecksum,
+ true
+ )
+ ) {
+ ((AsyncMultiStreamBlobContainer) blobContainer).asyncBlobUpload(remoteTransferContainer.createWriteContext(), listener);
+ }
+ }
+ }
+
 public BytesReference serialize(final T obj, final String blobName, final Compressor compressor) throws IOException {
 try (BytesStreamOutput outputStream = new BytesStreamOutput()) {
 try (
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java index df056c75d66f0..b50ce17a3d391 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -42,12 +42,15 @@ import org.opensearch.common.io.stream.BytesStreamOutput;
 import org.opensearch.common.metrics.OperationStats;
 import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.core.indices.breaker.AllCircuitBreakerStats;
 import org.opensearch.core.indices.breaker.CircuitBreakerStats;
 import org.opensearch.discovery.DiscoveryStats;
 import org.opensearch.http.HttpStats;
 import org.opensearch.index.ReplicationStats;
 import org.opensearch.index.remote.RemoteSegmentStats;
+import org.opensearch.index.remote.RemoteTranslogTransferTracker;
+import org.opensearch.index.translog.RemoteTranslogStats;
 import org.opensearch.indices.NodeIndicesStats;
 import org.opensearch.ingest.IngestStats;
 import org.opensearch.monitor.fs.FsInfo;
@@ -463,6 +466,12 @@ public void testSerialization() throws IOException {
 assertEquals(remoteSegmentStats.getTotalRefreshBytesLag(), deserializedRemoteSegmentStats.getTotalRefreshBytesLag());
 assertEquals(remoteSegmentStats.getTotalUploadTime(), deserializedRemoteSegmentStats.getTotalUploadTime());
 assertEquals(remoteSegmentStats.getTotalDownloadTime(), deserializedRemoteSegmentStats.getTotalDownloadTime());
+
+ RemoteTranslogStats remoteTranslogStats = nodeIndicesStats.getTranslog().getRemoteTranslogStats();
+ RemoteTranslogStats deserializedRemoteTranslogStats = deserializedNodeIndicesStats.getTranslog()
+ .getRemoteTranslogStats();
+ assertEquals(remoteTranslogStats, deserializedRemoteTranslogStats);
+
 ReplicationStats replicationStats = nodeIndicesStats.getSegments().getReplicationStats();
 ReplicationStats deserializedReplicationStats = deserializedNodeIndicesStats.getSegments().getReplicationStats();
@@ -792,6 +801,7 @@ private static NodeIndicesStats getNodeIndicesStats(boolean remoteStoreStats) {
 NodeIndicesStats indicesStats = null;
 if (remoteStoreStats) {
 indicesStats = new NodeIndicesStats(new CommonStats(CommonStatsFlags.ALL), new HashMap<>());
+
 RemoteSegmentStats remoteSegmentStats = indicesStats.getSegments().getRemoteSegmentStats();
 remoteSegmentStats.addUploadBytesStarted(10L);
 remoteSegmentStats.addUploadBytesSucceeded(10L);
@@ -804,10 +814,38 @@ private static NodeIndicesStats getNodeIndicesStats(boolean remoteStoreStats) {
 remoteSegmentStats.setMaxRefreshTimeLag(2L);
 remoteSegmentStats.addTotalUploadTime(20L);
 remoteSegmentStats.addTotalDownloadTime(20L);
+
+ RemoteTranslogStats remoteTranslogStats = indicesStats.getTranslog().getRemoteTranslogStats();
+ RemoteTranslogStats otherRemoteTranslogStats = new RemoteTranslogStats(getRandomRemoteTranslogTransferTrackerStats());
+ remoteTranslogStats.add(otherRemoteTranslogStats);
 }
 return indicesStats;
 }

+ private static RemoteTranslogTransferTracker.Stats getRandomRemoteTranslogTransferTrackerStats() {
+ return new RemoteTranslogTransferTracker.Stats(
+ new ShardId("test-idx", "test-idx", randomIntBetween(1, 10)),
+ 0L,
+ randomLongBetween(100, 500),
+ randomLongBetween(50, 100),
+ randomLongBetween(100, 200),
+ randomLongBetween(10000, 50000),
+ randomLongBetween(5000, 10000),
+ randomLongBetween(10000, 20000),
+ 0L,
+ 0D,
+ 0D,
+ 0D,
+ 0L,
+ 0L,
+ 0L,
+ 0L,
+ 0D,
+ 0D,
+ 0D
+ );
+ }
+
 private OperationStats getPipelineStats(List<IngestStats.PipelineStat> pipelineStats, String id) {
 return pipelineStats.stream().filter(p1 -> p1.getPipelineId().equals(id)).findFirst().map(p2 -> p2.getStats()).orElse(null);
 }
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsActionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsActionTests.java index 75707f2a7853a..1695e3dea59c6 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsActionTests.java @@ -23,7 +23,6 @@ import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.core.index.Index;
 import org.opensearch.index.IndexService;
 import org.opensearch.index.IndexSettings;
@@ -32,7 +31,6 @@ import org.opensearch.index.shard.IndexShardTestCase;
 import org.opensearch.indices.IndicesService;
 import org.opensearch.indices.replication.common.ReplicationType;
-import org.opensearch.test.FeatureFlagSetter;
 import org.opensearch.test.transport.MockTransport;
 import
org.opensearch.transport.TransportService; @@ -113,7 +111,6 @@ public void tearDown() throws Exception { } public void testAllShardCopies() throws Exception { - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); RoutingTable routingTable = RoutingTable.builder().addAsNew(remoteStoreIndexMetadata).build(); Metadata metadata = Metadata.builder().put(remoteStoreIndexMetadata, false).build(); ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(routingTable).build(); @@ -133,7 +130,6 @@ public void testAllShardCopies() throws Exception { } public void testOnlyLocalShards() throws Exception { - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); String[] concreteIndices = new String[] { INDEX.getName() }; RoutingTable routingTable = spy(RoutingTable.builder().addAsNew(remoteStoreIndexMetadata).build()); doReturn(new PlainShardsIterator(routingTable.allShards(INDEX.getName()).stream().map(Mockito::spy).collect(Collectors.toList()))) @@ -161,7 +157,6 @@ public void testOnlyLocalShards() throws Exception { } public void testOnlyRemoteStoreEnabledShardCopies() throws Exception { - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); Index NEW_INDEX = new Index("newIndex", "newUUID"); IndexMetadata indexMetadataWithoutRemoteStore = IndexMetadata.builder(NEW_INDEX.getName()) .settings( diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java index 02fae835d7cbf..d1c2dda615992 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java @@ -48,12 +48,14 @@ import org.opensearch.gateway.GatewayMetaState.RemotePersistedState; import org.opensearch.gateway.remote.ClusterMetadataManifest; import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.repositories.fs.FsRepository; import org.opensearch.test.EqualsHashCodeTestUtils; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; import java.io.IOException; import java.util.Collections; +import java.util.Locale; import java.util.Optional; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -62,6 +64,9 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; @@ -920,48 +925,49 @@ public void testHandlePrePublishAndCommitWhenRemoteStateEnabled() throws IOExcep final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); final VotingConfiguration initialConfig = VotingConfiguration.of(node1); final ClusterState clusterState = clusterState(0L, 0L, node1, initialConfig, initialConfig, 42L); - Mockito.when(remoteClusterStateService.writeFullMetadata(clusterState)) - .thenReturn( - new ClusterMetadataManifest( - 0L, - 0L, - randomAlphaOfLength(10), - randomAlphaOfLength(10), - Version.CURRENT, - 
randomAlphaOfLength(10), - false, - Collections.emptyList() - ) - ); + final ClusterMetadataManifest manifest = new ClusterMetadataManifest( + 0L, + 0L, + randomAlphaOfLength(10), + randomAlphaOfLength(10), + Version.CURRENT, + randomAlphaOfLength(10), + false, + Collections.emptyList(), + randomAlphaOfLength(10) + ); + Mockito.when(remoteClusterStateService.writeFullMetadata(clusterState)).thenReturn(manifest); final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, ps1); persistedStateRegistry.addPersistedState(PersistedStateType.REMOTE, new RemotePersistedState(remoteClusterStateService)); - final Settings settings = Settings.builder() + + String randomRepoName = "randomRepoName"; + String stateRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + randomRepoName + ); + String stateRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + randomRepoName + ); + + Settings settings = Settings.builder() + .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, randomRepoName) + .put(stateRepoTypeAttributeKey, FsRepository.TYPE) + .put(stateRepoSettingsAttributeKeyPrefix + "location", "randomRepoPath") .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) .build(); + final CoordinationState coordinationState = createCoordinationState(persistedStateRegistry, node1, settings); coordinationState.handlePrePublish(clusterState); - Mockito.verifyNoInteractions(remoteClusterStateService); + Mockito.verify(remoteClusterStateService, Mockito.times(1)).writeFullMetadata(clusterState); assertThat(persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE).getLastAcceptedState(), equalTo(clusterState)); - final ClusterState clusterState2 = clusterState(0L, 1L, node1, initialConfig, initialConfig, 42L); - final ClusterMetadataManifest manifest2 = new ClusterMetadataManifest( - 0L, - 1L, - randomAlphaOfLength(10), - randomAlphaOfLength(10), - Version.CURRENT, - randomAlphaOfLength(10), - false, - Collections.emptyList() - ); - Mockito.when(remoteClusterStateService.writeFullMetadata(clusterState2)).thenReturn(manifest2); - coordinationState.handlePrePublish(clusterState2); - Mockito.verify(remoteClusterStateService, Mockito.times(1)).writeFullMetadata(clusterState2); - coordinationState.handlePreCommit(); - Mockito.verify(remoteClusterStateService, Mockito.times(1)).markLastStateAsCommitted(clusterState2, manifest2); + Mockito.verify(remoteClusterStateService, Mockito.times(1)).markLastStateAsCommitted(clusterState, manifest); } public static CoordinationState createCoordinationState( diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index e0844fd521d3f..5952cc1bcaac2 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -67,6 +67,7 @@ import java.util.Map; import java.util.stream.Collectors; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static 
org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; @@ -437,7 +438,8 @@ public void testPreventJoinClusterWithRemoteStoreNodeWithDifferentAttributesJoin for (Map.Entry nodeAttribute : existingNodeAttributes.entrySet()) { if (nodeAttribute.getKey() != REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY - && nodeAttribute.getKey() != REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY) { + && nodeAttribute.getKey() != REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY + && nodeAttribute.getKey() != REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY) { remoteStoreNodeAttributes.put(nodeAttribute.getKey(), nodeAttribute.getValue() + "-new"); validateAttributes(remoteStoreNodeAttributes, currentState, existingNode); remoteStoreNodeAttributes.put(nodeAttribute.getKey(), nodeAttribute.getValue()); @@ -465,6 +467,13 @@ public void testPreventJoinClusterWithRemoteStoreNodeWithDifferentNameAttributes } else if (REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY.equals(nodeAttribute.getKey())) { Map remoteStoreNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO + "new"); validateAttributes(remoteStoreNodeAttributes, currentState, existingNode); + } else if (REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY.equals(nodeAttribute.getKey())) { + Map remoteStoreNodeAttributes = remoteStoreNodeAttributes( + SEGMENT_REPO, + TRANSLOG_REPO, + CLUSTER_STATE_REPO + "new" + ); + validateAttributes(remoteStoreNodeAttributes, currentState, existingNode); } } } @@ -560,7 +569,7 @@ public void testUpdatesClusterStateWithSingleNodeCluster() throws Exception { assertThat(result.executionResults.entrySet(), hasSize(1)); final ClusterStateTaskExecutor.TaskResult taskResult = result.executionResults.values().iterator().next(); assertTrue(taskResult.isSuccess()); - validateRepositoryMetadata(result.resultingState, clusterManagerNode, 2); + validateRepositoryMetadata(result.resultingState, clusterManagerNode, 3); } public void testUpdatesClusterStateWithMultiNodeCluster() throws Exception { @@ -623,7 +632,7 @@ public void testUpdatesClusterStateWithMultiNodeCluster() throws Exception { assertThat(result.executionResults.entrySet(), hasSize(1)); final ClusterStateTaskExecutor.TaskResult taskResult = result.executionResults.values().iterator().next(); assertTrue(taskResult.isSuccess()); - validateRepositoryMetadata(result.resultingState, clusterManagerNode, 2); + validateRepositoryMetadata(result.resultingState, clusterManagerNode, 3); } public void testUpdatesClusterStateWithSingleNodeClusterAndSameRepository() throws Exception { @@ -668,7 +677,7 @@ public void testUpdatesClusterStateWithSingleNodeClusterAndSameRepository() thro assertThat(result.executionResults.entrySet(), hasSize(1)); final ClusterStateTaskExecutor.TaskResult taskResult = result.executionResults.values().iterator().next(); assertTrue(taskResult.isSuccess()); - validateRepositoryMetadata(result.resultingState, clusterManagerNode, 1); + validateRepositoryMetadata(result.resultingState, clusterManagerNode, 2); } public void testUpdatesClusterStateWithMultiNodeClusterAndSameRepository() throws Exception { @@ -729,7 +738,7 @@ public void testUpdatesClusterStateWithMultiNodeClusterAndSameRepository() throw 
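[Annotation] The mutation loop above flips one repository-name attribute at a time and expects the join to be rejected. A minimal sketch of the rule those mutations probe, under the assumption (mine, not the diff's) that a joining remote store node must present exactly the same remote-store attributes as the nodes already in the cluster; the production check lives in JoinTaskExecutor.ensureNodesCompatibility:

    import java.util.Map;

    final class RemoteStoreJoinCheckSketch {
        static void ensureCompatible(Map<String, String> joiningAttrs, Map<String, String> existingAttrs) {
            if (!joiningAttrs.equals(existingAttrs)) {
                throw new IllegalStateException(
                    "a remote store node is trying to join a remote store cluster "
                        + "with incompatible node attributes in comparison with existing node");
            }
        }
    }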
assertThat(result.executionResults.entrySet(), hasSize(1)); final ClusterStateTaskExecutor.TaskResult taskResult = result.executionResults.values().iterator().next(); assertTrue(taskResult.isSuccess()); - validateRepositoryMetadata(result.resultingState, clusterManagerNode, 1); + validateRepositoryMetadata(result.resultingState, clusterManagerNode, 2); } private void validateRepositoryMetadata(ClusterState updatedState, DiscoveryNode existingNode, int expectedRepositories) @@ -737,14 +746,17 @@ private void validateRepositoryMetadata(ClusterState updatedState, DiscoveryNode final RepositoriesMetadata repositoriesMetadata = updatedState.metadata().custom(RepositoriesMetadata.TYPE); assertTrue(repositoriesMetadata.repositories().size() == expectedRepositories); - if (repositoriesMetadata.repositories().size() == 2) { + if (repositoriesMetadata.repositories().size() == 2 || repositoriesMetadata.repositories().size() == 3) { final RepositoryMetadata segmentRepositoryMetadata = buildRepositoryMetadata(existingNode, SEGMENT_REPO); final RepositoryMetadata translogRepositoryMetadata = buildRepositoryMetadata(existingNode, TRANSLOG_REPO); for (RepositoryMetadata repositoryMetadata : repositoriesMetadata.repositories()) { if (repositoryMetadata.name().equals(segmentRepositoryMetadata.name())) { assertTrue(segmentRepositoryMetadata.equalsIgnoreGenerations(repositoryMetadata)); - } else if (repositoryMetadata.name().equals(segmentRepositoryMetadata.name())) { + } else if (repositoryMetadata.name().equals(translogRepositoryMetadata.name())) { assertTrue(translogRepositoryMetadata.equalsIgnoreGenerations(repositoryMetadata)); + } else if (repositoriesMetadata.repositories().size() == 3) { + final RepositoryMetadata clusterStateRepoMetadata = buildRepositoryMetadata(existingNode, CLUSTER_STATE_REPO); + assertTrue(clusterStateRepoMetadata.equalsIgnoreGenerations(repositoryMetadata)); } } } else if (repositoriesMetadata.repositories().size() == 1) { @@ -768,9 +780,14 @@ private DiscoveryNode newDiscoveryNode(Map attributes) { private static final String SEGMENT_REPO = "segment-repo"; private static final String TRANSLOG_REPO = "translog-repo"; + private static final String CLUSTER_STATE_REPO = "cluster-state-repo"; private static final String COMMON_REPO = "remote-repo"; private Map remoteStoreNodeAttributes(String segmentRepoName, String translogRepoName) { + return remoteStoreNodeAttributes(segmentRepoName, translogRepoName, CLUSTER_STATE_REPO); + } + + private Map remoteStoreNodeAttributes(String segmentRepoName, String translogRepoName, String clusterStateRepo) { String segmentRepositoryTypeAttributeKey = String.format( Locale.getDefault(), REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, @@ -791,6 +808,16 @@ private Map remoteStoreNodeAttributes(String segmentRepoName, St REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, translogRepoName ); + String clusterStateRepositoryTypeAttributeKey = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + clusterStateRepo + ); + String clusterStateRepositorySettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + clusterStateRepo + ); return new HashMap<>() { { @@ -802,6 +829,10 @@ private Map remoteStoreNodeAttributes(String segmentRepoName, St putIfAbsent(translogRepositoryTypeAttributeKey, "s3"); putIfAbsent(translogRepositorySettingsAttributeKeyPrefix + "bucket", "translog_bucket"); putIfAbsent(translogRepositorySettingsAttributeKeyPrefix + 
"base_path", "/translog/path"); + put(REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, clusterStateRepo); + putIfAbsent(clusterStateRepositoryTypeAttributeKey, "s3"); + putIfAbsent(clusterStateRepositorySettingsAttributeKeyPrefix + "bucket", "state_bucket"); + putIfAbsent(clusterStateRepositorySettingsAttributeKeyPrefix + "base_path", "/state/path"); } }; } @@ -812,16 +843,14 @@ private void validateAttributes(Map remoteStoreNodeAttributes, C IllegalStateException.class, () -> JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()) ); - assertTrue( - e.getMessage() - .equals( - "a remote store node [" - + joiningNode - + "] is trying to join a remote store cluster with incompatible node attributes in " - + "comparison with existing node [" - + existingNode - + "]" - ) + assertEquals( + e.getMessage(), + "a remote store node [" + + joiningNode + + "] is trying to join a remote store cluster with incompatible node attributes in " + + "comparison with existing node [" + + existingNode + + "]" ); } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index 86e154c547e07..8adaae6d230cd 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -61,7 +61,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -79,7 +78,6 @@ import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.snapshots.EmptySnapshotsInfoService; import org.opensearch.test.ClusterServiceUtils; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.opensearch.test.gateway.TestGatewayAllocator; @@ -1221,7 +1219,6 @@ public void testRemoteStoreNoUserOverrideExceptReplicationTypeSegmentIndexSettin .put(segmentRepositoryNameAttributeKey, "my-segment-repo-1") .put(translogRepositoryNameAttributeKey, "my-translog-repo-1") .build(); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); final Settings.Builder requestSettings = Settings.builder(); @@ -1253,7 +1250,6 @@ public void testRemoteStoreImplicitOverrideReplicationTypeToSegmentForRemoteStor .put(segmentRepositoryNameAttributeKey, "my-segment-repo-1") .put(translogRepositoryNameAttributeKey, "my-translog-repo-1") .build(); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); final Settings.Builder requestSettings = Settings.builder(); @@ -1285,7 +1281,6 @@ public void testRemoteStoreNoUserOverrideIndexSettings() { .put(segmentRepositoryNameAttributeKey, "my-segment-repo-1") .put(translogRepositoryNameAttributeKey, "my-translog-repo-1") .build(); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); Settings indexSettings = aggregateIndexSettings( @@ -1337,7 +1332,7 @@ public void 
testRemoteStoreDisabledByUserIndexSettings() { assertThat(validationErrors.size(), is(1)); assertThat( validationErrors.get(0), - is(String.format(Locale.ROOT, "expected [%s] to be private but it was not", SETTING_REMOTE_STORE_ENABLED)) + is(String.format(Locale.ROOT, "private index setting [%s] can not be set explicitly", SETTING_REMOTE_STORE_ENABLED)) ); })); } @@ -1371,7 +1366,13 @@ public void testRemoteStoreOverrideSegmentRepoIndexSettings() { assertThat(validationErrors.size(), is(1)); assertThat( validationErrors.get(0), - is(String.format(Locale.ROOT, "expected [%s] to be private but it was not", SETTING_REMOTE_SEGMENT_STORE_REPOSITORY)) + is( + String.format( + Locale.ROOT, + "private index setting [%s] can not be set explicitly", + SETTING_REMOTE_SEGMENT_STORE_REPOSITORY + ) + ) ); })); } @@ -1404,7 +1405,13 @@ public void testRemoteStoreOverrideTranslogRepoIndexSettings() { assertThat(validationErrors.size(), is(1)); assertThat( validationErrors.get(0), - is(String.format(Locale.ROOT, "expected [%s] to be private but it was not", SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY)) + is( + String.format( + Locale.ROOT, + "private index setting [%s] can not be set explicitly", + SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY + ) + ) ); })); } diff --git a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java index 936e2170bed6c..ed08a436d3ca0 100644 --- a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java +++ b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java @@ -70,6 +70,7 @@ public void testReadContextListener() throws InterruptedException, IOException { assertEquals(NUMBER_OF_PARTS * PART_SIZE, Files.size(fileLocation)); } + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9776") public void testReadContextListenerFailure() throws Exception { Path fileLocation = path.resolve(UUID.randomUUID().toString()); List blobPartStreams = initializeBlobPartStreams(); diff --git a/server/src/test/java/org/opensearch/crypto/CryptoManagerRegistryTests.java b/server/src/test/java/org/opensearch/crypto/CryptoHandlerRegistryTests.java similarity index 72% rename from server/src/test/java/org/opensearch/crypto/CryptoManagerRegistryTests.java rename to server/src/test/java/org/opensearch/crypto/CryptoHandlerRegistryTests.java index f6c8f71bd653c..93a7b3d3eb4b9 100644 --- a/server/src/test/java/org/opensearch/crypto/CryptoManagerRegistryTests.java +++ b/server/src/test/java/org/opensearch/crypto/CryptoHandlerRegistryTests.java @@ -9,10 +9,11 @@ package org.opensearch.crypto; import org.opensearch.cluster.metadata.CryptoMetadata; +import org.opensearch.common.crypto.CryptoHandler; import org.opensearch.common.crypto.MasterKeyProvider; import org.opensearch.common.settings.Settings; -import org.opensearch.encryption.CryptoManager; import org.opensearch.plugins.CryptoKeyProviderPlugin; +import org.opensearch.plugins.CryptoPlugin; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; @@ -24,30 +25,30 @@ import org.mockito.ArgumentMatchers; import org.mockito.Mockito; -public class CryptoManagerRegistryTests extends OpenSearchTestCase { +public class CryptoHandlerRegistryTests extends OpenSearchTestCase { - private TestCryptoManagerRegistry cryptoManagerRegistry; + private TestCryptoHandlerRegistry 
cryptoManagerRegistry; private String pluginTypeWithCreationFailure; private CryptoKeyProviderPlugin cryptoPlugin1; private CryptoKeyProviderPlugin cryptoPlugin2; @Before public void setup() { - List cryptoPlugins = new ArrayList<>(); + List cryptoKPPlugins = new ArrayList<>(); CryptoKeyProviderPlugin cryptoPlugin1 = Mockito.mock(CryptoKeyProviderPlugin.class); String pluginType1 = UUID.randomUUID().toString(); Mockito.when(cryptoPlugin1.type()).thenReturn(pluginType1); MasterKeyProvider masterKeyProvider1 = Mockito.mock(MasterKeyProvider.class); Mockito.when(cryptoPlugin1.createKeyProvider(ArgumentMatchers.any())).thenReturn(masterKeyProvider1); this.cryptoPlugin1 = cryptoPlugin1; - cryptoPlugins.add(cryptoPlugin1); + cryptoKPPlugins.add(cryptoPlugin1); CryptoKeyProviderPlugin cryptoPlugin2 = Mockito.mock(CryptoKeyProviderPlugin.class); String pluginType2 = UUID.randomUUID().toString(); Mockito.when(cryptoPlugin2.type()).thenReturn(pluginType2); MasterKeyProvider masterKeyProvider2 = Mockito.mock(MasterKeyProvider.class); Mockito.when(cryptoPlugin2.createKeyProvider(ArgumentMatchers.any())).thenReturn(masterKeyProvider2); - cryptoPlugins.add(cryptoPlugin2); + cryptoKPPlugins.add(cryptoPlugin2); this.cryptoPlugin2 = cryptoPlugin2; CryptoKeyProviderPlugin cryptoPluginCreationFailure = Mockito.mock(CryptoKeyProviderPlugin.class); @@ -55,20 +56,33 @@ public void setup() { Mockito.when(cryptoPluginCreationFailure.type()).thenReturn(pluginTypeWithCreationFailure); Mockito.when(cryptoPluginCreationFailure.createKeyProvider(ArgumentMatchers.any())) .thenThrow(new RuntimeException("Injected failure")); - cryptoPlugins.add(cryptoPluginCreationFailure); + cryptoKPPlugins.add(cryptoPluginCreationFailure); + + cryptoManagerRegistry = new TestCryptoHandlerRegistry(new TestCryptoPlugin(), cryptoKPPlugins, Settings.EMPTY); + } + + static class TestCryptoPlugin implements CryptoPlugin { - cryptoManagerRegistry = new TestCryptoManagerRegistry(cryptoPlugins, Settings.EMPTY); + @Override + public CryptoHandler getOrCreateCryptoHandler( + MasterKeyProvider keyProvider, + String keyProviderName, + String keyProviderType, + Runnable onClose + ) { + return Mockito.mock(CryptoHandler.class); + } } - static class TestCryptoManagerRegistry extends CryptoManagerRegistry { + static class TestCryptoHandlerRegistry extends CryptoHandlerRegistry { - protected TestCryptoManagerRegistry(List cryptoPlugins, Settings settings) { - super(cryptoPlugins, settings); + protected TestCryptoHandlerRegistry(CryptoPlugin cryptoPlugin, List cryptoPlugins, Settings settings) { + super(List.of(cryptoPlugin), cryptoPlugins, settings); } @Override - public Map loadCryptoFactories(List cryptoPlugins) { - return super.loadCryptoFactories(cryptoPlugins); + public Map loadCryptoFactories(List cryptoKPPlugins) { + return super.loadCryptoFactories(cryptoKPPlugins); } } @@ -115,44 +129,40 @@ public void testCryptoManagerMissing() { String pluginName = UUID.randomUUID().toString(); String pluginType = UUID.randomUUID().toString(); CryptoMetadata cryptoMetadata = new CryptoMetadata(pluginName, pluginType, Settings.EMPTY); - expectThrows(CryptoRegistryException.class, () -> cryptoManagerRegistry.fetchCryptoManager(cryptoMetadata)); + expectThrows(CryptoRegistryException.class, () -> cryptoManagerRegistry.fetchCryptoHandler(cryptoMetadata)); } public void testCryptoManagerCreationFailure() { String pluginName = UUID.randomUUID().toString(); CryptoMetadata cryptoMetadata = new CryptoMetadata(pluginName, pluginTypeWithCreationFailure, 
Settings.EMPTY); - expectThrows(CryptoRegistryException.class, () -> cryptoManagerRegistry.fetchCryptoManager(cryptoMetadata)); + expectThrows(CryptoRegistryException.class, () -> cryptoManagerRegistry.fetchCryptoHandler(cryptoMetadata)); } public void testCryptoManagerCreationSuccess() { String pluginName1 = UUID.randomUUID().toString(); CryptoMetadata cryptoMetadata = new CryptoMetadata(pluginName1, cryptoPlugin1.type(), Settings.EMPTY); - CryptoManager createdCryptoManager1 = cryptoManagerRegistry.fetchCryptoManager(cryptoMetadata); - assertNotNull(createdCryptoManager1); - assertEquals(cryptoPlugin1.type(), createdCryptoManager1.type()); - assertEquals(cryptoMetadata.keyProviderName(), createdCryptoManager1.name()); - assertEquals(cryptoMetadata.keyProviderType(), createdCryptoManager1.type()); + CryptoHandler cryptoHandler = cryptoManagerRegistry.fetchCryptoHandler(cryptoMetadata); + assertNotNull(cryptoHandler); String pluginName2 = UUID.randomUUID().toString(); - CryptoManager createdCryptoManager2 = cryptoManagerRegistry.fetchCryptoManager( + CryptoHandler cryptoHandler2 = cryptoManagerRegistry.fetchCryptoHandler( new CryptoMetadata(pluginName2, cryptoPlugin2.type(), Settings.EMPTY) ); - assertNotNull(createdCryptoManager2); - assertEquals(pluginName2, createdCryptoManager2.name()); - assertEquals(cryptoPlugin2.type(), createdCryptoManager2.type()); - CryptoManager createdCryptoManager3 = cryptoManagerRegistry.fetchCryptoManager( + assertNotNull(cryptoHandler2); + CryptoHandler cryptoHandler3 = cryptoManagerRegistry.fetchCryptoHandler( new CryptoMetadata(pluginName1, cryptoPlugin1.type(), Settings.EMPTY) ); - assertNotNull(createdCryptoManager3); - assertEquals(createdCryptoManager1, createdCryptoManager3); + assertNotNull(cryptoHandler3); + assertEquals(cryptoHandler, cryptoHandler3); + assertNotEquals(cryptoHandler2, cryptoHandler); - CryptoManager createdCryptoMgrNewType = cryptoManagerRegistry.fetchCryptoManager( + CryptoHandler cryptoHandlerNewType = cryptoManagerRegistry.fetchCryptoHandler( new CryptoMetadata(pluginName1, cryptoPlugin2.type(), Settings.EMPTY) ); - assertNotNull(createdCryptoMgrNewType); - assertNotEquals(createdCryptoManager1, createdCryptoMgrNewType); - assertNotEquals(createdCryptoManager2, createdCryptoMgrNewType); - assertNotEquals(createdCryptoManager3, createdCryptoMgrNewType); + assertNotNull(cryptoHandlerNewType); + assertNotEquals(cryptoHandler, cryptoHandlerNewType); + assertNotEquals(cryptoHandler2, cryptoHandlerNewType); + assertNotEquals(cryptoHandler3, cryptoHandlerNewType); } } diff --git a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java index c293768151fa4..6a2f4cd0ab300 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java @@ -42,6 +42,7 @@ import org.opensearch.cluster.coordination.CoordinationMetadata; import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfigExclusion; import org.opensearch.cluster.coordination.CoordinationState; +import org.opensearch.cluster.coordination.CoordinationState.PersistedState; import org.opensearch.cluster.coordination.PersistedStateRegistry; import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType; import org.opensearch.cluster.metadata.IndexMetadata; @@ -51,6 +52,7 @@ import 
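[Annotation] The renamed CryptoHandlerRegistryTests above assert that fetchCryptoHandler returns the same handler for a repeated (key-provider name, key-provider type) pair and distinct handlers for any new combination. A stand-in sketch of that caching shape; the class and map here are illustrative, not CryptoHandlerRegistry's real fields:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Supplier;

    final class HandlerCacheSketch<H> {
        private final Map<String, H> byNameAndType = new ConcurrentHashMap<>();

        // Same (name, type) -> same cached instance; any new combination -> a fresh one.
        H getOrCreate(String providerName, String providerType, Supplier<H> factory) {
            return byNameAndType.computeIfAbsent(providerName + "|" + providerType, key -> factory.get());
        }
    }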
org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.BigArrays; @@ -63,10 +65,14 @@ import org.opensearch.env.NodeEnvironment; import org.opensearch.env.TestEnvironment; import org.opensearch.gateway.GatewayMetaState.RemotePersistedState; +import org.opensearch.gateway.PersistedClusterStateService.Writer; import org.opensearch.gateway.remote.ClusterMetadataManifest; import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.index.recovery.RemoteStoreRestoreService; +import org.opensearch.index.recovery.RemoteStoreRestoreService.RemoteRestoreResult; import org.opensearch.node.Node; import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.fs.FsRepository; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -79,19 +85,29 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Locale; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; import org.mockito.Mockito; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_INDEX_UUID; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled; import static org.opensearch.test.NodeRoles.nonClusterManagerNode; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; import static org.mockito.Mockito.when; public class GatewayMetaStatePersistedStateTests extends OpenSearchTestCase { @@ -445,7 +461,7 @@ public void testDataOnlyNodePersistence() throws Exception { () -> 0L ); Supplier remoteClusterStateServiceSupplier = () -> { - if (RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.get(settings) == true) { + if (isRemoteStoreClusterStateEnabled(settings)) { return new RemoteClusterStateService( nodeEnvironment.nodeId(), () -> new RepositoriesService( @@ -473,7 +489,8 @@ public void testDataOnlyNodePersistence() throws Exception { null, persistedClusterStateService, remoteClusterStateServiceSupplier.get(), - new PersistedStateRegistry() + new PersistedStateRegistry(), + null ); final CoordinationState.PersistedState persistedState = gateway.getPersistedState(); assertThat(persistedState, instanceOf(GatewayMetaState.AsyncLucenePersistedState.class)); @@ -706,7 +723,7 @@ public void testRemotePersistedState() throws IOException { ); 
remotePersistedState.setLastAcceptedState(clusterState); - Mockito.verify(remoteClusterStateService, times(0)).writeFullMetadata(Mockito.any()); + Mockito.verify(remoteClusterStateService).writeFullMetadata(clusterState); assertThat(remotePersistedState.getLastAcceptedState(), equalTo(clusterState)); assertThat(remotePersistedState.getCurrentTerm(), equalTo(clusterTerm)); @@ -717,7 +734,7 @@ public void testRemotePersistedState() throws IOException { ); remotePersistedState.setLastAcceptedState(secondClusterState); - Mockito.verify(remoteClusterStateService, times(1)).writeFullMetadata(Mockito.any()); + Mockito.verify(remoteClusterStateService, times(1)).writeFullMetadata(secondClusterState); assertThat(remotePersistedState.getLastAcceptedState(), equalTo(secondClusterState)); assertThat(remotePersistedState.getCurrentTerm(), equalTo(clusterTerm)); @@ -742,26 +759,40 @@ public void testRemotePersistedStateExceptionOnFullStateUpload() throws IOExcept Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build() ); - remotePersistedState.setLastAcceptedState(clusterState); - - final ClusterState secondClusterState = createClusterState( - randomNonNegativeLong(), - Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build() - ); - - assertThrows(OpenSearchException.class, () -> remotePersistedState.setLastAcceptedState(secondClusterState)); + assertThrows(OpenSearchException.class, () -> remotePersistedState.setLastAcceptedState(clusterState)); } public void testGatewayForRemoteState() throws IOException { MockGatewayMetaState gateway = null; try { - gateway = new MockGatewayMetaState(localNode, bigArrays); + RemoteClusterStateService remoteClusterStateService = mock(RemoteClusterStateService.class); + when(remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster")).thenReturn("test-cluster-uuid"); + RemoteStoreRestoreService remoteStoreRestoreService = mock(RemoteStoreRestoreService.class); + when(remoteStoreRestoreService.restore(any(), any(), anyBoolean(), any())).thenReturn( + RemoteRestoreResult.build("test-cluster-uuid", null, ClusterState.EMPTY_STATE) + ); + gateway = new MockGatewayMetaState(localNode, bigArrays, remoteClusterStateService, remoteStoreRestoreService); final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); - final Settings settingWithRemoteStateEnabled = Settings.builder() - .put(settings) + + String stateRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + "randomRepoName" + ); + String stateRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + "randomRepoName" + ); + + Settings settings = Settings.builder() + .put("node.attr." 
+ REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, "randomRepoName") + .put(stateRepoTypeAttributeKey, FsRepository.TYPE) + .put(stateRepoSettingsAttributeKeyPrefix + "location", "randomRepoPath") .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) .build(); - gateway.start(settingWithRemoteStateEnabled, nodeEnvironment, xContentRegistry(), persistedStateRegistry); + gateway.start(settings, nodeEnvironment, xContentRegistry(), persistedStateRegistry); + final CoordinationState.PersistedState persistedState = gateway.getPersistedState(); assertThat(persistedState, instanceOf(GatewayMetaState.LucenePersistedState.class)); assertThat( @@ -777,6 +808,173 @@ public void testGatewayForRemoteState() throws IOException { } } + public void testGatewayForRemoteStateForInitialBootstrap() throws IOException { + MockGatewayMetaState gateway = null; + try { + final RemoteClusterStateService remoteClusterStateService = mock(RemoteClusterStateService.class); + when(remoteClusterStateService.getLastKnownUUIDFromRemote(clusterName.value())).thenReturn(ClusterState.UNKNOWN_UUID); + + final RemoteStoreRestoreService remoteStoreRestoreService = mock(RemoteStoreRestoreService.class); + when(remoteStoreRestoreService.restore(any(), any(), anyBoolean(), any())).thenReturn( + RemoteRestoreResult.build("test-cluster-uuid", null, ClusterState.EMPTY_STATE) + ); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + gateway = newGatewayForRemoteState( + remoteClusterStateService, + remoteStoreRestoreService, + persistedStateRegistry, + ClusterState.EMPTY_STATE + ); + final CoordinationState.PersistedState lucenePersistedState = gateway.getPersistedState(); + PersistedState remotePersistedState = persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE); + verify(remoteClusterStateService).getLastKnownUUIDFromRemote(Mockito.any()); // change this + verifyNoInteractions(remoteStoreRestoreService); + assertThat(remotePersistedState.getLastAcceptedState(), nullValue()); + assertThat(lucenePersistedState.getLastAcceptedState().metadata(), equalTo(ClusterState.EMPTY_STATE.metadata())); + } finally { + IOUtils.close(gateway); + } + } + + public void testGatewayForRemoteStateForNodeReplacement() throws IOException { + MockGatewayMetaState gateway = null; + try { + final RemoteClusterStateService remoteClusterStateService = mock(RemoteClusterStateService.class); + when(remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster")).thenReturn("test-cluster-uuid"); + final ClusterState previousState = createClusterState( + randomNonNegativeLong(), + Metadata.builder() + .coordinationMetadata(CoordinationMetadata.builder().term(randomLong()).build()) + .put( + IndexMetadata.builder("test-index1") + .settings(settings(Version.CURRENT).put(SETTING_INDEX_UUID, randomAlphaOfLength(10))) + .numberOfShards(5) + .numberOfReplicas(1) + .build(), + false + ) + .clusterUUID(randomAlphaOfLength(10)) + .build() + ); + when(remoteClusterStateService.getLastKnownUUIDFromRemote(clusterName.value())).thenReturn( + previousState.metadata().clusterUUID() + ); + + final RemoteStoreRestoreService remoteStoreRestoreService = mock(RemoteStoreRestoreService.class); + when(remoteStoreRestoreService.restore(any(), any(), anyBoolean(), any())).thenReturn( + RemoteRestoreResult.build("test-cluster-uuid", null, previousState) + ); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + gateway = newGatewayForRemoteState( + 
remoteClusterStateService, + remoteStoreRestoreService, + persistedStateRegistry, + ClusterState.EMPTY_STATE + ); + final CoordinationState.PersistedState lucenePersistedState = gateway.getPersistedState(); + PersistedState remotePersistedState = persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE); + verify(remoteClusterStateService).getLastKnownUUIDFromRemote(Mockito.any()); + verify(remoteStoreRestoreService).restore(any(), any(), anyBoolean(), any()); + assertThat(remotePersistedState.getLastAcceptedState(), nullValue()); + assertThat(lucenePersistedState.getLastAcceptedState().metadata(), equalTo(previousState.metadata())); + } finally { + IOUtils.close(gateway); + } + } + + public void testGatewayForRemoteStateForNodeReboot() throws IOException { + MockGatewayMetaState gateway = null; + try { + final RemoteClusterStateService remoteClusterStateService = mock(RemoteClusterStateService.class); + final RemoteStoreRestoreService remoteStoreRestoreService = mock(RemoteStoreRestoreService.class); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + final IndexMetadata indexMetadata = IndexMetadata.builder("test-index1") + .settings(settings(Version.CURRENT).put(SETTING_INDEX_UUID, randomAlphaOfLength(10))) + .numberOfShards(5) + .numberOfReplicas(1) + .build(); + final ClusterState clusterState = createClusterState( + randomNonNegativeLong(), + Metadata.builder() + .coordinationMetadata(CoordinationMetadata.builder().term(randomLong()).build()) + .put(indexMetadata, false) + .clusterUUID(randomAlphaOfLength(10)) + .build() + ); + gateway = newGatewayForRemoteState(remoteClusterStateService, remoteStoreRestoreService, persistedStateRegistry, clusterState); + final CoordinationState.PersistedState lucenePersistedState = gateway.getPersistedState(); + PersistedState remotePersistedState = persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE); + verifyNoInteractions(remoteClusterStateService); + verifyNoInteractions(remoteStoreRestoreService); + assertThat(remotePersistedState.getLastAcceptedState(), nullValue()); + logger.info("lucene state metadata: {}", lucenePersistedState.getLastAcceptedState().toString()); + logger.info("initial metadata: {}", clusterState.toString()); + assertThat(lucenePersistedState.getLastAcceptedState().metadata().indices().size(), equalTo(1)); + assertThat(lucenePersistedState.getLastAcceptedState().metadata().indices().get("test-index1"), equalTo(indexMetadata)); + } finally { + IOUtils.close(gateway); + } + } + + private MockGatewayMetaState newGatewayForRemoteState( + RemoteClusterStateService remoteClusterStateService, + RemoteStoreRestoreService remoteStoreRestoreService, + PersistedStateRegistry persistedStateRegistry, + ClusterState currentState + ) throws IOException { + MockGatewayMetaState gateway = new MockGatewayMetaState(localNode, bigArrays); + String randomRepoName = "randomRepoName"; + String stateRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + randomRepoName + ); + String stateRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + randomRepoName + ); + Settings settingWithRemoteStateEnabled = Settings.builder() + .put(settings) + .put("node.attr." 
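[Annotation] The three testGatewayForRemoteState* cases above pin down one startup decision each: initial bootstrap (remote returns UNKNOWN_UUID, the restore service is never touched), node replacement (remote knows a previous cluster UUID, restore is invoked), and node reboot (valid local state on disk, neither remote service is consulted). A condensed sketch of that branch structure with stand-in interfaces, since the production wiring in GatewayMetaState is more involved; "_na_" is assumed to be the ClusterState.UNKNOWN_UUID sentinel:

    import java.util.Objects;

    final class RemoteBootstrapSketch {
        static final String UNKNOWN_UUID = "_na_"; // assumed value of ClusterState.UNKNOWN_UUID

        interface RemoteState { String lastKnownUuid(String clusterName); } // stands in for RemoteClusterStateService
        interface Restorer { void restoreFrom(String clusterUuid); }        // stands in for RemoteStoreRestoreService

        static void onNodeStart(boolean hasLocalState, String clusterName, RemoteState remote, Restorer restorer) {
            if (hasLocalState) {
                return; // node reboot: recover from disk, remote services never touched
            }
            String lastUuid = remote.lastKnownUuid(clusterName);
            if (Objects.equals(lastUuid, UNKNOWN_UUID)) {
                return; // initial bootstrap: no previous cluster, start from empty state
            }
            restorer.restoreFrom(lastUuid); // node replacement: auto-restore last known metadata
        }
    }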
+ REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, randomRepoName) + .put(stateRepoTypeAttributeKey, FsRepository.TYPE) + .put(stateRepoSettingsAttributeKeyPrefix + "location", "randomRepoPath") + .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .build(); + final TransportService transportService = mock(TransportService.class); + ClusterService clusterService = mock(ClusterService.class); + when(clusterService.getClusterSettings()).thenReturn( + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + final PersistedClusterStateService persistedClusterStateService = new PersistedClusterStateService( + nodeEnvironment, + xContentRegistry(), + getBigArrays(), + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + () -> 0L + ); + if (!ClusterState.EMPTY_STATE.equals(currentState)) { + Writer writer = persistedClusterStateService.createWriter(); + writer.writeFullStateAndCommit(currentState.term(), currentState); + writer.close(); + } + final MetaStateService metaStateService = mock(MetaStateService.class); + when(metaStateService.loadFullState()).thenReturn(new Tuple<>(Manifest.empty(), ClusterState.EMPTY_STATE.metadata())); + gateway.start( + settingWithRemoteStateEnabled, + transportService, + clusterService, + metaStateService, + null, + null, + persistedClusterStateService, + remoteClusterStateService, + persistedStateRegistry, + remoteStoreRestoreService + ); + return gateway; + } + private static BigArrays getBigArrays() { return usually() ? BigArrays.NON_RECYCLING_INSTANCE diff --git a/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java index 449af91c34531..9f8dde5ba9d45 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java @@ -36,7 +36,8 @@ public void testClusterMetadataManifestXContent() throws IOException { Version.CURRENT, "test-node-id", false, - Collections.singletonList(uploadedIndexMetadata) + Collections.singletonList(uploadedIndexMetadata), + "prev-cluster-uuid" ); final XContentBuilder builder = JsonXContent.contentBuilder(); builder.startObject(); @@ -58,7 +59,8 @@ public void testClusterMetadataManifestSerializationEqualsHashCode() { Version.CURRENT, "B10RX1f5RJenMQvYccCgSQ", true, - randomUploadedIndexMetadataList() + randomUploadedIndexMetadataList(), + "yfObdx8KSMKKrXf8UyHhM" ); { // Mutate Cluster Term EqualsHashCodeTestUtils.checkEqualsAndHashCode( @@ -165,6 +167,22 @@ public void testClusterMetadataManifestSerializationEqualsHashCode() { } ); } + { // Mutate Previous cluster UUID + EqualsHashCodeTestUtils.checkEqualsAndHashCode( + initialManifest, + orig -> OpenSearchTestCase.copyWriteable( + orig, + new NamedWriteableRegistry(Collections.emptyList()), + ClusterMetadataManifest::new + ), + manifest -> { + ClusterMetadataManifest.Builder builder = ClusterMetadataManifest.builder(manifest); + builder.previousClusterUUID("vZX62DCQEOzGXlxXCrEu"); + return builder.build(); + } + ); + + } } private List randomUploadedIndexMetadataList() { diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index d4e090b046760..8c6ccea940816 100644 --- 
a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -15,14 +15,21 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.blobstore.stream.write.WriteContext; +import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.blobstore.support.PlainBlobMetadata; +import org.opensearch.common.blobstore.transfer.RemoteTransferContainer; import org.opensearch.common.compress.DeflateCompressor; +import org.opensearch.common.lucene.store.ByteArrayIndexInput; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -31,6 +38,7 @@ import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.RepositoryMissingException; import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.repositories.fs.FsRepository; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.junit.Assert; @@ -44,15 +52,25 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; +import java.util.Optional; +import java.util.Set; import java.util.function.Supplier; +import org.mockito.ArgumentCaptor; import org.mockito.ArgumentMatchers; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -70,10 +88,25 @@ public void setup() { repositoriesServiceSupplier = mock(Supplier.class); repositoriesService = mock(RepositoriesService.class); when(repositoriesServiceSupplier.get()).thenReturn(repositoriesService); - final Settings settings = Settings.builder() + + String stateRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + "remote_store_repository" + ); + String stateRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." 
+ REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + "remote_store_repository" + ); + + Settings settings = Settings.builder() + .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, "remote_store_repository") + .put(stateRepoTypeAttributeKey, FsRepository.TYPE) + .put(stateRepoSettingsAttributeKeyPrefix + "location", "randomRepoPath") .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) - .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_REPOSITORY_SETTING.getKey(), "remote_store_repository") .build(); + blobStoreRepository = mock(BlobStoreRepository.class); blobStore = mock(BlobStore.class); when(blobStoreRepository.blobStore()).thenReturn(blobStore); @@ -108,22 +141,21 @@ public void testFailInitializationWhenRemoteStateDisabled() throws IOException { ); } - public void testFailWriteFullMetadataWhenRepositoryNotSet() { - final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + public void testFailInitializeWhenRepositoryNotSet() { doThrow(new RepositoryMissingException("repository missing")).when(repositoriesService).repository("remote_store_repository"); - assertThrows(RepositoryMissingException.class, () -> remoteClusterStateService.writeFullMetadata(clusterState)); + assertThrows(RepositoryMissingException.class, () -> remoteClusterStateService.start()); } public void testFailWriteFullMetadataWhenNotBlobRepository() { final FilterRepository filterRepository = mock(FilterRepository.class); when(repositoriesService.repository("remote_store_repository")).thenReturn(filterRepository); - final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); - assertThrows(AssertionError.class, () -> remoteClusterStateService.writeFullMetadata(clusterState)); + assertThrows(AssertionError.class, () -> remoteClusterStateService.start()); } public void testWriteFullMetadataSuccess() throws IOException { final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); mockBlobStoreObjects(); + remoteClusterStateService.start(); final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState); final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); List indices = List.of(uploadedIndexMetadata); @@ -134,6 +166,7 @@ public void testWriteFullMetadataSuccess() throws IOException { .stateVersion(1L) .stateUUID("state-uuid") .clusterUUID("cluster-uuid") + .previousClusterUUID("prev-cluster-uuid") .build(); assertThat(manifest.getIndices().size(), is(1)); @@ -146,8 +179,83 @@ public void testWriteFullMetadataSuccess() throws IOException { assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); } + public void testWriteFullMetadataInParallelSuccess() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + AsyncMultiStreamBlobContainer container = (AsyncMultiStreamBlobContainer) mockBlobStoreObjects(AsyncMultiStreamBlobContainer.class); + + ArgumentCaptor> actionListenerArgumentCaptor = ArgumentCaptor.forClass(ActionListener.class); + ArgumentCaptor writeContextArgumentCaptor = ArgumentCaptor.forClass(WriteContext.class); + + doAnswer((i) -> { + actionListenerArgumentCaptor.getValue().onResponse(null); + return null; + 
}).when(container).asyncBlobUpload(writeContextArgumentCaptor.capture(), actionListenerArgumentCaptor.capture()); + + remoteClusterStateService.start(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState); + + final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); + List indices = List.of(uploadedIndexMetadata); + + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(indices) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .build(); + + assertThat(manifest.getIndices().size(), is(1)); + assertThat(manifest.getIndices().get(0).getIndexName(), is(uploadedIndexMetadata.getIndexName())); + assertThat(manifest.getIndices().get(0).getIndexUUID(), is(uploadedIndexMetadata.getIndexUUID())); + assertThat(manifest.getIndices().get(0).getUploadedFilename(), notNullValue()); + assertThat(manifest.getClusterTerm(), is(expectedManifest.getClusterTerm())); + assertThat(manifest.getStateVersion(), is(expectedManifest.getStateVersion())); + assertThat(manifest.getClusterUUID(), is(expectedManifest.getClusterUUID())); + assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); + + assertEquals(actionListenerArgumentCaptor.getAllValues().size(), 1); + assertEquals(writeContextArgumentCaptor.getAllValues().size(), 1); + + WriteContext capturedWriteContext = writeContextArgumentCaptor.getValue(); + byte[] writtenBytes = capturedWriteContext.getStreamProvider(Integer.MAX_VALUE).provideStream(0).getInputStream().readAllBytes(); + IndexMetadata writtenIndexMetadata = RemoteClusterStateService.INDEX_METADATA_FORMAT.deserialize( + capturedWriteContext.getFileName(), + blobStoreRepository.getNamedXContentRegistry(), + new BytesArray(writtenBytes) + ); + + assertEquals(capturedWriteContext.getWritePriority(), WritePriority.HIGH); + assertEquals(writtenIndexMetadata.getNumberOfShards(), 1); + assertEquals(writtenIndexMetadata.getNumberOfReplicas(), 0); + assertEquals(writtenIndexMetadata.getIndex().getName(), "test-index"); + assertEquals(writtenIndexMetadata.getIndex().getUUID(), "index-uuid"); + long expectedChecksum = RemoteTransferContainer.checksumOfChecksum(new ByteArrayIndexInput("metadata-filename", writtenBytes), 8); + assertEquals(capturedWriteContext.getExpectedChecksum().longValue(), expectedChecksum); + + } + + public void testWriteFullMetadataInParallelFailure() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + AsyncMultiStreamBlobContainer container = (AsyncMultiStreamBlobContainer) mockBlobStoreObjects(AsyncMultiStreamBlobContainer.class); + + ArgumentCaptor> actionListenerArgumentCaptor = ArgumentCaptor.forClass(ActionListener.class); + + doAnswer((i) -> { + actionListenerArgumentCaptor.getValue().onFailure(new RuntimeException("Cannot upload to remote")); + return null; + }).when(container).asyncBlobUpload(any(WriteContext.class), actionListenerArgumentCaptor.capture()); + + remoteClusterStateService.start(); + assertThrows( + RemoteClusterStateService.IndexMetadataTransferException.class, + () -> remoteClusterStateService.writeFullMetadata(clusterState) + ); + } + public void testFailWriteIncrementalMetadataNonClusterManagerNode() throws IOException { final ClusterState clusterState = generateClusterStateWithOneIndex().build(); + remoteClusterStateService.start(); final 
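[Annotation] The parallel-upload tests above use a standard Mockito capture-and-complete pattern: stub the async method so it completes the captured listener inline, then assert on the captured request after the call. Reduced to its essentials with simplified stand-in types (the real tests capture WriteContext and an ActionListener):

    import static org.junit.Assert.assertEquals;
    import static org.mockito.ArgumentMatchers.any;
    import static org.mockito.Mockito.doAnswer;
    import static org.mockito.Mockito.mock;

    import java.util.function.Consumer;
    import org.mockito.ArgumentCaptor;

    final class AsyncMockSketch {
        interface AsyncUploader { void upload(String request, Consumer<Void> onDone); } // hypothetical

        static void demo() {
            AsyncUploader uploader = mock(AsyncUploader.class);
            ArgumentCaptor<String> requestCaptor = ArgumentCaptor.forClass(String.class);
            doAnswer(invocation -> {
                invocation.<Consumer<Void>>getArgument(1).accept(null); // complete the callback inline
                return null;
            }).when(uploader).upload(requestCaptor.capture(), any());

            uploader.upload("payload", v -> {});
            assertEquals("payload", requestCaptor.getValue());
        }
    }

Completing the listener inside doAnswer keeps the "async" path synchronous, so the test needs no latches or timeouts; the failure-path test does the same but calls the failure callback instead.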
ClusterMetadataManifest manifest = remoteClusterStateService.writeIncrementalMetadata(clusterState, clusterState, null); Assert.assertThat(manifest, nullValue()); } @@ -174,7 +282,7 @@ public void testWriteIncrementalMetadataSuccess() throws IOException { final ClusterMetadataManifest previousManifest = ClusterMetadataManifest.builder().indices(Collections.emptyList()).build(); - remoteClusterStateService.ensureRepositorySet(); + remoteClusterStateService.start(); final ClusterMetadataManifest manifest = remoteClusterStateService.writeIncrementalMetadata( previousClusterState, clusterState, @@ -189,6 +297,7 @@ public void testWriteIncrementalMetadataSuccess() throws IOException { .stateVersion(1L) .stateUUID("state-uuid") .clusterUUID("cluster-uuid") + .previousClusterUUID("prev-cluster-uuid") .build(); assertThat(manifest.getIndices().size(), is(1)); @@ -213,7 +322,7 @@ public void testReadLatestMetadataManifestFailedIOException() throws IOException ) ).thenThrow(IOException.class); - remoteClusterStateService.ensureRepositorySet(); + remoteClusterStateService.start(); Exception e = assertThrows( IllegalStateException.class, () -> remoteClusterStateService.getLatestClusterMetadataManifest( @@ -236,15 +345,12 @@ public void testReadLatestMetadataManifestFailedNoManifestFileInRemote() throws ) ).thenReturn(List.of()); - remoteClusterStateService.ensureRepositorySet(); - Exception e = assertThrows( - IllegalStateException.class, - () -> remoteClusterStateService.getLatestClusterMetadataManifest( - clusterState.getClusterName().value(), - clusterState.metadata().clusterUUID() - ) + remoteClusterStateService.start(); + Optional manifest = remoteClusterStateService.getLatestClusterMetadataManifest( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() ); - assertEquals(e.getMessage(), "Remote Cluster State not found - " + clusterState.metadata().clusterUUID()); + assertEquals(manifest, Optional.empty()); } public void testReadLatestMetadataManifestFailedManifestFileRemoveAfterFetchInRemote() throws IOException { @@ -261,7 +367,7 @@ public void testReadLatestMetadataManifestFailedManifestFileRemoveAfterFetchInRe ).thenReturn(Arrays.asList(blobMetadata)); when(blobContainer.readBlob("manifestFileName")).thenThrow(FileNotFoundException.class); - remoteClusterStateService.ensureRepositorySet(); + remoteClusterStateService.start(); Exception e = assertThrows( IllegalStateException.class, () -> remoteClusterStateService.getLatestClusterMetadataManifest( @@ -282,12 +388,13 @@ public void testReadLatestMetadataManifestSuccessButNoIndexMetadata() throws IOE .clusterUUID("cluster-uuid") .nodeId("nodeA") .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID("prev-cluster-uuid") .build(); BlobContainer blobContainer = mockBlobStoreObjects(); mockBlobContainer(blobContainer, expectedManifest, Map.of()); - remoteClusterStateService.ensureRepositorySet(); + remoteClusterStateService.start(); assertEquals( remoteClusterStateService.getLatestIndexMetadata(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID()) .size(), @@ -307,13 +414,14 @@ public void testReadLatestMetadataManifestSuccessButIndexMetadataFetchIOExceptio .clusterUUID("cluster-uuid") .nodeId("nodeA") .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID("prev-cluster-uuid") .build(); BlobContainer blobContainer = mockBlobStoreObjects(); mockBlobContainer(blobContainer, expectedManifest, Map.of()); 
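[Annotation] Another behavioral shift visible above: getLatestClusterMetadataManifest now reports a missing manifest as Optional.empty() instead of throwing IllegalStateException, so "no remote state yet" becomes an ordinary outcome rather than an error. A shape-only sketch of the caller side, with a stand-in reader interface:

    import java.util.Optional;

    final class ManifestLookupSketch {
        interface StateReader { Optional<String> latestManifest(String clusterName, String clusterUuid); } // stand-in

        static String plan(StateReader reader, String clusterName, String clusterUuid) {
            return reader.latestManifest(clusterName, clusterUuid)
                .map(manifest -> "restore from " + manifest)
                .orElse("no remote state for this UUID: treat as fresh, not as an error");
        }
    }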
when(blobContainer.readBlob(uploadedIndexMetadata.getUploadedFilename() + ".dat")).thenThrow(FileNotFoundException.class); - remoteClusterStateService.ensureRepositorySet(); + remoteClusterStateService.start(); Exception e = assertThrows( IllegalStateException.class, () -> remoteClusterStateService.getLatestIndexMetadata( @@ -337,14 +445,15 @@ public void testReadLatestMetadataManifestSuccess() throws IOException { .clusterUUID("cluster-uuid") .nodeId("nodeA") .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID("prev-cluster-uuid") .build(); mockBlobContainer(mockBlobStoreObjects(), expectedManifest, new HashMap<>()); - remoteClusterStateService.ensureRepositorySet(); + remoteClusterStateService.start(); final ClusterMetadataManifest manifest = remoteClusterStateService.getLatestClusterMetadataManifest( clusterState.getClusterName().value(), clusterState.metadata().clusterUUID() - ); + ).get(); assertThat(manifest.getIndices().size(), is(1)); assertThat(manifest.getIndices().get(0).getIndexName(), is(uploadedIndexMetadata.getIndexName())); @@ -358,7 +467,7 @@ public void testReadLatestMetadataManifestSuccess() throws IOException { public void testReadLatestIndexMetadataSuccess() throws IOException { final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); - remoteClusterStateService.ensureRepositorySet(); + remoteClusterStateService.start(); final Index index = new Index("test-index", "index-uuid"); String fileName = "metadata-" + index.getUUID(); @@ -381,6 +490,7 @@ public void testReadLatestIndexMetadataSuccess() throws IOException { .clusterUUID("cluster-uuid") .nodeId("nodeA") .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID("prev-cluster-uuid") .build(); mockBlobContainer(mockBlobStoreObjects(), expectedManifest, Map.of(index.getUUID(), indexMetadata)); @@ -399,7 +509,7 @@ public void testReadLatestIndexMetadataSuccess() throws IOException { public void testMarkLastStateAsCommittedSuccess() throws IOException { final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); mockBlobStoreObjects(); - remoteClusterStateService.ensureRepositorySet(); + remoteClusterStateService.start(); final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); List indices = List.of(uploadedIndexMetadata); final ClusterMetadataManifest previousManifest = ClusterMetadataManifest.builder().indices(indices).build(); @@ -413,6 +523,7 @@ public void testMarkLastStateAsCommittedSuccess() throws IOException { .stateUUID("state-uuid") .clusterUUID("cluster-uuid") .nodeId("nodeA") + .previousClusterUUID("prev-cluster-uuid") .build(); assertThat(manifest.getIndices().size(), is(1)); @@ -425,18 +536,122 @@ public void testMarkLastStateAsCommittedSuccess() throws IOException { assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); } + public void testGetValidPreviousClusterUUID() throws IOException { + Map clusterUUIDsPointers = Map.of( + "cluster-uuid1", + ClusterState.UNKNOWN_UUID, + "cluster-uuid2", + "cluster-uuid1", + "cluster-uuid3", + "cluster-uuid2" + ); + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers); + + remoteClusterStateService.start(); + String previousClusterUUID = remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster"); + assertThat(previousClusterUUID, equalTo("cluster-uuid3")); + } + + 
public void testGetValidPreviousClusterUUIDForInvalidChain() throws IOException { + Map clusterUUIDsPointers = Map.of( + "cluster-uuid1", + ClusterState.UNKNOWN_UUID, + "cluster-uuid2", + ClusterState.UNKNOWN_UUID, + "cluster-uuid3", + "cluster-uuid2" + ); + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers); + + remoteClusterStateService.start(); + assertThrows(IllegalStateException.class, () -> remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster")); + } + + private void mockObjectsForGettingPreviousClusterUUID(Map clusterUUIDsPointers) throws IOException { + final BlobPath blobPath = mock(BlobPath.class); + when((blobStoreRepository.basePath())).thenReturn(blobPath); + when(blobPath.add(anyString())).thenReturn(blobPath); + when(blobPath.buildAsString()).thenReturn("/blob/path/"); + BlobContainer blobContainer1 = mock(BlobContainer.class); + BlobContainer blobContainer2 = mock(BlobContainer.class); + BlobContainer blobContainer3 = mock(BlobContainer.class); + BlobContainer uuidBlobContainer = mock(BlobContainer.class); + when(blobContainer1.path()).thenReturn(blobPath); + when(blobContainer2.path()).thenReturn(blobPath); + when(blobContainer3.path()).thenReturn(blobPath); + + mockBlobContainerForClusterUUIDs(uuidBlobContainer, clusterUUIDsPointers.keySet()); + final ClusterMetadataManifest clusterManifest1 = generateClusterMetadataManifest( + "cluster-uuid1", + clusterUUIDsPointers.get("cluster-uuid1"), + randomAlphaOfLength(10) + ); + mockBlobContainer(blobContainer1, clusterManifest1, Map.of()); + + final ClusterMetadataManifest clusterManifest2 = generateClusterMetadataManifest( + "cluster-uuid2", + clusterUUIDsPointers.get("cluster-uuid2"), + randomAlphaOfLength(10) + ); + mockBlobContainer(blobContainer2, clusterManifest2, Map.of()); + + final ClusterMetadataManifest clusterManifest3 = generateClusterMetadataManifest( + "cluster-uuid3", + clusterUUIDsPointers.get("cluster-uuid3"), + randomAlphaOfLength(10) + ); + mockBlobContainer(blobContainer3, clusterManifest3, Map.of()); + + when(blobStore.blobContainer(ArgumentMatchers.any())).thenReturn( + uuidBlobContainer, + blobContainer1, + blobContainer1, + blobContainer2, + blobContainer2, + blobContainer3, + blobContainer3 + ); + when(blobStoreRepository.getCompressor()).thenReturn(new DeflateCompressor()); + } + + private ClusterMetadataManifest generateClusterMetadataManifest(String clusterUUID, String previousClusterUUID, String stateUUID) { + return ClusterMetadataManifest.builder() + .indices(List.of()) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID(stateUUID) + .clusterUUID(clusterUUID) + .nodeId("nodeA") + .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID(previousClusterUUID) + .committed(true) + .build(); + } + private BlobContainer mockBlobStoreObjects() { + return mockBlobStoreObjects(BlobContainer.class); + } + + private BlobContainer mockBlobStoreObjects(Class blobContainerClazz) { final BlobPath blobPath = mock(BlobPath.class); when((blobStoreRepository.basePath())).thenReturn(blobPath); when(blobPath.add(anyString())).thenReturn(blobPath); when(blobPath.buildAsString()).thenReturn("/blob/path/"); - final BlobContainer blobContainer = mock(BlobContainer.class); + final BlobContainer blobContainer = mock(blobContainerClazz); when(blobContainer.path()).thenReturn(blobPath); - when(blobStore.blobContainer(ArgumentMatchers.any())).thenReturn(blobContainer); + when(blobStore.blobContainer(any())).thenReturn(blobContainer); 
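[Annotation] testGetValidPreviousClusterUUID and its invalid-chain twin describe the invariant behind the new previousClusterUUID field: each manifest points at its predecessor, and the pointers must form a single unbroken chain from exactly one genesis UUID (previous == UNKNOWN_UUID) to the latest. A map-based sketch of that validation, checked against the two fixtures above but not taken from the production traversal:

    import java.util.HashMap;
    import java.util.Map;

    final class UuidChainSketch {
        static final String UNKNOWN_UUID = "_na_"; // assumed ClusterState.UNKNOWN_UUID sentinel

        // Returns the most recent cluster UUID if the pointers form one valid chain.
        static String latestUuid(Map<String, String> previousUuidPointers) {
            // Invert to predecessor -> successor; a duplicate predecessor means a fork.
            Map<String, String> successors = new HashMap<>();
            for (Map.Entry<String, String> e : previousUuidPointers.entrySet()) {
                if (successors.put(e.getValue(), e.getKey()) != null) {
                    throw new IllegalStateException("multiple cluster UUIDs claim predecessor " + e.getValue());
                }
            }
            String current = successors.get(UNKNOWN_UUID); // the genesis UUID
            if (current == null) {
                throw new IllegalStateException("no genesis cluster UUID found");
            }
            while (successors.containsKey(current)) {
                current = successors.get(current); // walk forward to the newest UUID
            }
            return current; // "cluster-uuid3" for the valid fixture above
        }
    }

Against the first fixture ({uuid1 -> UNKNOWN, uuid2 -> uuid1, uuid3 -> uuid2}) this returns cluster-uuid3; against the second, where two UUIDs both claim UNKNOWN_UUID as predecessor, it throws IllegalStateException, matching the assertThrows expectation.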
when(blobStoreRepository.getCompressor()).thenReturn(new DeflateCompressor()); return blobContainer; } + private void mockBlobContainerForClusterUUIDs(BlobContainer blobContainer, Set<String> clusterUUIDs) throws IOException { + Map<String, BlobContainer> blobContainerMap = new HashMap<>(); + for (String clusterUUID : clusterUUIDs) { + blobContainerMap.put(clusterUUID, mockBlobStoreObjects()); + } + when(blobContainer.children()).thenReturn(blobContainerMap); + } + private void mockBlobContainer( BlobContainer blobContainer, ClusterMetadataManifest clusterMetadataManifest, diff --git a/server/src/test/java/org/opensearch/index/codec/CodecTests.java b/server/src/test/java/org/opensearch/index/codec/CodecTests.java index 3ca759eaf0781..6f9ff78aa0143 100644 --- a/server/src/test/java/org/opensearch/index/codec/CodecTests.java +++ b/server/src/test/java/org/opensearch/index/codec/CodecTests.java @@ -48,8 +48,6 @@ import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.IndexAnalyzers; -import org.opensearch.index.codec.customcodecs.Lucene95CustomCodec; -import org.opensearch.index.codec.customcodecs.Lucene95CustomStoredFieldsFormat; import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.similarity.SimilarityService; @@ -95,49 +93,7 @@ public void testZlib() throws Exception { assert codec instanceof PerFieldMappingPostingFormatCodec; } - public void testZstd() throws Exception { - Codec codec = createCodecService(false).codec("zstd"); - assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD, codec); - Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); - assertEquals(Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL, storedFieldsFormat.getCompressionLevel()); - } - - public void testZstdNoDict() throws Exception { - Codec codec = createCodecService(false).codec("zstd_no_dict"); - assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD_NO_DICT, codec); - Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); - assertEquals(Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL, storedFieldsFormat.getCompressionLevel()); - } - - public void testZstdWithCompressionLevel() throws Exception { - int randomCompressionLevel = randomIntBetween(1, 6); - Codec codec = createCodecService(randomCompressionLevel, "zstd").codec("zstd"); - assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD, codec); - Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); - assertEquals(randomCompressionLevel, storedFieldsFormat.getCompressionLevel()); - } - - public void testZstdNoDictWithCompressionLevel() throws Exception { - int randomCompressionLevel = randomIntBetween(1, 6); - Codec codec = createCodecService(randomCompressionLevel, "zstd_no_dict").codec("zstd_no_dict"); - assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD_NO_DICT, codec); - Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); - assertEquals(randomCompressionLevel, storedFieldsFormat.getCompressionLevel()); - } - public void testBestCompressionWithCompressionLevel() { - final Settings zstdSettings = Settings.builder() - .put(INDEX_CODEC_COMPRESSION_LEVEL_SETTING.getKey(), randomIntBetween(1, 6)) - .put(EngineConfig.INDEX_CODEC_SETTING.getKey(),
randomFrom(CodecService.ZSTD_CODEC, CodecService.ZSTD_NO_DICT_CODEC)) - .build(); - - // able to validate zstd - final IndexScopedSettings zstdIndexScopedSettings = new IndexScopedSettings( - zstdSettings, - IndexScopedSettings.BUILT_IN_INDEX_SETTINGS - ); - zstdIndexScopedSettings.validate(zstdSettings, true); - final Settings settings = Settings.builder() .put(INDEX_CODEC_COMPRESSION_LEVEL_SETTING.getKey(), randomIntBetween(1, 6)) .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC)) @@ -152,17 +108,6 @@ public void testLuceneCodecsWithCompressionLevel() { String codecName = randomFrom(Codec.availableCodecs()); Codec codec = Codec.forName(codecName); - final Settings customCodecSettings = Settings.builder() - .put(INDEX_CODEC_COMPRESSION_LEVEL_SETTING.getKey(), randomIntBetween(1, 6)) - .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), "Lucene95CustomCodec") - .build(); - - final IndexScopedSettings customCodecIndexScopedSettings = new IndexScopedSettings( - customCodecSettings, - IndexScopedSettings.BUILT_IN_INDEX_SETTINGS - ); - customCodecIndexScopedSettings.validate(customCodecSettings, true); - final Settings settings = Settings.builder() .put(INDEX_CODEC_COMPRESSION_LEVEL_SETTING.getKey(), randomIntBetween(1, 6)) .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), codecName) @@ -178,14 +123,6 @@ public void testLuceneCodecsWithCompressionLevel() { } } - public void testZstandardCompressionLevelSupport() throws Exception { - CodecService codecService = createCodecService(false); - CodecSettings zstdCodec = (CodecSettings) codecService.codec("zstd"); - CodecSettings zstdNoDictCodec = (CodecSettings) codecService.codec("zstd_no_dict"); - assertTrue(zstdCodec.supports(INDEX_CODEC_COMPRESSION_LEVEL_SETTING)); - assertTrue(zstdNoDictCodec.supports(INDEX_CODEC_COMPRESSION_LEVEL_SETTING)); - } - public void testDefaultMapperServiceNull() throws Exception { Codec codec = createCodecService(true).codec("default"); assertStoredFieldsCompressionEquals(Lucene95Codec.Mode.BEST_SPEED, codec); @@ -196,20 +133,6 @@ public void testBestCompressionMapperServiceNull() throws Exception { assertStoredFieldsCompressionEquals(Lucene95Codec.Mode.BEST_COMPRESSION, codec); } - public void testZstdMapperServiceNull() throws Exception { - Codec codec = createCodecService(true).codec("zstd"); - assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD, codec); - Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); - assertEquals(Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL, storedFieldsFormat.getCompressionLevel()); - } - - public void testZstdNoDictMapperServiceNull() throws Exception { - Codec codec = createCodecService(true).codec("zstd_no_dict"); - assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD_NO_DICT, codec); - Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); - assertEquals(Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL, storedFieldsFormat.getCompressionLevel()); - } - public void testExceptionCodecNull() { assertThrows(IllegalArgumentException.class, () -> createCodecService(true).codec(null)); } @@ -226,13 +149,6 @@ private void assertStoredFieldsCompressionEquals(Lucene95Codec.Mode expected, Co assertEquals(expected, Lucene95Codec.Mode.valueOf(v)); } - private void assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode expected, Codec actual) throws Exception { - SegmentReader 
sr = getSegmentReader(actual); - String v = sr.getSegmentInfo().info.getAttribute(Lucene95CustomStoredFieldsFormat.MODE_KEY); - assertNotNull(v); - assertEquals(expected, Lucene95CustomCodec.Mode.valueOf(v)); - } - private CodecService createCodecService(boolean isMapperServiceNull) throws IOException { Settings nodeSettings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build(); if (isMapperServiceNull) { @@ -241,15 +157,6 @@ private CodecService createCodecService(boolean isMapperServiceNull) throws IOEx return buildCodecService(nodeSettings); } - private CodecService createCodecService(int randomCompressionLevel, String codec) throws IOException { - Settings nodeSettings = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .put("index.codec", codec) - .put("index.codec.compression_level", randomCompressionLevel) - .build(); - return buildCodecService(nodeSettings); - } - private CodecService buildCodecService(Settings nodeSettings) throws IOException { IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("_na", nodeSettings); diff --git a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java index 22eb5195af507..7b587e9a83d2d 100644 --- a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java @@ -75,6 +75,28 @@ public void testCreateEngine() throws IOException { } } + public void testCreateEngineWithException() throws IOException { + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory()); + try { + // Passing null translogPath to induce failure + final EngineConfig replicaConfig = config( + defaultSettings, + nrtEngineStore, + null, + NoMergePolicy.INSTANCE, + null, + null, + globalCheckpoint::get + ); + new NRTReplicationEngine(replicaConfig); + } catch (Exception e) { + // Ignore as engine creation will fail + } + assertEquals(1, nrtEngineStore.refCount()); + nrtEngineStore.close(); + } + public void testEngineWritesOpsToTranslog() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 140ea0fade4b4..cd272d8f626d0 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -125,6 +125,7 @@ import org.opensearch.index.mapper.VersionFieldMapper; import org.opensearch.index.remote.RemoteSegmentStats; import org.opensearch.index.remote.RemoteSegmentTransferTracker; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.seqno.ReplicationTracker; import org.opensearch.index.seqno.RetentionLease; import org.opensearch.index.seqno.RetentionLeaseSyncer; @@ -137,6 +138,7 @@ import org.opensearch.index.store.StoreUtils; import org.opensearch.index.translog.InternalTranslogFactory; import org.opensearch.index.translog.RemoteBlobStoreInternalTranslogFactory; +import org.opensearch.index.translog.RemoteTranslogStats; import org.opensearch.index.translog.TestTranslog; import org.opensearch.index.translog.Translog; import 
org.opensearch.index.translog.TranslogStats; @@ -1821,9 +1823,12 @@ public void testShardStatsWithRemoteStoreEnabled() throws IOException { .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) .build() ); - RemoteSegmentTransferTracker remoteRefreshSegmentTracker = shard.getRemoteStoreStatsTrackerFactory() + RemoteSegmentTransferTracker remoteSegmentTransferTracker = shard.getRemoteStoreStatsTrackerFactory() .getRemoteSegmentTransferTracker(shard.shardId); - populateSampleRemoteStoreStats(remoteRefreshSegmentTracker); + RemoteTranslogTransferTracker remoteTranslogTransferTracker = shard.getRemoteStoreStatsTrackerFactory() + .getRemoteTranslogTransferTracker(shard.shardId); + populateSampleRemoteSegmentStats(remoteSegmentTransferTracker); + populateSampleRemoteTranslogStats(remoteTranslogTransferTracker); ShardStats shardStats = new ShardStats( shard.routingEntry(), shard.shardPath(), @@ -1833,9 +1838,9 @@ public void testShardStatsWithRemoteStoreEnabled() throws IOException { shard.getRetentionLeaseStats() ); RemoteSegmentStats remoteSegmentStats = shardStats.getStats().getSegments().getRemoteSegmentStats(); - assertEquals(remoteRefreshSegmentTracker.getUploadBytesStarted(), remoteSegmentStats.getUploadBytesStarted()); - assertEquals(remoteRefreshSegmentTracker.getUploadBytesSucceeded(), remoteSegmentStats.getUploadBytesSucceeded()); - assertEquals(remoteRefreshSegmentTracker.getUploadBytesFailed(), remoteSegmentStats.getUploadBytesFailed()); + assertRemoteSegmentStats(remoteSegmentTransferTracker, remoteSegmentStats); + RemoteTranslogStats remoteTranslogStats = shardStats.getStats().getTranslog().getRemoteTranslogStats(); + assertRemoteTranslogStats(remoteTranslogTransferTracker, remoteTranslogStats); closeShards(shard); } @@ -4901,10 +4906,42 @@ public void testRecordsForceMerges() throws IOException { closeShards(shard); } - private void populateSampleRemoteStoreStats(RemoteSegmentTransferTracker tracker) { - tracker.addUploadBytesStarted(10L); - tracker.addUploadBytesStarted(10L); + private void populateSampleRemoteSegmentStats(RemoteSegmentTransferTracker tracker) { + tracker.addUploadBytesStarted(30L); tracker.addUploadBytesSucceeded(10L); tracker.addUploadBytesFailed(10L); } + + private void populateSampleRemoteTranslogStats(RemoteTranslogTransferTracker tracker) { + tracker.incrementTotalUploadsStarted(); + tracker.incrementTotalUploadsStarted(); + tracker.incrementTotalUploadsStarted(); + tracker.incrementTotalUploadsSucceeded(); + tracker.incrementTotalUploadsFailed(); + int bytesStarted = randomIntBetween(100, 1000); + tracker.addUploadBytesStarted(bytesStarted); + tracker.addUploadBytesSucceeded(randomIntBetween(1, bytesStarted / 2)); + tracker.addUploadBytesFailed(randomIntBetween(1, bytesStarted / 2)); + } + + private static void assertRemoteTranslogStats( + RemoteTranslogTransferTracker remoteTranslogTransferTracker, + RemoteTranslogStats remoteTranslogStats + ) { + assertEquals(remoteTranslogTransferTracker.getTotalUploadsStarted(), remoteTranslogStats.getTotalUploadsStarted()); + assertEquals(remoteTranslogTransferTracker.getTotalUploadsSucceeded(), remoteTranslogStats.getTotalUploadsSucceeded()); + assertEquals(remoteTranslogTransferTracker.getTotalUploadsFailed(), remoteTranslogStats.getTotalUploadsFailed()); + assertEquals(remoteTranslogTransferTracker.getUploadBytesStarted(), remoteTranslogStats.getUploadBytesStarted()); + assertEquals(remoteTranslogTransferTracker.getUploadBytesSucceeded(), remoteTranslogStats.getUploadBytesSucceeded()); + 
assertEquals(remoteTranslogTransferTracker.getUploadBytesFailed(), remoteTranslogStats.getUploadBytesFailed()); + } + + private static void assertRemoteSegmentStats( + RemoteSegmentTransferTracker remoteSegmentTransferTracker, + RemoteSegmentStats remoteSegmentStats + ) { + assertEquals(remoteSegmentTransferTracker.getUploadBytesStarted(), remoteSegmentStats.getUploadBytesStarted()); + assertEquals(remoteSegmentTransferTracker.getUploadBytesSucceeded(), remoteSegmentStats.getUploadBytesSucceeded()); + assertEquals(remoteSegmentTransferTracker.getUploadBytesFailed(), remoteSegmentStats.getUploadBytesFailed()); + } } diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java index 0739174fc3afd..6a99063d11353 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java @@ -14,7 +14,6 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.action.ActionListener; import org.opensearch.index.engine.DocIdSeqNoAndSource; import org.opensearch.index.engine.Engine; @@ -34,7 +33,6 @@ import org.opensearch.indices.replication.common.ReplicationType; import org.hamcrest.MatcherAssert; import org.junit.Assert; -import org.junit.Before; import java.io.IOException; import java.nio.file.Path; @@ -65,14 +63,6 @@ public class RemoteIndexShardTests extends SegmentReplicationIndexShardTests { .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, REPOSITORY_NAME) .build(); - @Before - public void setup() { - // Todo: Remove feature flag once remote store integration with segrep goes GA - FeatureFlags.initializeFeatureFlags( - Settings.builder().put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL_SETTING.getKey(), "true").build() - ); - } - protected Settings getIndexSettings() { return settings; } @@ -81,22 +71,18 @@ protected ReplicationGroup getReplicationGroup(int numberOfReplicas) throws IOEx return createGroup(numberOfReplicas, settings, indexMapping, new NRTReplicationEngineFactory(), createTempDir()); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9624") public void testNRTReplicaWithRemoteStorePromotedAsPrimaryRefreshRefresh() throws Exception { testNRTReplicaWithRemoteStorePromotedAsPrimary(false, false); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9624") public void testNRTReplicaWithRemoteStorePromotedAsPrimaryRefreshCommit() throws Exception { testNRTReplicaWithRemoteStorePromotedAsPrimary(false, true); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9624") public void testNRTReplicaWithRemoteStorePromotedAsPrimaryCommitRefresh() throws Exception { testNRTReplicaWithRemoteStorePromotedAsPrimary(true, false); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9624") public void testNRTReplicaWithRemoteStorePromotedAsPrimaryCommitCommit() throws Exception { testNRTReplicaWithRemoteStorePromotedAsPrimary(true, true); } @@ -156,10 +142,6 @@ public void testNRTReplicaWithRemoteStorePromotedAsPrimary(boolean performFlushF assertEquals(InternalEngine.class, nextPrimary.getEngine().getClass()); assertDocCounts(nextPrimary, totalDocs, totalDocs); - // As we are downloading segments 
from remote segment store on failover, there should not be - // any operations replayed from translog - assertEquals(prevOperationCount, nextPrimary.translogStats().estimatedNumberOfOperations()); - // refresh and push segments to our other replica. nextPrimary.refresh("test"); diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java index 188e371ae223e..c713ccdddd66a 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -261,6 +261,7 @@ public void testReplica() throws IOException { } } + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9773") public void testReplicaPromotion() throws IOException, InterruptedException { setup(false, 3); remoteStoreRefreshListener.afterRefresh(true); diff --git a/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java b/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java index 2a4c572ee2875..4f5cad70fd643 100644 --- a/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java +++ b/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java @@ -13,7 +13,6 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.DocIdSeqNoAndSource; import org.opensearch.index.engine.NRTReplicationEngine; @@ -25,7 +24,6 @@ import org.opensearch.indices.recovery.RecoveryTarget; import org.opensearch.indices.replication.common.ReplicationType; import org.junit.Assert; -import org.junit.Before; import java.io.IOException; import java.nio.file.Path; @@ -43,14 +41,6 @@ public class ReplicaRecoveryWithRemoteTranslogOnPrimaryTests extends OpenSearchI .put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), "100ms") .build(); - @Before - public void setup() { - // Todo: Remove feature flag once remote store integration with segrep goes GA - FeatureFlags.initializeFeatureFlags( - Settings.builder().put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL_SETTING.getKey(), "true").build() - ); - } - public void testStartSequenceForReplicaRecovery() throws Exception { final Path remoteDir = createTempDir(); final String indexMapping = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": {} }"; diff --git a/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java index 836f4e1eda5e7..dbfc66d6de4b3 100644 --- a/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java @@ -519,17 +519,18 @@ public void testStats() throws IOException { builder.startObject(); copy.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); - assertThat( - builder.toString(), - equalTo( - "{\"translog\":{\"operations\":4,\"size_in_bytes\":" - + 326 - + ",\"uncommitted_operations\":4,\"uncommitted_size_in_bytes\":" - + 271 - + ",\"earliest_last_modified_age\":" - + stats.getEarliestLastModifiedAge() - + "}}" - ) + 
assertEquals( + "{\"translog\":{\"operations\":4,\"size_in_bytes\":" + + 326 + + ",\"uncommitted_operations\":4,\"uncommitted_size_in_bytes\":" + + 271 + + ",\"earliest_last_modified_age\":" + + stats.getEarliestLastModifiedAge() + + ",\"remote_store\":{\"upload\":{" + + "\"total_uploads\":{\"started\":0,\"failed\":0,\"succeeded\":0}," + + "\"total_upload_size\":{\"started_bytes\":0,\"failed_bytes\":0,\"succeeded_bytes\":0}" + + "}}}}", + builder.toString() ); } } diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteTranslogStatsTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteTranslogStatsTests.java new file mode 100644 index 0000000000000..aa390cdba1275 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/translog/RemoteTranslogStatsTests.java @@ -0,0 +1,104 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.translog; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class RemoteTranslogStatsTests extends OpenSearchTestCase { + RemoteTranslogTransferTracker.Stats transferTrackerStats; + RemoteTranslogStats remoteTranslogStats; + + @Override + public void setUp() throws Exception { + super.setUp(); + transferTrackerStats = getRandomTransferTrackerStats(); + remoteTranslogStats = new RemoteTranslogStats(transferTrackerStats); + } + + public void testRemoteTranslogStatsCreationFromTransferTrackerStats() { + assertEquals(transferTrackerStats.totalUploadsStarted, remoteTranslogStats.getTotalUploadsStarted()); + assertEquals(transferTrackerStats.totalUploadsSucceeded, remoteTranslogStats.getTotalUploadsSucceeded()); + assertEquals(transferTrackerStats.totalUploadsFailed, remoteTranslogStats.getTotalUploadsFailed()); + assertEquals(transferTrackerStats.uploadBytesStarted, remoteTranslogStats.getUploadBytesStarted()); + assertEquals(transferTrackerStats.uploadBytesSucceeded, remoteTranslogStats.getUploadBytesSucceeded()); + assertEquals(transferTrackerStats.uploadBytesFailed, remoteTranslogStats.getUploadBytesFailed()); + } + + public void testRemoteTranslogStatsSerialization() throws IOException { + try (BytesStreamOutput out = new BytesStreamOutput()) { + remoteTranslogStats.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + RemoteTranslogStats remoteTranslogStatsFromStream = new RemoteTranslogStats(in); + assertEquals(remoteTranslogStats, remoteTranslogStatsFromStream); + } + } + } + + public void testAdd() { + RemoteTranslogTransferTracker.Stats otherTransferTrackerStats = getRandomTransferTrackerStats(); + RemoteTranslogStats otherRemoteTranslogStats = new RemoteTranslogStats(otherTransferTrackerStats); + + otherRemoteTranslogStats.add(remoteTranslogStats); + + assertEquals( + otherRemoteTranslogStats.getTotalUploadsStarted(), + otherTransferTrackerStats.totalUploadsStarted + remoteTranslogStats.getTotalUploadsStarted() + ); + assertEquals( + otherRemoteTranslogStats.getTotalUploadsSucceeded(), + otherTransferTrackerStats.totalUploadsSucceeded + remoteTranslogStats.getTotalUploadsSucceeded() + ); + assertEquals( + otherRemoteTranslogStats.getTotalUploadsFailed(), + 
otherTransferTrackerStats.totalUploadsFailed + remoteTranslogStats.getTotalUploadsFailed() + ); + assertEquals( + otherRemoteTranslogStats.getUploadBytesStarted(), + otherTransferTrackerStats.uploadBytesStarted + remoteTranslogStats.getUploadBytesStarted() + ); + assertEquals( + otherRemoteTranslogStats.getUploadBytesSucceeded(), + otherTransferTrackerStats.uploadBytesSucceeded + remoteTranslogStats.getUploadBytesSucceeded() + ); + assertEquals( + otherRemoteTranslogStats.getUploadBytesFailed(), + otherTransferTrackerStats.uploadBytesFailed + remoteTranslogStats.getUploadBytesFailed() + ); + } + + private static RemoteTranslogTransferTracker.Stats getRandomTransferTrackerStats() { + return new RemoteTranslogTransferTracker.Stats( + new ShardId("test-idx", "test-idx", randomIntBetween(1, 10)), + 0L, + randomLongBetween(100, 500), + randomLongBetween(50, 100), + randomLongBetween(100, 200), + randomLongBetween(10000, 50000), + randomLongBetween(5000, 10000), + randomLongBetween(10000, 20000), + 0L, + 0D, + 0D, + 0D, + 0L, + 0L, + 0L, + 0L, + 0D, + 0D, + 0D + ); + } +} diff --git a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java index d22ad06245687..742dbdeba8c5b 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java @@ -620,8 +620,8 @@ public void testConflictingEngineFactories() { assertThat(e, hasToString(new RegexMatcher(pattern))); } - public void testClusterRemoteTranslogBufferIntervalNull() { + public void testClusterRemoteTranslogBufferIntervalDefault() { IndicesService indicesService = getIndicesService(); - assertNull(indicesService.getClusterRemoteTranslogBufferInterval()); + assertEquals(IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, indicesService.getClusterRemoteTranslogBufferInterval()); } } diff --git a/server/src/test/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandlerTests.java b/server/src/test/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandlerTests.java index 920a3f2946884..131514eb019b3 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandlerTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandlerTests.java @@ -10,7 +10,6 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.mapper.MapperService; @@ -18,7 +17,6 @@ import org.opensearch.index.seqno.ReplicationTracker; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.replication.common.ReplicationType; -import org.junit.Before; import java.nio.file.Path; @@ -31,14 +29,6 @@ public class RemoteStorePeerRecoverySourceHandlerTests extends OpenSearchIndexLe .put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), "100ms") .build(); - @Before - public void setup() { - // Todo: Remove feature flag once remote store integration with segrep goes GA - FeatureFlags.initializeFeatureFlags( - Settings.builder().put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL_SETTING.getKey(), "true").build() - ); - } - public void testReplicaShardRecoveryUptoLastFlushedCommit() throws Exception { final Path remoteDir = createTempDir(); 
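// [editorial note, re: RemoteTranslogStatsTests.testAdd above] add() accumulates the argument's
// counters into the receiver; that additive contract is what lets per-shard translog upload
// stats roll up into the node-level numbers surfaced by _nodes/stats. A minimal sketch of the
// contract under test follows; field names are illustrative, not the exact RemoteTranslogStats fields.
final class UploadStatsRollupSketch {
    long uploadsStarted, uploadsSucceeded, uploadsFailed;
    long uploadBytesStarted, uploadBytesSucceeded, uploadBytesFailed;

    // Field-wise accumulation: after a.add(b), every counter of a includes b's contribution.
    void add(UploadStatsRollupSketch other) {
        uploadsStarted += other.uploadsStarted;
        uploadsSucceeded += other.uploadsSucceeded;
        uploadsFailed += other.uploadsFailed;
        uploadBytesStarted += other.uploadBytesStarted;
        uploadBytesSucceeded += other.uploadBytesSucceeded;
        uploadBytesFailed += other.uploadBytesFailed;
    }
}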
final String indexMapping = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": {} }"; diff --git a/server/src/test/java/org/opensearch/node/RemoteStoreNodeAttributeTests.java b/server/src/test/java/org/opensearch/node/RemoteStoreNodeAttributeTests.java new file mode 100644 index 0000000000000..c4ba271d27ae9 --- /dev/null +++ b/server/src/test/java/org/opensearch/node/RemoteStoreNodeAttributeTests.java @@ -0,0 +1,151 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node; + +import org.opensearch.Version; +import org.opensearch.cluster.metadata.CryptoMetadata; +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; +import org.opensearch.test.OpenSearchTestCase; + +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.Locale; +import java.util.Map; + +import static java.util.Collections.emptySet; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_CRYPTO_SETTINGS_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; + +public class RemoteStoreNodeAttributeTests extends OpenSearchTestCase { + + static private final String KEY_ARN = "arn:aws:kms:us-east-1:123456789:key/6e9aa906-2cc3-4924-8ded-f385c78d9dcf"; + static private final String REGION = "us-east-1"; + + public void testCryptoMetadata() throws UnknownHostException { + String repoName = "remote-store-A"; + String repoTypeSettingKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, repoName); + String repoSettingsKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, repoName); + String repoCryptoMetadataKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT, repoName); + String repoCryptoMetadataSettingsKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_CRYPTO_SETTINGS_PREFIX, repoName); + Map<String, String> attr = Map.of( + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + repoTypeSettingKey, + "s3", + repoSettingsKey, + "abc", + repoSettingsKey + "base_path", + "xyz", + repoCryptoMetadataKey + ".key_provider_name", + "store-test", + repoCryptoMetadataKey + ".key_provider_type", + "aws-kms", + repoCryptoMetadataSettingsKey + ".region", + REGION, + repoCryptoMetadataSettingsKey + ".key_arn", + KEY_ARN + ); +
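// [editorial note] For readability, here is how the formatted keys above expand for
// repoName = "remote-store-A", assuming the "remote_store.repository.%s.*" patterns that
// the RemoteStoreNodeAttribute constants appear to follow (hedged; the authoritative
// format strings live in that class):
//   remote_store.repository.remote-store-A.type = s3
//   remote_store.repository.remote-store-A.settings.base_path = xyz
//   remote_store.repository.remote-store-A.crypto_metadata.key_provider_name = store-test
//   remote_store.repository.remote-store-A.crypto_metadata.settings.key_arn = KEY_ARN
// e.g. String.format(Locale.ROOT, "remote_store.repository.%s.type", repoName)
//      -> "remote_store.repository.remote-store-A.type"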
DiscoveryNode node = new DiscoveryNode( + "C", + new TransportAddress(InetAddress.getByName("localhost"), 9876), + attr, + emptySet(), + Version.CURRENT + ); + + RemoteStoreNodeAttribute remoteStoreNodeAttribute = new RemoteStoreNodeAttribute(node); + assertEquals(remoteStoreNodeAttribute.getRepositoriesMetadata().repositories().size(), 1); + RepositoryMetadata repositoryMetadata = remoteStoreNodeAttribute.getRepositoriesMetadata().repositories().get(0); + Settings.Builder settings = Settings.builder(); + settings.put("region", REGION); + settings.put("key_arn", KEY_ARN); + CryptoMetadata cryptoMetadata = new CryptoMetadata("store-test", "aws-kms", settings.build()); + assertEquals(cryptoMetadata, repositoryMetadata.cryptoMetadata()); + } + + public void testInvalidCryptoMetadata() throws UnknownHostException { + String repoName = "remote-store-A"; + String repoTypeSettingKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, repoName); + String repoSettingsKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, repoName); + String repoCryptoMetadataKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT, repoName); + String repoCryptoMetadataSettingsKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_CRYPTO_SETTINGS_PREFIX, repoName); + Map<String, String> attr = Map.of( + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + repoTypeSettingKey, + "s3", + repoSettingsKey, + "abc", + repoSettingsKey + "base_path", + "xyz", + repoCryptoMetadataSettingsKey + ".region", + REGION, + repoCryptoMetadataSettingsKey + ".key_arn", + KEY_ARN + ); + DiscoveryNode node = new DiscoveryNode( + "C", + new TransportAddress(InetAddress.getByName("localhost"), 9876), + attr, + emptySet(), + Version.CURRENT + ); + + assertThrows(IllegalStateException.class, () -> new RemoteStoreNodeAttribute(node)); + } + + public void testNoCryptoMetadata() throws UnknownHostException { + String repoName = "remote-store-A"; + String repoTypeSettingKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, repoName); + String repoSettingsKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, repoName); + Map<String, String> attr = Map.of( + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + repoTypeSettingKey, + "s3", + repoSettingsKey, + "abc", + repoSettingsKey + "base_path", + "xyz" + ); + DiscoveryNode node = new DiscoveryNode( + "C", + new TransportAddress(InetAddress.getByName("localhost"), 9876), + attr, + emptySet(), + Version.CURRENT + ); + + RemoteStoreNodeAttribute remoteStoreNodeAttribute = new RemoteStoreNodeAttribute(node); + assertEquals(remoteStoreNodeAttribute.getRepositoriesMetadata().repositories().size(), 1); + RepositoryMetadata repositoryMetadata = remoteStoreNodeAttribute.getRepositoriesMetadata().repositories().get(0); + assertNull(repositoryMetadata.cryptoMetadata()); + } +} diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java index b20cb323e144f..d14858a4441b5 100644 --- a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java +++
b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java @@ -57,6 +57,7 @@ import org.opensearch.common.crypto.CryptoHandler; import org.opensearch.common.crypto.DecryptedRangedStreamProvider; import org.opensearch.common.crypto.EncryptedHeaderContentSupplier; +import org.opensearch.common.crypto.MasterKeyProvider; import org.opensearch.common.io.InputStreamContainer; import org.opensearch.common.lifecycle.Lifecycle; import org.opensearch.common.lifecycle.LifecycleListener; @@ -65,7 +66,6 @@ import org.opensearch.core.common.Strings; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.encryption.CryptoManager; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; @@ -73,6 +73,7 @@ import org.opensearch.index.store.lockmanager.RemoteStoreLockManagerFactory; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.plugins.CryptoPlugin; import org.opensearch.repositories.blobstore.MeteredBlobStoreRepository; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotInfo; @@ -89,7 +90,6 @@ import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; import java.util.function.Function; @@ -102,6 +102,8 @@ public class RepositoriesServiceTests extends OpenSearchTestCase { private RepositoriesService repositoriesService; + private final String kpTypeA = "kp-type-a"; + private final String kpTypeB = "kp-type-b"; @Override public void setUp() throws Exception { @@ -229,28 +231,28 @@ public void testWithSameKeyProviderNames() { "repoName", MeteredRepositoryTypeA.TYPE, keyProviderName, - TestCryptoManagerTypeA.TYPE + kpTypeA ); repositoriesService.applyClusterState(new ClusterChangedEvent("new repo", clusterStateWithRepoTypeA, emptyState())); assertThat(repositoriesService.repositoriesStats().size(), equalTo(1)); MeteredRepositoryTypeA repository = (MeteredRepositoryTypeA) repositoriesService.repository("repoName"); assertNotNull(repository); - assertNotNull(repository.cryptoManager); - assertTrue(repository.cryptoManager instanceof TestCryptoManagerTypeA); + assertNotNull(repository.cryptoHandler); + assertEquals(kpTypeA, repository.cryptoHandler.kpType); ClusterState clusterStateWithRepoTypeB = createClusterStateWithKeyProvider( "repoName", MeteredRepositoryTypeB.TYPE, keyProviderName, - TestCryptoManagerTypeB.TYPE + kpTypeA ); repositoriesService.applyClusterState(new ClusterChangedEvent("new repo", clusterStateWithRepoTypeB, emptyState())); assertThat(repositoriesService.repositoriesStats().size(), equalTo(2)); MeteredRepositoryTypeB repositoryB = (MeteredRepositoryTypeB) repositoriesService.repository("repoName"); assertNotNull(repositoryB); - assertNotNull(repository.cryptoManager); - assertTrue(repository.cryptoManager instanceof TestCryptoManagerTypeA); + assertNotNull(repository.cryptoHandler); + assertEquals(kpTypeA, repository.cryptoHandler.kpType); } public void testCryptoManagersUnchangedWithSameCryptoMetadata() { @@ -259,21 +261,21 @@ public void testCryptoManagersUnchangedWithSameCryptoMetadata() { "repoName", MeteredRepositoryTypeA.TYPE, keyProviderName, - TestCryptoManagerTypeA.TYPE + kpTypeA ); 
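// [editorial sketch] The assertions in this area only hold if crypto handlers are cached and
// shared per key-provider identity: re-registering with the same key-provider name returns
// the handler created first, so repository.cryptoHandler.kpType stays kpTypeA. A minimal
// illustration of such a cache follows (assumed behaviour for the sketch, not the actual
// registry implementation inside RepositoriesService).
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Supplier;

final class CryptoHandlerCacheSketch<H> {
    private final ConcurrentMap<String, H> byProvider = new ConcurrentHashMap<>();

    // A later registration with the same name/type pair reuses the existing handler.
    H getOrCreate(String kpName, String kpType, Supplier<H> factory) {
        return byProvider.computeIfAbsent(kpName + "::" + kpType, key -> factory.get());
    }
}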
repositoriesService.applyClusterState(new ClusterChangedEvent("new repo", clusterStateWithRepoTypeA, emptyState())); assertThat(repositoriesService.repositoriesStats().size(), equalTo(1)); MeteredRepositoryTypeA repository = (MeteredRepositoryTypeA) repositoriesService.repository("repoName"); assertNotNull(repository); - assertNotNull(repository.cryptoManager); - assertTrue(repository.cryptoManager instanceof TestCryptoManagerTypeA); + assertNotNull(repository.cryptoHandler); + assertEquals(kpTypeA, repository.cryptoHandler.kpType); repositoriesService.applyClusterState(new ClusterChangedEvent("new repo", clusterStateWithRepoTypeA, emptyState())); assertThat(repositoriesService.repositoriesStats().size(), equalTo(1)); repository = (MeteredRepositoryTypeA) repositoriesService.repository("repoName"); assertNotNull(repository); - assertNotNull(repository.cryptoManager); - assertTrue(repository.cryptoManager instanceof TestCryptoManagerTypeA); + assertNotNull(repository.cryptoHandler); + assertEquals(kpTypeA, repository.cryptoHandler.kpType); } public void testRepositoryUpdateWithDifferentCryptoMetadata() { @@ -283,7 +285,7 @@ public void testRepositoryUpdateWithDifferentCryptoMetadata() { "repoName", MeteredRepositoryTypeA.TYPE, keyProviderName, - TestCryptoManagerTypeA.TYPE + kpTypeA ); ClusterService clusterService = mock(ClusterService.class); @@ -303,13 +305,13 @@ public void testRepositoryUpdateWithDifferentCryptoMetadata() { assertThat(repositoriesService.repositoriesStats().size(), equalTo(1)); MeteredRepositoryTypeA repository = (MeteredRepositoryTypeA) repositoriesService.repository("repoName"); assertNotNull(repository); - assertNotNull(repository.cryptoManager); - assertTrue(repository.cryptoManager instanceof TestCryptoManagerTypeA); + assertNotNull(repository.cryptoHandler); + assertEquals(kpTypeA, repository.cryptoHandler.kpType); expectThrows(IllegalArgumentException.class, () -> repositoriesService.registerRepository(request, null)); CryptoSettings cryptoSettings = new CryptoSettings(keyProviderName); - cryptoSettings.keyProviderType(TestCryptoManagerTypeA.TYPE); + cryptoSettings.keyProviderType(kpTypeA); cryptoSettings.settings(Settings.builder().put("key-1", "val-1")); request.cryptoSettings(cryptoSettings); expectThrows(IllegalArgumentException.class, () -> repositoriesService.registerRepository(request, null)); @@ -320,7 +322,7 @@ public void testRepositoryUpdateWithDifferentCryptoMetadata() { cryptoSettings.keyProviderName(keyProviderName); - cryptoSettings.keyProviderType(TestCryptoManagerTypeA.TYPE); + assertEquals(kpTypeA, repository.cryptoHandler.kpType); repositoriesService.registerRepository(request, null); } @@ -332,7 +334,7 @@ public void testCryptoManagerClusterStateChanges() { String keyProviderName = "kp-name-1"; String repoName = "repoName"; - String keyProviderType = TestCryptoManagerTypeA.TYPE; + String keyProviderType = kpTypeA; Settings.Builder settings = Settings.builder(); PutRepositoryRequest request = createPutRepositoryEncryptedRequest( repoName, @@ -353,12 +355,12 @@ public void testCryptoManagerClusterStateChanges() { ); repositoriesService.registerRepository(request, null); MeteredRepositoryTypeA repository = (MeteredRepositoryTypeA) repositoriesService.repository(repoName); - assertNotNull(repository.cryptoManager); - assertTrue(repository.cryptoManager instanceof TestCryptoManagerTypeA); + assertNotNull(repository.cryptoHandler); + assertEquals(kpTypeA, repository.cryptoHandler.kpType); assertTrue(verified.get()); // No change - keyProviderType = 
TestCryptoManagerTypeA.TYPE; + keyProviderType = kpTypeA; settings = Settings.builder(); request = createPutRepositoryEncryptedRequest(repoName, MeteredRepositoryTypeA.TYPE, keyProviderName, settings, keyProviderType); verified.set(false); @@ -374,13 +376,13 @@ repositoriesService.registerRepository(request, null); repository = (MeteredRepositoryTypeA) repositoriesService.repository(repoName); - assertNotNull(repository.cryptoManager); - assertTrue(repository.cryptoManager instanceof TestCryptoManagerTypeA); + assertNotNull(repository.cryptoHandler); + assertEquals(kpTypeA, repository.cryptoHandler.kpType); assertTrue(verified.get()); // Same crypto client in new repo repoName = "repoName-2"; - keyProviderType = TestCryptoManagerTypeA.TYPE; + keyProviderType = kpTypeA; settings = Settings.builder(); request = createPutRepositoryEncryptedRequest(repoName, MeteredRepositoryTypeA.TYPE, keyProviderName, settings, keyProviderType); verified.set(false); @@ -395,13 +397,13 @@ ); repositoriesService.registerRepository(request, null); repository = (MeteredRepositoryTypeA) repositoriesService.repository(repoName); - assertNotNull(repository.cryptoManager); - assertTrue(repository.cryptoManager instanceof TestCryptoManagerTypeA); + assertNotNull(repository.cryptoHandler); + assertEquals(kpTypeA, repository.cryptoHandler.kpType); assertTrue(verified.get()); // Different crypto client in new repo repoName = "repoName-3"; - keyProviderType = TestCryptoManagerTypeB.TYPE; + keyProviderType = kpTypeB; settings = Settings.builder(); request = createPutRepositoryEncryptedRequest(repoName, MeteredRepositoryTypeA.TYPE, keyProviderName, settings, keyProviderType); verified.set(false); @@ -416,8 +418,8 @@ ); repositoriesService.registerRepository(request, null); repository = (MeteredRepositoryTypeA) repositoriesService.repository(repoName); - assertNotNull(repository.cryptoManager); - assertTrue(repository.cryptoManager instanceof TestCryptoManagerTypeB); + assertNotNull(repository.cryptoHandler); + assertEquals(kpTypeB, repository.cryptoHandler.kpType); assertTrue(verified.get()); } @@ -530,6 +532,13 @@ private void assertThrowsOnRegister(String repoName) { } private static class TestCryptoProvider implements CryptoHandler<Object, Object> { + final String kpName; + final String kpType; + + public TestCryptoProvider(String kpName, String kpType) { + this.kpName = kpName; + this.kpType = kpType; + } @Override public Object initEncryptionMetadata() { @@ -584,75 +593,27 @@ public DecryptedRangedStreamProvider createDecryptingStreamOfRange( public long estimateDecryptedLength(Object cryptoContext, long contentLength) { return 0; } - } - - private static abstract class TestCryptoManager implements CryptoManager { - private final String name; - private final AtomicInteger ref; - - private final CryptoHandler cryptoProvider; - - public TestCryptoManager(Settings settings, String keyProviderName) { - this.name = keyProviderName; - this.ref = new AtomicInteger(1); - this.cryptoProvider = new TestCryptoProvider(); - } @Override - public void incRef() { - ref.incrementAndGet(); - } + public void close() throws IOException { - @Override - public boolean tryIncRef() { - ref.incrementAndGet(); - return true; - } - - @Override - public boolean decRef() { - ref.decrementAndGet(); - return true; - } - - public int getReferenceCount() { - return ref.get(); - } - - @Override - public String
name() { - return name; - } - - public CryptoHandler getCryptoProvider() { - return cryptoProvider; - } - } - - private static class TestCryptoManagerTypeA extends TestCryptoManager { - public static final String TYPE = "type-A"; - - public TestCryptoManagerTypeA(Settings settings, String keyProviderName) { - super(settings, keyProviderName); - } - - @Override - public String type() { - return TYPE; - } - } - private static class TestCryptoManagerTypeB extends TestCryptoManager { - public static final String TYPE = "type-B"; + private static abstract class TestCryptoHandler implements CryptoPlugin<Object, Object> { + private final Settings settings; - public TestCryptoManagerTypeB(Settings settings, String keyProviderName) { - super(settings, keyProviderName); + public TestCryptoHandler(Settings settings) { + this.settings = settings; } - @Override - public String type() { - return TYPE; + public CryptoHandler<Object, Object> getOrCreateCryptoHandler( + MasterKeyProvider keyProvider, + String keyProviderName, + String keyProviderType, + Runnable onClose + ) { + return new TestCryptoProvider(keyProviderName, keyProviderType); } } @@ -885,7 +846,7 @@ public void close() { private static class MeteredRepositoryTypeA extends MeteredBlobStoreRepository { private static final String TYPE = "type-a"; private static final RepositoryStats STATS = new RepositoryStats(Map.of("GET", 10L)); - private final CryptoManager cryptoManager; + private final TestCryptoProvider cryptoHandler; private MeteredRepositoryTypeA(RepositoryMetadata metadata, ClusterService clusterService) { super( @@ -898,18 +859,12 @@ private MeteredRepositoryTypeA(RepositoryMetadata metadata, ClusterService clust ); if (metadata.cryptoMetadata() != null) { - switch (metadata.cryptoMetadata().keyProviderType()) { - case TestCryptoManagerTypeA.TYPE: - cryptoManager = new TestCryptoManagerTypeA(null, metadata.cryptoMetadata().keyProviderName()); - break; - case TestCryptoManagerTypeB.TYPE: - cryptoManager = new TestCryptoManagerTypeB(null, metadata.cryptoMetadata().keyProviderName()); - break; - default: - cryptoManager = null; - } + cryptoHandler = new TestCryptoProvider( + metadata.cryptoMetadata().keyProviderName(), + metadata.cryptoMetadata().keyProviderType() + ); } else { - cryptoManager = null; + cryptoHandler = null; } } @@ -932,7 +887,7 @@ public BlobPath basePath() { private static class MeteredRepositoryTypeB extends MeteredBlobStoreRepository { private static final String TYPE = "type-b"; private static final RepositoryStats STATS = new RepositoryStats(Map.of("LIST", 20L)); - private final CryptoManager cryptoManager; + private final TestCryptoProvider cryptoHandler; private MeteredRepositoryTypeB(RepositoryMetadata metadata, ClusterService clusterService) { super( @@ -945,18 +900,12 @@ private MeteredRepositoryTypeB(RepositoryMetadata metadata, ClusterService clust ); if (metadata.cryptoMetadata() != null) { - switch (metadata.cryptoMetadata().keyProviderType()) { - case TestCryptoManagerTypeA.TYPE: - cryptoManager = new TestCryptoManagerTypeA(null, metadata.cryptoMetadata().keyProviderName()); - break; - case TestCryptoManagerTypeB.TYPE: - cryptoManager = new TestCryptoManagerTypeB(null, metadata.cryptoMetadata().keyProviderName()); - break; - default: - cryptoManager = null; - } + cryptoHandler = new TestCryptoProvider( + metadata.cryptoMetadata().keyProviderName(), + metadata.cryptoMetadata().keyProviderType() + ); } else { - cryptoManager = null; + cryptoHandler = null; } } diff --git
a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java index a24fd04d3d4f6..4e60cddd14d73 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java @@ -8,6 +8,7 @@ package org.opensearch.repositories.blobstore; +import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.IndexMetadata; @@ -15,6 +16,7 @@ import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.index.IndexModule; @@ -29,6 +31,8 @@ import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.repositories.fs.FsRepository; +import org.opensearch.snapshots.SnapshotInfo; +import org.opensearch.snapshots.SnapshotState; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchSingleNodeTestCase; @@ -36,9 +40,12 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.List; import java.util.Map; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; public class BlobStoreRepositoryHelperTests extends OpenSearchSingleNodeTestCase { @@ -122,13 +129,28 @@ protected Settings getRemoteStoreBackedIndexSettings() { .build(); } + protected SnapshotInfo createSnapshot(String repositoryName, String snapshot, List<String> indices) { + logger.info("--> creating snapshot [{}] of {} in [{}]", snapshot, indices, repositoryName); + + final CreateSnapshotResponse response = client().admin() + .cluster() + .prepareCreateSnapshot(repositoryName, snapshot) + .setIndices(indices.toArray(Strings.EMPTY_ARRAY)) + .setWaitForCompletion(true) + .get(); + SnapshotInfo snapshotInfo = response.getSnapshotInfo(); + assertThat(snapshotInfo.state(), is(SnapshotState.SUCCESS)); + assertThat(snapshotInfo.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo.failedShards(), equalTo(0)); + return snapshotInfo; + } + protected void indexDocuments(Client client, String indexName) { int numDocs = randomIntBetween(10, 20); for (int i = 0; i < numDocs; i++) { String id = Integer.toString(i); client.prepareIndex(indexName).setId(id).setSource("text", "sometext").get(); } - client.admin().indices().prepareFlush(indexName).get(); } protected IndexSettings getIndexSettings(String indexName) { diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java index 0f24d60993f2f..e3e1bf31e82dc 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java @@ -33,13 +33,12 @@ package org.opensearch.repositories.blobstore; import
org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
-import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
 import org.opensearch.client.Client;
 import org.opensearch.cluster.metadata.RepositoryMetadata;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.env.Environment;
+import org.opensearch.gateway.remote.RemoteClusterStateService;
 import org.opensearch.index.IndexSettings;
 import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot;
 import org.opensearch.indices.replication.common.ReplicationType;
@@ -48,17 +47,19 @@
 import org.opensearch.repositories.RepositoryData;
 import org.opensearch.repositories.fs.FsRepository;
 import org.opensearch.snapshots.SnapshotId;
-import org.opensearch.test.FeatureFlagSetter;
+import org.opensearch.snapshots.SnapshotInfo;
 import org.opensearch.test.OpenSearchIntegTestCase;

 import java.io.IOException;
 import java.nio.file.Path;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Locale;
 import java.util.stream.Collectors;

 import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY;
 import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX;
 import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT;
 import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY;
@@ -69,10 +70,6 @@
  * Tests for the {@link BlobStoreRepository} and its subclasses.
  */
 public class BlobStoreRepositoryRemoteIndexTests extends BlobStoreRepositoryHelperTests {
-    @Override
-    protected Settings featureFlagSettings() {
-        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE, "true").build();
-    }

     @Override
     protected Settings nodeSettings() {
@@ -106,12 +103,15 @@ private Settings buildRemoteStoreNodeAttributes(String repoName, Path repoPath)
             .put("node.attr." + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, repoName)
             .put(repoTypeAttributeKey, FsRepository.TYPE)
             .put(repoSettingsAttributeKeyPrefix + "location", repoPath)
+            .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, repoName)
+            .put(repoTypeAttributeKey, FsRepository.TYPE)
+            .put(repoSettingsAttributeKeyPrefix + "location", repoPath)
+            .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), false)
             .build();
     }

     // Validate Scenario Normal Snapshot -> remoteStoreShallowCopy Snapshot -> normal Snapshot
     public void testRetrieveShallowCopySnapshotCase1() throws IOException {
-        FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE);
         final Client client = client();
         final String snapshotRepositoryName = "test-repo";
         final String remoteStoreRepositoryName = "test-rs-repo";
@@ -137,13 +137,12 @@ public void testRetrieveShallowCopySnapshotCase1() throws IOException {
         indexDocuments(client, remoteStoreIndexName);

         logger.info("--> create first snapshot");
-        CreateSnapshotResponse createSnapshotResponse = client.admin()
-            .cluster()
-            .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-1")
-            .setWaitForCompletion(true)
-            .setIndices(indexName, remoteStoreIndexName)
-            .get();
-        final SnapshotId snapshotId1 = createSnapshotResponse.getSnapshotInfo().snapshotId();
+        SnapshotInfo snapshotInfo = createSnapshot(
+            snapshotRepositoryName,
+            "test-snap-1",
+            new ArrayList<>(Arrays.asList(indexName, remoteStoreIndexName))
+        );
+        final SnapshotId snapshotId1 = snapshotInfo.snapshotId();

         String[] lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName);
         assert (lockFiles.length == 0) : "there should be no lock files present in directory, but found " + Arrays.toString(lockFiles);
@@ -154,13 +153,12 @@ public void testRetrieveShallowCopySnapshotCase1() throws IOException {
             .build();
         updateRepository(client, snapshotRepositoryName, snapshotRepoSettingsForShallowCopy);

-        createSnapshotResponse = client.admin()
-            .cluster()
-            .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-2")
-            .setWaitForCompletion(true)
-            .setIndices(indexName, remoteStoreIndexName)
-            .get();
-        final SnapshotId snapshotId2 = createSnapshotResponse.getSnapshotInfo().snapshotId();
+        snapshotInfo = createSnapshot(
+            snapshotRepositoryName,
+            "test-snap-2",
+            new ArrayList<>(Arrays.asList(indexName, remoteStoreIndexName))
+        );
+        final SnapshotId snapshotId2 = snapshotInfo.snapshotId();

         lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName);
         assert (lockFiles.length == 1) : "there should be only one lock file, but found " + Arrays.toString(lockFiles);
@@ -168,13 +166,12 @@ public void testRetrieveShallowCopySnapshotCase1() throws IOException {

         logger.info("--> create another normal snapshot");
         updateRepository(client, snapshotRepositoryName, snapshotRepoSettings);
-        createSnapshotResponse = client.admin()
-            .cluster()
-            .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-3")
-            .setWaitForCompletion(true)
-            .setIndices(indexName, remoteStoreIndexName)
-            .get();
-        final SnapshotId snapshotId3 = createSnapshotResponse.getSnapshotInfo().snapshotId();
+        snapshotInfo = createSnapshot(
+            snapshotRepositoryName,
+            "test-snap-3",
+            new ArrayList<>(Arrays.asList(indexName, remoteStoreIndexName))
+        );
+        final SnapshotId snapshotId3 = snapshotInfo.snapshotId();

         lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName);
         assert (lockFiles.length == 1) : "there should be only one lock file, but found " + Arrays.toString(lockFiles);
@@ -204,7 +201,6 @@ public void testRetrieveShallowCopySnapshotCase1() throws IOException {
     }

     public void testGetRemoteStoreShallowCopyShardMetadata() throws IOException {
-        FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE);
         final Client client = client();
         final String snapshotRepositoryName = "test-repo";
         final String remoteStoreRepositoryName = "test-rs-repo";
@@ -230,13 +226,8 @@ public void testGetRemoteStoreShallowCopyShardMetadata() throws IOException {
             .build();
         updateRepository(client, snapshotRepositoryName, snapshotRepoSettingsForShallowCopy);

-        CreateSnapshotResponse createSnapshotResponse = client.admin()
-            .cluster()
-            .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-2")
-            .setWaitForCompletion(true)
-            .setIndices(remoteStoreIndexName)
-            .get();
-        final SnapshotId snapshotId = createSnapshotResponse.getSnapshotInfo().snapshotId();
+        SnapshotInfo snapshotInfo = createSnapshot(snapshotRepositoryName, "test-snap-2", new ArrayList<>(List.of(remoteStoreIndexName)));
+        final SnapshotId snapshotId = snapshotInfo.snapshotId();

         String[] lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName);
         assert (lockFiles.length == 1) : "there should be only one lock file, but found " + Arrays.toString(lockFiles);
@@ -260,7 +251,6 @@ public void testGetRemoteStoreShallowCopyShardMetadata() throws IOException {
     // Validate Scenario remoteStoreShallowCopy Snapshot -> remoteStoreShallowCopy Snapshot
     // -> remoteStoreShallowCopy Snapshot -> normal snapshot
     public void testRetrieveShallowCopySnapshotCase2() throws IOException {
-        FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE);
         final Client client = client();
         final String snapshotRepositoryName = "test-repo";
         final String remoteStoreRepositoryName = "test-rs-repo";
@@ -307,26 +297,24 @@ public void testRetrieveShallowCopySnapshotCase2() throws IOException {

         assertTrue(updatedRepositoryMetadata.settings().getAsBoolean(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), false));

-        CreateSnapshotResponse createSnapshotResponse = client.admin()
-            .cluster()
-            .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-1")
-            .setWaitForCompletion(true)
-            .setIndices(indexName, remoteStoreIndexName)
-            .get();
-        final SnapshotId snapshotId1 = createSnapshotResponse.getSnapshotInfo().snapshotId();
+        SnapshotInfo snapshotInfo = createSnapshot(
+            snapshotRepositoryName,
+            "test-snap-1",
+            new ArrayList<>(Arrays.asList(indexName, remoteStoreIndexName))
+        );
+        final SnapshotId snapshotId1 = snapshotInfo.snapshotId();

         String[] lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName);
         assert (lockFiles.length == 1) : "lock files are " + Arrays.toString(lockFiles);
         assert lockFiles[0].endsWith(snapshotId1.getUUID() + ".lock");

         logger.info("--> create second remote index shallow snapshot");
-        createSnapshotResponse = client.admin()
-            .cluster()
-            .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-2")
-            .setWaitForCompletion(true)
-            .setIndices(indexName, remoteStoreIndexName)
-            .get();
-        final SnapshotId snapshotId2 = createSnapshotResponse.getSnapshotInfo().snapshotId();
+        snapshotInfo = createSnapshot(
+            snapshotRepositoryName,
+            "test-snap-2",
+            new ArrayList<>(Arrays.asList(indexName, remoteStoreIndexName))
+        );
+        final SnapshotId snapshotId2 = snapshotInfo.snapshotId();

         lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName);
         assert (lockFiles.length == 2) : "lock files are " + Arrays.toString(lockFiles);
@@ -335,13 +323,12 @@ public void testRetrieveShallowCopySnapshotCase2() throws IOException {
             assert lockFiles[0].contains(snapshotId.getUUID()) || lockFiles[1].contains(snapshotId.getUUID());
         }
logger.info("--> create third remote index shallow snapshot"); - createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-3") - .setWaitForCompletion(true) - .setIndices(indexName, remoteStoreIndexName) - .get(); - final SnapshotId snapshotId3 = createSnapshotResponse.getSnapshotInfo().snapshotId(); + snapshotInfo = createSnapshot( + snapshotRepositoryName, + "test-snap-3", + new ArrayList<>(Arrays.asList(indexName, remoteStoreIndexName)) + ); + final SnapshotId snapshotId3 = snapshotInfo.snapshotId(); lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); assert (lockFiles.length == 3); @@ -353,13 +340,12 @@ public void testRetrieveShallowCopySnapshotCase2() throws IOException { } logger.info("--> create normal snapshot"); createRepository(client, snapshotRepositoryName, snapshotRepoSettings); - createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-4") - .setWaitForCompletion(true) - .setIndices(indexName, remoteStoreIndexName) - .get(); - final SnapshotId snapshotId4 = createSnapshotResponse.getSnapshotInfo().snapshotId(); + snapshotInfo = createSnapshot( + snapshotRepositoryName, + "test-snap-4", + new ArrayList<>(Arrays.asList(indexName, remoteStoreIndexName)) + ); + final SnapshotId snapshotId4 = snapshotInfo.snapshotId(); lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); assert (lockFiles.length == 3) : "lock files are " + Arrays.toString(lockFiles); diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java index ea9f96ff925cd..e097c7025e4fe 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -41,7 +41,6 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; @@ -105,11 +104,6 @@ protected void assertSnapshotOrGenericThread() { } } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE, "true").build(); - } - public void testRetrieveSnapshots() throws Exception { final Client client = client(); final Path location = OpenSearchIntegTestCase.randomRepoPath(node().settings()); diff --git a/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java b/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java index 93be194b2d112..03f0d27188027 100644 --- a/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java +++ b/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java @@ -34,14 +34,19 @@ import org.opensearch.OpenSearchCorruptionException; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; +import 
org.opensearch.common.blobstore.fs.FsBlobContainer; import org.opensearch.common.blobstore.fs.FsBlobStore; +import org.opensearch.common.blobstore.stream.read.ReadContext; +import org.opensearch.common.blobstore.stream.write.WriteContext; import org.opensearch.common.compress.DeflateCompressor; import org.opensearch.common.io.Streams; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.compress.CompressorRegistry; @@ -56,7 +61,9 @@ import java.io.EOFException; import java.io.IOException; import java.io.InputStream; +import java.nio.file.Path; import java.util.Map; +import java.util.concurrent.CountDownLatch; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.greaterThan; @@ -114,6 +121,57 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par } } + public void testBlobStoreAsyncOperations() throws IOException, InterruptedException { + BlobStore blobStore = createTestBlobStore(); + MockFsVerifyingBlobContainer mockBlobContainer = new MockFsVerifyingBlobContainer( + (FsBlobStore) blobStore, + BlobPath.cleanPath(), + null + ); + ChecksumBlobStoreFormat checksumSMILE = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent); + + CountDownLatch latch = new CountDownLatch(2); + + ActionListener actionListener = new ActionListener<>() { + @Override + public void onResponse(Void unused) { + logger.info("---> Async write succeeded"); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + logger.info("---> Failure in async write"); + throw new RuntimeException("async write should not fail"); + } + }; + + // Write blobs in different formats + checksumSMILE.writeAsync( + new BlobObj("checksum smile"), + mockBlobContainer, + "check-smile", + CompressorRegistry.none(), + actionListener + ); + checksumSMILE.writeAsync( + new BlobObj("checksum smile compressed"), + mockBlobContainer, + "check-smile-comp", + CompressorRegistry.getCompressor(DeflateCompressor.NAME), + actionListener + ); + + latch.await(); + + // Assert that all checksum blobs can be read + assertEquals(checksumSMILE.read(mockBlobContainer.getDelegate(), "check-smile", xContentRegistry()).getText(), "checksum smile"); + assertEquals( + checksumSMILE.read(mockBlobContainer.getDelegate(), "check-smile-comp", xContentRegistry()).getText(), + "checksum smile compressed" + ); + } + public void testBlobStoreOperations() throws IOException { BlobStore blobStore = createTestBlobStore(); BlobContainer blobContainer = blobStore.blobContainer(BlobPath.cleanPath()); @@ -196,4 +254,35 @@ private long checksum(byte[] buffer) throws IOException { } } } + + public static class MockFsVerifyingBlobContainer extends FsBlobContainer implements AsyncMultiStreamBlobContainer { + + private BlobContainer delegate; + + public MockFsVerifyingBlobContainer(FsBlobStore blobStore, BlobPath blobPath, Path path) { + super(blobStore, blobPath, path); + delegate = blobStore.blobContainer(BlobPath.cleanPath()); + } + + @Override + public void asyncBlobUpload(WriteContext writeContext, ActionListener completionListener) throws IOException { + InputStream inputStream = writeContext.getStreamProvider(Integer.MAX_VALUE).provideStream(0).getInputStream(); + delegate.writeBlob(writeContext.getFileName(), inputStream, writeContext.getFileSize(), true); + 
completionListener.onResponse(null); + } + + @Override + public void readBlobAsync(String blobName, ActionListener listener) { + throw new RuntimeException("read not supported"); + } + + @Override + public boolean remoteIntegrityCheckSupported() { + return false; + } + + public BlobContainer getDelegate() { + return delegate; + } + } } diff --git a/test/framework/src/main/java/org/opensearch/gateway/MockGatewayMetaState.java b/test/framework/src/main/java/org/opensearch/gateway/MockGatewayMetaState.java index 14d646cf84a0a..dea205619ce95 100644 --- a/test/framework/src/main/java/org/opensearch/gateway/MockGatewayMetaState.java +++ b/test/framework/src/main/java/org/opensearch/gateway/MockGatewayMetaState.java @@ -46,14 +46,12 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.NodeEnvironment; import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.index.recovery.RemoteStoreRestoreService; import org.opensearch.plugins.MetadataUpgrader; -import org.opensearch.repositories.RepositoriesService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; import java.io.IOException; -import java.util.Collections; -import java.util.function.Supplier; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -67,10 +65,26 @@ public class MockGatewayMetaState extends GatewayMetaState { private final DiscoveryNode localNode; private final BigArrays bigArrays; + private final RemoteClusterStateService remoteClusterStateService; + private final RemoteStoreRestoreService remoteStoreRestoreService; public MockGatewayMetaState(DiscoveryNode localNode, BigArrays bigArrays) { this.localNode = localNode; this.bigArrays = bigArrays; + this.remoteClusterStateService = mock(RemoteClusterStateService.class); + this.remoteStoreRestoreService = mock(RemoteStoreRestoreService.class); + } + + public MockGatewayMetaState( + DiscoveryNode localNode, + BigArrays bigArrays, + RemoteClusterStateService remoteClusterStateService, + RemoteStoreRestoreService remoteStoreRestoreService + ) { + this.localNode = localNode; + this.bigArrays = bigArrays; + this.remoteClusterStateService = remoteClusterStateService; + this.remoteStoreRestoreService = remoteStoreRestoreService; } @Override @@ -107,26 +121,6 @@ public void start( } catch (IOException e) { throw new AssertionError(e); } - Supplier remoteClusterStateServiceSupplier = () -> { - if (RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.get(settings) == true) { - return new RemoteClusterStateService( - nodeEnvironment.nodeId(), - () -> new RepositoriesService( - settings, - clusterService, - transportService, - Collections.emptyMap(), - Collections.emptyMap(), - transportService.getThreadPool() - ), - settings, - new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - () -> 0L - ); - } else { - return null; - } - }; start( settings, transportService, @@ -141,8 +135,9 @@ public void start( new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L ), - remoteClusterStateServiceSupplier.get(), - persistedStateRegistry + remoteClusterStateService, + persistedStateRegistry, + remoteStoreRestoreService ); } } diff --git a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java index a17d71957167a..1bb1e44a8a600 100644 --- 
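Reviewer sketch, not part of the patch: the MockGatewayMetaState change replaces the internally built RemoteClusterStateService supplier with constructor injection, so a test can hand in its own doubles. A minimal example of the new four-argument constructor; only the constructor signature comes from this changeset, while localNode is a placeholder DiscoveryNode and the mocks mirror what the no-arg constructor now creates internally:

    // Hypothetical test snippet (imports as in MockGatewayMetaState above).
    RemoteClusterStateService remoteClusterStateService = mock(RemoteClusterStateService.class);
    RemoteStoreRestoreService remoteStoreRestoreService = mock(RemoteStoreRestoreService.class);
    MockGatewayMetaState gateway = new MockGatewayMetaState(
        localNode,                        // placeholder DiscoveryNode for the test
        BigArrays.NON_RECYCLING_INSTANCE, // simple BigArrays choice for tests
        remoteClusterStateService,        // injected instead of being built from RepositoriesService
        remoteStoreRestoreService
    );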
diff --git a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java
index a17d71957167a..1bb1e44a8a600 100644
--- a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java
@@ -480,11 +480,12 @@ protected String initWithSnapshotVersion(String repoName, Path repoPath, Version

     protected SnapshotInfo createFullSnapshot(String repoName, String snapshotName) {
         logger.info("--> creating full snapshot [{}] in [{}]", snapshotName, repoName);
-        CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(repoName, snapshotName)
-            .setIncludeGlobalState(true)
+        final CreateSnapshotResponse response = client().admin()
+            .cluster()
+            .prepareCreateSnapshot(repoName, snapshotName)
             .setWaitForCompletion(true)
             .get();
-        final SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo();
+        final SnapshotInfo snapshotInfo = response.getSnapshotInfo();
         assertThat(snapshotInfo.successfulShards(), is(snapshotInfo.totalShards()));
         assertThat(snapshotInfo.state(), is(SnapshotState.SUCCESS));
         return snapshotInfo;
@@ -498,8 +499,8 @@ protected SnapshotInfo createSnapshot(String repositoryName, String snapshot, Li
             .setIndices(indices.toArray(Strings.EMPTY_ARRAY))
             .setWaitForCompletion(true)
             .get();
+        SnapshotInfo snapshotInfo = response.getSnapshotInfo();

-        final SnapshotInfo snapshotInfo = response.getSnapshotInfo();
         assertThat(snapshotInfo.state(), is(SnapshotState.SUCCESS));
         assertThat(snapshotInfo.successfulShards(), greaterThan(0));
         assertThat(snapshotInfo.failedShards(), equalTo(0));
diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
index 95832dc9544ce..d3e24ccd90500 100644
--- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
+++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
@@ -1848,6 +1848,27 @@ public synchronized void stopRandomNonMasterNode() throws IOException {
         stopRandomNonClusterManagerNode();
     }

+    /**
+     * Stops all running nodes in cluster
+     */
+    public void stopAllNodes() {
+        try {
+            int totalDataNodes = numDataNodes();
+            while (totalDataNodes > 0) {
+                stopRandomDataNode();
+                totalDataNodes -= 1;
+            }
+            int totalClusterManagerNodes = numClusterManagerNodes();
+            while (totalClusterManagerNodes > 1) {
+                stopRandomNonClusterManagerNode();
+                totalClusterManagerNodes -= 1;
+            }
+            stopCurrentClusterManagerNode();
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
     private synchronized void startAndPublishNodesAndClients(List<NodeAndClient> nodeAndClients) {
         if (nodeAndClients.size() > 0) {
             final int newClusterManagers = (int) nodeAndClients.stream()
diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
index 3232ce7ddb87b..f06a6e26defeb 100644
--- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
@@ -157,6 +157,7 @@
 import org.opensearch.test.disruption.ServiceDisruptionScheme;
 import org.opensearch.test.store.MockFSIndexStore;
 import org.opensearch.test.telemetry.MockTelemetryPlugin;
+import org.opensearch.test.telemetry.tracing.StrictCheckSpanProcessor;
 import org.opensearch.test.transport.MockTransportService;
 import org.opensearch.transport.TransportInterceptor;
 import org.opensearch.transport.TransportRequest;
@@ -283,9 +284,7 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
         CodecService.DEFAULT_CODEC,
         CodecService.LZ4,
         CodecService.BEST_COMPRESSION_CODEC,
-        CodecService.ZLIB,
-        CodecService.ZSTD_CODEC,
-        CodecService.ZSTD_NO_DICT_CODEC
+        CodecService.ZLIB
     );

     /**
@@ -2297,7 +2296,9 @@ public static void afterClass() throws Exception {
                 INSTANCE.printTestMessage("cleaning up after");
                 INSTANCE.afterInternal(true);
                 checkStaticState(true);
+                StrictCheckSpanProcessor.validateTracingStateOnShutdown();
             }
+
         } finally {
             SUITE_SEED = null;
             currentCluster = null;
diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java
index 2de4c8fdbdfb8..f14fe3bf3961c 100644
--- a/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java
@@ -73,6 +73,7 @@
 import org.opensearch.search.internal.SearchContext;
 import org.opensearch.telemetry.TelemetrySettings;
 import org.opensearch.test.telemetry.MockTelemetryPlugin;
+import org.opensearch.test.telemetry.tracing.StrictCheckSpanProcessor;
 import org.opensearch.transport.TransportSettings;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -190,6 +191,7 @@ public static void setUpClass() throws Exception {
     @AfterClass
     public static void tearDownClass() throws Exception {
         stopNode();
+        StrictCheckSpanProcessor.validateTracingStateOnShutdown();
     }

     /**
diff --git a/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java b/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java
index 894e8a67cea1f..6b428a7f65594 100644
--- a/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java
+++ b/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java
@@ -13,46 +13,22 @@
 import org.opensearch.telemetry.metrics.MetricsTelemetry;
 import org.opensearch.telemetry.tracing.TracingTelemetry;
 import org.opensearch.test.telemetry.tracing.MockTracingTelemetry;
-import org.opensearch.threadpool.ThreadPool;
-
-import java.util.concurrent.TimeUnit;

 /**
  * Mock {@link Telemetry} implementation for testing.
  */
 public class MockTelemetry implements Telemetry {
-    private final ThreadPool threadPool;
-
     /**
      * Constructor with settings.
      * @param settings telemetry settings.
      */
     public MockTelemetry(TelemetrySettings settings) {
-        this(settings, null);
-    }
-
-    /**
-     * Constructor with settings.
-     * @param settings telemetry settings.
-     * @param threadPool thread pool to watch for termination
-     */
-    public MockTelemetry(TelemetrySettings settings, ThreadPool threadPool) {
-        this.threadPool = threadPool;
     }

     @Override
     public TracingTelemetry getTracingTelemetry() {
-        return new MockTracingTelemetry(() -> {
-            // There could be some asynchronous tasks running that we should await for before the closing
-            // up the tracer instance.
-            if (threadPool != null) {
-                try {
-                    threadPool.awaitTermination(10, TimeUnit.SECONDS);
-                } catch (final InterruptedException ex) {
-                    Thread.currentThread().interrupt();
-                }
-            }
-        });
+        return new MockTracingTelemetry();
     }

     @Override
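Reviewer sketch, not part of the patch: after the MockTelemetry and MockTracingTelemetry changes, closing the tracer no longer validates spans or awaits a thread pool; it only flips a shutdown flag, and the strict checks run once per suite via StrictCheckSpanProcessor.validateTracingStateOnShutdown(). A minimal illustration of the intended call site; the test class here is hypothetical, and the two base test classes in this changeset already wire the call in:

    // Hypothetical test class; validateTracingStateOnShutdown() is from this changeset.
    public class MyTracingTests extends OpenSearchTestCase {
        @AfterClass
        public static void verifyNoLeakedSpans() {
            // Throws if any span was started but never ended, or if span ids collide;
            // on failure the processor clears its static span map before rethrowing.
            StrictCheckSpanProcessor.validateTracingStateOnShutdown();
        }
    }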
diff --git a/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetryPlugin.java b/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetryPlugin.java
index ebba9857aa8f1..4f483098caf82 100644
--- a/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetryPlugin.java
+++ b/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetryPlugin.java
@@ -8,34 +8,18 @@

 package org.opensearch.test.telemetry;

-import org.opensearch.client.Client;
-import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
-import org.opensearch.cluster.service.ClusterService;
-import org.opensearch.common.SetOnce;
-import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
-import org.opensearch.core.xcontent.NamedXContentRegistry;
-import org.opensearch.env.Environment;
-import org.opensearch.env.NodeEnvironment;
 import org.opensearch.plugins.Plugin;
 import org.opensearch.plugins.TelemetryPlugin;
-import org.opensearch.repositories.RepositoriesService;
-import org.opensearch.script.ScriptService;
 import org.opensearch.telemetry.Telemetry;
 import org.opensearch.telemetry.TelemetrySettings;
-import org.opensearch.threadpool.ThreadPool;
-import org.opensearch.watcher.ResourceWatcherService;

-import java.util.Collection;
-import java.util.Collections;
 import java.util.Optional;
-import java.util.function.Supplier;

 /**
  * Mock {@link TelemetryPlugin} implementation for testing.
  */
 public class MockTelemetryPlugin extends Plugin implements TelemetryPlugin {
     private static final String MOCK_TRACER_NAME = "mock";
-    private final SetOnce<ThreadPool> threadPool = new SetOnce<>();

     /**
      * Base constructor.
@@ -44,27 +28,9 @@ public MockTelemetryPlugin() {

     }

-    @Override
-    public Collection<Object> createComponents(
-        Client client,
-        ClusterService clusterService,
-        ThreadPool threadPool,
-        ResourceWatcherService resourceWatcherService,
-        ScriptService scriptService,
-        NamedXContentRegistry xContentRegistry,
-        Environment environment,
-        NodeEnvironment nodeEnvironment,
-        NamedWriteableRegistry namedWriteableRegistry,
-        IndexNameExpressionResolver indexNameExpressionResolver,
-        Supplier<RepositoriesService> repositoriesServiceSupplier
-    ) {
-        this.threadPool.set(threadPool);
-        return Collections.emptyList();
-    }
-
     @Override
     public Optional<Telemetry> getTelemetry(TelemetrySettings settings) {
-        return Optional.of(new MockTelemetry(settings, threadPool.get()));
+        return Optional.of(new MockTelemetry(settings));
     }

     @Override
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingTelemetry.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingTelemetry.java
index a5e51dd27541b..c7f5943719230 100644
--- a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingTelemetry.java
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingTelemetry.java
@@ -12,11 +12,8 @@
 import org.opensearch.telemetry.tracing.TracingContextPropagator;
 import org.opensearch.telemetry.tracing.TracingTelemetry;
 import org.opensearch.telemetry.tracing.attributes.Attributes;
-import org.opensearch.test.telemetry.tracing.validators.AllSpansAreEndedProperly;
-import org.opensearch.test.telemetry.tracing.validators.AllSpansHaveUniqueId;

-import java.util.Arrays;
-import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;

 /**
  * Mock {@link TracingTelemetry} implementation for testing.
@@ -24,28 +21,19 @@ public class MockTracingTelemetry implements TracingTelemetry {

     private final SpanProcessor spanProcessor = new StrictCheckSpanProcessor();
-    private final Runnable onClose;
+    private final AtomicBoolean shutdown = new AtomicBoolean(false);

     /**
      * Base constructor.
      */
-    public MockTracingTelemetry() {
-        this(() -> {});
-    }
-
-    /**
-     * Base constructor.
-     *
-     * @param onClose on close hook
-     */
-    public MockTracingTelemetry(final Runnable onClose) {
-        this.onClose = onClose;
-    }
+    public MockTracingTelemetry() {}

     @Override
     public Span createSpan(String spanName, Span parentSpan, Attributes attributes) {
         Span span = new MockSpan(spanName, parentSpan, spanProcessor, attributes);
-        spanProcessor.onStart(span);
+        if (shutdown.get() == false) {
+            spanProcessor.onStart(span);
+        }
         return span;
     }

@@ -56,15 +44,7 @@ public TracingContextPropagator getContextPropagator() {

     @Override
     public void close() {
-        // Run onClose hook
-        onClose.run();
-
-        List<MockSpanData> spanData = ((StrictCheckSpanProcessor) spanProcessor).getFinishedSpanItems();
-        if (spanData.size() != 0) {
-            TelemetryValidators validators = new TelemetryValidators(
-                Arrays.asList(new AllSpansAreEndedProperly(), new AllSpansHaveUniqueId())
-            );
-            validators.validate(spanData, 1);
-        }
+        shutdown.set(true);
     }
+
 }
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java
index e3fca8813b696..c6e57531d23df 100644
--- a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java
@@ -9,8 +9,11 @@
 package org.opensearch.test.telemetry.tracing;

 import org.opensearch.telemetry.tracing.Span;
+import org.opensearch.test.telemetry.tracing.validators.AllSpansAreEndedProperly;
+import org.opensearch.test.telemetry.tracing.validators.AllSpansHaveUniqueId;

 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
@@ -61,4 +64,23 @@ private MockSpanData toMockSpanData(Span span) {
         );
         return spanData;
     }
+
+    /**
+     * Ensures the strict check succeeds for all the spans.
+     */
+    public static void validateTracingStateOnShutdown() {
+        List<MockSpanData> spanData = new ArrayList<>(spanMap.values());
+        if (spanData.size() != 0) {
+            TelemetryValidators validators = new TelemetryValidators(
+                Arrays.asList(new AllSpansAreEndedProperly(), new AllSpansHaveUniqueId())
+            );
+            try {
+                validators.validate(spanData, 1);
+            } catch (Error e) {
+                spanMap.clear();
+                throw e;
+            }
+        }
+
+    }
 }
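Reviewer sketch, not part of the patch: the new InternalTestCluster#stopAllNodes() stops data nodes first, then non-cluster-manager nodes, and the elected cluster manager last, which is the shutdown order a full-cluster-restart test needs. A hypothetical flow it enables; the index name, node settings, and the auto-restore assertion are assumptions built on the remote cluster state work in this changeset:

    // Hypothetical integration-test fragment; only stopAllNodes() is new here.
    internalCluster().stopAllNodes();                        // data nodes, then cluster-managers
    internalCluster().startClusterManagerOnlyNode(nodeSettings);
    internalCluster().startDataOnlyNode(nodeSettings);
    ensureGreen("my-index");                                 // index metadata recovered from the remote store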